-rw-r--r--Documentation/DocBook/80211.tmpl495
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/DocBook/mac80211.tmpl337
-rw-r--r--Documentation/networking/dccp.txt29
-rw-r--r--Documentation/networking/ip-sysctl.txt27
-rw-r--r--Documentation/networking/phonet.txt16
-rw-r--r--Documentation/networking/timestamping.txt22
-rw-r--r--MAINTAINERS42
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c2
-rw-r--r--arch/arm/mach-omap2/board-zoom-peripherals.c54
-rw-r--r--arch/s390/include/asm/qdio.h13
-rw-r--r--drivers/atm/firestream.c4
-rw-r--r--drivers/atm/horizon.c6
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/firewire/net.c13
-rw-r--r--drivers/ieee1394/eth1394.c16
-rw-r--r--drivers/isdn/capi/capidrv.c17
-rw-r--r--drivers/isdn/divert/isdn_divert.c6
-rw-r--r--drivers/isdn/hisax/hfc_sx.c13
-rw-r--r--drivers/isdn/i4l/isdn_tty.c15
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c1
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/isdn/mISDN/stack.c7
-rw-r--r--drivers/isdn/pcbit/edss1.c2
-rw-r--r--drivers/isdn/pcbit/edss1.h2
-rw-r--r--drivers/net/3c503.c8
-rw-r--r--drivers/net/3c515.c6
-rw-r--r--drivers/net/3c523.c2
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/8139cp.c2
-rw-r--r--drivers/net/Kconfig37
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/acenic.c2
-rw-r--r--drivers/net/amd8111e.c18
-rw-r--r--drivers/net/amd8111e.h1
-rw-r--r--drivers/net/appletalk/ipddp.c10
-rw-r--r--drivers/net/appletalk/ltpc.c2
-rw-r--r--drivers/net/arm/am79c961a.c35
-rw-r--r--drivers/net/arm/am79c961a.h1
-rw-r--r--drivers/net/arm/ep93xx_eth.c39
-rw-r--r--drivers/net/arm/ether1.c34
-rw-r--r--drivers/net/arm/ether1.h1
-rw-r--r--drivers/net/arm/ether3.c33
-rw-r--r--drivers/net/arm/ether3.h1
-rw-r--r--drivers/net/atarilance.c24
-rw-r--r--drivers/net/atl1c/atl1c.h1
-rw-r--r--drivers/net/atl1c/atl1c_hw.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/atlx/atl1.c8
-rw-r--r--drivers/net/atlx/atl2.c10
-rw-r--r--drivers/net/atp.c2
-rw-r--r--drivers/net/au1000_eth.c313
-rw-r--r--drivers/net/au1000_eth.h42
-rw-r--r--drivers/net/b44.c2
-rw-r--r--drivers/net/bcm63xx_enet.c62
-rw-r--r--drivers/net/bcm63xx_enet.h1
-rw-r--r--drivers/net/benet/be.h14
-rw-r--r--drivers/net/benet/be_cmds.c4
-rw-r--r--drivers/net/benet/be_ethtool.c3
-rw-r--r--drivers/net/benet/be_main.c62
-rw-r--r--drivers/net/bfin_mac.c10
-rw-r--r--drivers/net/bmac.c9
-rw-r--r--drivers/net/bna/Makefile11
-rw-r--r--drivers/net/bna/bfa_cee.c291
-rw-r--r--drivers/net/bna/bfa_cee.h64
-rw-r--r--drivers/net/bna/bfa_defs.h243
-rw-r--r--drivers/net/bna/bfa_defs_cna.h223
-rw-r--r--drivers/net/bna/bfa_defs_mfg_comm.h244
-rw-r--r--drivers/net/bna/bfa_defs_status.h216
-rw-r--r--drivers/net/bna/bfa_ioc.c1738
-rw-r--r--drivers/net/bna/bfa_ioc.h301
-rw-r--r--drivers/net/bna/bfa_ioc_ct.c392
-rw-r--r--drivers/net/bna/bfa_sm.h88
-rw-r--r--drivers/net/bna/bfa_wc.h69
-rw-r--r--drivers/net/bna/bfi.h392
-rw-r--r--drivers/net/bna/bfi_cna.h199
-rw-r--r--drivers/net/bna/bfi_ctreg.h637
-rw-r--r--drivers/net/bna/bfi_ll.h438
-rw-r--r--drivers/net/bna/bna.h654
-rw-r--r--drivers/net/bna/bna_ctrl.c3624
-rw-r--r--drivers/net/bna/bna_hw.h1491
-rw-r--r--drivers/net/bna/bna_txrx.c4209
-rw-r--r--drivers/net/bna/bna_types.h1128
-rw-r--r--drivers/net/bna/bnad.c3267
-rw-r--r--drivers/net/bna/bnad.h333
-rw-r--r--drivers/net/bna/bnad_ethtool.c1277
-rw-r--r--drivers/net/bna/cna.h81
-rw-r--r--drivers/net/bna/cna_fwimg.c64
-rw-r--r--drivers/net/bnx2.c66
-rw-r--r--drivers/net/bnx2x/bnx2x.h15
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c42
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h16
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c233
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h130
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c8585
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h238
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c757
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h53
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c7
-rw-r--r--drivers/net/bonding/bond_3ad.c2
-rw-r--r--drivers/net/bonding/bond_main.c4
-rw-r--r--drivers/net/bsd_comp.c2
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c8
-rw-r--r--drivers/net/cassini.c6
-rw-r--r--drivers/net/chelsio/sge.c4
-rw-r--r--drivers/net/chelsio/subr.c2
-rw-r--r--drivers/net/chelsio/vsc7326.c2
-rw-r--r--drivers/net/cnic.c2
-rw-r--r--drivers/net/cpmac.c39
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c24
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/cxgb3/regs.h4
-rw-r--r--drivers/net/cxgb3/sge.c2
-rw-r--r--drivers/net/cxgb3/t3_hw.c7
-rw-r--r--drivers/net/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c94
-rw-r--r--drivers/net/cxgb4/sge.c19
-rw-r--r--drivers/net/cxgb4/t4_hw.h1
-rw-r--r--drivers/net/cxgb4/t4fw_api.h5
-rw-r--r--drivers/net/cxgb4vf/sge.c3
-rw-r--r--drivers/net/de620.c2
-rw-r--r--drivers/net/declance.c2
-rw-r--r--drivers/net/defxx.c66
-rw-r--r--drivers/net/dl2k.c2
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/e100.c4
-rw-r--r--drivers/net/e1000/e1000.h3
-rw-r--r--drivers/net/e1000/e1000_main.c241
-rw-r--r--drivers/net/e1000e/ethtool.c4
-rw-r--r--drivers/net/e1000e/netdev.c42
-rw-r--r--drivers/net/eepro.c8
-rw-r--r--drivers/net/ehea/ehea_main.c12
-rw-r--r--drivers/net/enic/enic.h2
-rw-r--r--drivers/net/enic/enic_main.c19
-rw-r--r--drivers/net/enic/vnic_dev.c25
-rw-r--r--drivers/net/enic/vnic_devcmd.h12
-rw-r--r--drivers/net/enic/vnic_enet.h2
-rw-r--r--drivers/net/enic/vnic_resource.h13
-rw-r--r--drivers/net/enic/vnic_rq.c6
-rw-r--r--drivers/net/enic/vnic_rq.h2
-rw-r--r--drivers/net/enic/vnic_vic.c7
-rw-r--r--drivers/net/enic/vnic_wq.c6
-rw-r--r--drivers/net/epic100.c2
-rw-r--r--drivers/net/eth16i.c16
-rw-r--r--drivers/net/ethoc.c6
-rw-r--r--drivers/net/fealnx.c4
-rw-r--r--drivers/net/fec_mpc52xx.c6
-rw-r--r--drivers/net/forcedeth.c8
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/fsl_pq_mdio.c4
-rw-r--r--drivers/net/gianfar.c19
-rw-r--r--drivers/net/gianfar_ethtool.c4
-rw-r--r--drivers/net/greth.c6
-rw-r--r--drivers/net/hamachi.c2
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/scc.c3
-rw-r--r--drivers/net/hp.c8
-rw-r--r--drivers/net/hp100.c6
-rw-r--r--drivers/net/hydra.c13
-rw-r--r--drivers/net/ibm_newemac/core.c6
-rw-r--r--drivers/net/ibm_newemac/core.h6
-rw-r--r--drivers/net/ibmlana.c2
-rw-r--r--drivers/net/ibmveth.c953
-rw-r--r--drivers/net/ibmveth.h59
-rw-r--r--drivers/net/igb/e1000_82575.c18
-rw-r--r--drivers/net/igb/e1000_defines.h31
-rw-r--r--drivers/net/igb/e1000_hw.h2
-rw-r--r--drivers/net/igb/e1000_phy.c206
-rw-r--r--drivers/net/igb/e1000_phy.h2
-rw-r--r--drivers/net/igb/igb.h2
-rw-r--r--drivers/net/igb/igb_main.c28
-rw-r--r--drivers/net/igbvf/ethtool.c2
-rw-r--r--drivers/net/igbvf/netdev.c4
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/ipg.c6
-rw-r--r--drivers/net/irda/donauboe.c4
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/irda/mcs7780.c2
-rw-r--r--drivers/net/irda/nsc-ircc.c2
-rw-r--r--drivers/net/irda/sir_dev.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/irda/stir4200.c2
-rw-r--r--drivers/net/irda/via-ircc.c3
-rw-r--r--drivers/net/irda/via-ircc.h2
-rw-r--r--drivers/net/irda/vlsi_ir.h2
-rw-r--r--drivers/net/iseries_veth.c2
-rw-r--r--drivers/net/ixgb/ixgb_ee.c32
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c2
-rw-r--r--drivers/net/ixgb/ixgb_hw.c14
-rw-r--r--drivers/net/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ixgbe/ixgbe.h30
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c57
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c387
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c7
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c1721
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ixgbevf/ethtool.c153
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h1
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c32
-rw-r--r--drivers/net/ixgbevf/vf.h2
-rw-r--r--drivers/net/jme.c100
-rw-r--r--drivers/net/jme.h3
-rw-r--r--drivers/net/ll_temac_main.c4
-rw-r--r--drivers/net/loopback.c20
-rw-r--r--drivers/net/lp486e.c2
-rw-r--r--drivers/net/mac8390.c48
-rw-r--r--drivers/net/macb.c2
-rw-r--r--drivers/net/macvlan.c4
-rw-r--r--drivers/net/macvtap.c99
-rw-r--r--drivers/net/meth.c2
-rw-r--r--drivers/net/mlx4/Makefile2
-rw-r--r--drivers/net/mlx4/alloc.c17
-rw-r--r--drivers/net/mlx4/en_ethtool.c173
-rw-r--r--drivers/net/mlx4/en_main.c24
-rw-r--r--drivers/net/mlx4/en_netdev.c28
-rw-r--r--drivers/net/mlx4/en_port.c32
-rw-r--r--drivers/net/mlx4/en_port.h14
-rw-r--r--drivers/net/mlx4/en_rx.c104
-rw-r--r--drivers/net/mlx4/en_selftest.c179
-rw-r--r--drivers/net/mlx4/en_tx.c20
-rw-r--r--drivers/net/mlx4/eq.c44
-rw-r--r--drivers/net/mlx4/fw.c15
-rw-r--r--drivers/net/mlx4/fw.h6
-rw-r--r--drivers/net/mlx4/main.c6
-rw-r--r--drivers/net/mlx4/mlx4_en.h39
-rw-r--r--drivers/net/mlx4/profile.c2
-rw-r--r--drivers/net/myri10ge/myri10ge.c10
-rw-r--r--drivers/net/myri_sbus.c2
-rw-r--r--drivers/net/natsemi.c2
-rw-r--r--drivers/net/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/niu.c38
-rw-r--r--drivers/net/ns83820.c53
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pasemi_mac_ethtool.c16
-rw-r--r--drivers/net/pch_gbe/Makefile4
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h659
-rw-r--r--drivers/net/pch_gbe/pch_gbe_api.c245
-rw-r--r--drivers/net/pch_gbe/pch_gbe_api.h36
-rw-r--r--drivers/net/pch_gbe/pch_gbe_ethtool.c584
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c2473
-rw-r--r--drivers/net/pch_gbe/pch_gbe_param.c499
-rw-r--r--drivers/net/pch_gbe/pch_gbe_phy.c274
-rw-r--r--drivers/net/pch_gbe/pch_gbe_phy.h37
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/3c574_cs.c88
-rw-r--r--drivers/net/pcmcia/3c589_cs.c17
-rw-r--r--drivers/net/pcmcia/axnet_cs.c187
-rw-r--r--drivers/net/pcmcia/com20020_cs.c32
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c60
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c26
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c56
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c116
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c105
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c104
-rw-r--r--drivers/net/pcnet32.c4
-rw-r--r--drivers/net/plip.c9
-rw-r--r--drivers/net/pppoe.c2
-rw-r--r--drivers/net/pppox.c4
-rw-r--r--drivers/net/pptp.c726
-rw-r--r--drivers/net/ps3_gelic_net.c4
-rw-r--r--drivers/net/ps3_gelic_wireless.c6
-rw-r--r--drivers/net/pxa168_eth.c3
-rw-r--r--drivers/net/qla3xxx.c4
-rw-r--r--drivers/net/qlcnic/qlcnic.h108
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c267
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c75
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h23
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c77
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c309
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c1163
-rw-r--r--drivers/net/qlge/qlge_main.c34
-rw-r--r--drivers/net/r6040.c70
-rw-r--r--drivers/net/r8169.c18
-rw-r--r--drivers/net/rrunner.c2
-rw-r--r--drivers/net/s2io.c37
-rw-r--r--drivers/net/s2io.h9
-rw-r--r--drivers/net/sb1250-mac.c2
-rw-r--r--drivers/net/sc92031.c11
-rw-r--r--drivers/net/sfc/Makefile7
-rw-r--r--drivers/net/sfc/efx.c334
-rw-r--r--drivers/net/sfc/efx.h36
-rw-r--r--drivers/net/sfc/ethtool.c171
-rw-r--r--drivers/net/sfc/falcon.c136
-rw-r--r--drivers/net/sfc/falcon_boards.c203
-rw-r--r--drivers/net/sfc/falcon_gmac.c230
-rw-r--r--drivers/net/sfc/filter.c445
-rw-r--r--drivers/net/sfc/filter.h189
-rw-r--r--drivers/net/sfc/mac.h1
-rw-r--r--drivers/net/sfc/mdio_10g.c30
-rw-r--r--drivers/net/sfc/net_driver.h112
-rw-r--r--drivers/net/sfc/nic.c197
-rw-r--r--drivers/net/sfc/phy.h18
-rw-r--r--drivers/net/sfc/regs.h14
-rw-r--r--drivers/net/sfc/rx.c73
-rw-r--r--drivers/net/sfc/selftest.c7
-rw-r--r--drivers/net/sfc/siena.c4
-rw-r--r--drivers/net/sfc/tenxpress.c424
-rw-r--r--drivers/net/sfc/tx.c78
-rw-r--r--drivers/net/sfc/txc43128_phy.c560
-rw-r--r--drivers/net/sfc/workarounds.h9
-rw-r--r--drivers/net/sh_eth.c4
-rw-r--r--drivers/net/sis900.c8
-rw-r--r--drivers/net/skfp/cfm.c10
-rw-r--r--drivers/net/skfp/drvfbi.c16
-rw-r--r--drivers/net/skfp/ess.c46
-rw-r--r--drivers/net/skfp/fplustm.c24
-rw-r--r--drivers/net/skfp/hwmtm.c30
-rw-r--r--drivers/net/skfp/hwt.c4
-rw-r--r--drivers/net/skfp/pcmplc.c22
-rw-r--r--drivers/net/skfp/pmf.c62
-rw-r--r--drivers/net/skfp/queue.c2
-rw-r--r--drivers/net/skfp/skfddi.c116
-rw-r--r--drivers/net/skfp/smt.c78
-rw-r--r--drivers/net/skfp/smtdef.c4
-rw-r--r--drivers/net/skfp/smtinit.c2
-rw-r--r--drivers/net/skfp/srf.c2
-rw-r--r--drivers/net/skge.c5
-rw-r--r--drivers/net/sky2.c3
-rw-r--r--drivers/net/slip.c93
-rw-r--r--drivers/net/slip.h9
-rw-r--r--drivers/net/smsc911x.c2
-rw-r--r--drivers/net/spider_net.c4
-rw-r--r--drivers/net/starfire.c10
-rw-r--r--drivers/net/stmmac/Kconfig5
-rw-r--r--drivers/net/stmmac/common.h55
-rw-r--r--drivers/net/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c34
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c18
-rw-r--r--drivers/net/stmmac/dwmac100_core.c29
-rw-r--r--drivers/net/stmmac/dwmac100_dma.c18
-rw-r--r--drivers/net/stmmac/dwmac_dma.h16
-rw-r--r--drivers/net/stmmac/dwmac_lib.c22
-rw-r--r--drivers/net/stmmac/enh_desc.c4
-rw-r--r--drivers/net/stmmac/norm_desc.c19
-rw-r--r--drivers/net/stmmac/stmmac.h9
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c37
-rw-r--r--drivers/net/stmmac/stmmac_main.c216
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c26
-rw-r--r--drivers/net/sun3lance.c4
-rw-r--r--drivers/net/sunbmac.c2
-rw-r--r--drivers/net/sundance.c164
-rw-r--r--drivers/net/sungem.c211
-rw-r--r--drivers/net/sungem_phy.c5
-rw-r--r--drivers/net/sunhme.c10
-rw-r--r--drivers/net/sunlance.c2
-rw-r--r--drivers/net/sunqe.c2
-rw-r--r--drivers/net/sunvnet.c50
-rw-r--r--drivers/net/tc35815.c2
-rw-r--r--drivers/net/tehuti.c34
-rw-r--r--drivers/net/tehuti.h1
-rw-r--r--drivers/net/tg3.c165
-rw-r--r--drivers/net/tg3.h22
-rw-r--r--drivers/net/tlan.c10
-rw-r--r--drivers/net/tlan.h8
-rw-r--r--drivers/net/tokenring/proteon.c2
-rw-r--r--drivers/net/tokenring/smctr.c500
-rw-r--r--drivers/net/tokenring/tms380tr.c64
-rw-r--r--drivers/net/tokenring/tmspci.c10
-rw-r--r--drivers/net/tsi108_eth.c2
-rw-r--r--drivers/net/tulip/de4x5.c56
-rw-r--r--drivers/net/tulip/dmfe.c2
-rw-r--r--drivers/net/tulip/interrupt.c77
-rw-r--r--drivers/net/tulip/tulip.h3
-rw-r--r--drivers/net/tulip/tulip_core.c10
-rw-r--r--drivers/net/tulip/uli526x.c4
-rw-r--r--drivers/net/tulip/winbond-840.c2
-rw-r--r--drivers/net/tulip/xircom_cb.c15
-rw-r--r--drivers/net/typhoon.c50
-rw-r--r--drivers/net/usb/Kconfig8
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/cx82310_eth.c346
-rw-r--r--drivers/net/usb/hso.c9
-rw-r--r--drivers/net/usb/kaweth.c9
-rw-r--r--drivers/net/usb/sierra_net.c4
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/via-velocity.h11
-rw-r--r--drivers/net/virtio_net.c14
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c4
-rw-r--r--drivers/net/vxge/vxge-main.c34
-rw-r--r--drivers/net/vxge/vxge-main.h1
-rw-r--r--drivers/net/wan/c101.c2
-rw-r--r--drivers/net/wan/cycx_drv.c14
-rw-r--r--drivers/net/wan/cycx_main.c6
-rw-r--r--drivers/net/wan/dlci.c42
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c6
-rw-r--r--drivers/net/wan/n2.c6
-rw-r--r--drivers/net/wan/pc300_drv.c20
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wan/sdla.c108
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wan/z85230.c4
-rw-r--r--drivers/net/wd.c8
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/airo.c13
-rw-r--r--drivers/net/wireless/at76c50x-usb.c7
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile4
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c31
-rw-r--r--drivers/net/wireless/ath/ath.h35
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c22
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c2075
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h9
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c16
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c211
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c21
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c99
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h44
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c30
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h75
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c109
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c290
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c53
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h42
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c134
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c305
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h54
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c55
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c582
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c74
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c113
-rw-r--r--drivers/net/wireless/ath/carl9170/Kconfig41
-rw-r--r--drivers/net/wireless/ath/carl9170/Makefile4
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h627
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c188
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.h158
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c906
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.h134
-rw-r--r--drivers/net/wireless/ath/carl9170/eeprom.h216
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c395
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h268
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h237
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h736
-rw-r--r--drivers/net/wireless/ath/carl9170/led.c190
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c604
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c1855
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c1810
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.h567
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c909
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c1373
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c1138
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h7
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h412
-rw-r--r--drivers/net/wireless/ath/debug.h2
-rw-r--r--drivers/net/wireless/ath/key.c568
-rw-r--r--drivers/net/wireless/ath/reg.h23
-rw-r--r--drivers/net/wireless/b43/main.c30
-rw-r--r--drivers/net/wireless/b43/phy_n.c64
-rw-r--r--drivers/net/wireless/b43legacy/main.c5
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c10
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c12
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig10
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c86
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c165
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c503
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c147
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c724
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c332
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c704
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.h129
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c153
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c87
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c724
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h535
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c945
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h98
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c170
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h241
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c638
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c442
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c224
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h64
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c47
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c339
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c7
-rw-r--r--drivers/net/wireless/libertas/cfg.c68
-rw-r--r--drivers/net/wireless/libertas/decl.h13
-rw-r--r--drivers/net/wireless/libertas/if_cs.c130
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c161
-rw-r--r--drivers/net/wireless/libertas/if_sdio.h4
-rw-r--r--drivers/net/wireless/libertas/if_spi.c150
-rw-r--r--drivers/net/wireless/libertas/if_spi.h5
-rw-r--r--drivers/net/wireless/libertas/if_usb.c60
-rw-r--r--drivers/net/wireless/libertas/if_usb.h1
-rw-r--r--drivers/net/wireless/libertas/main.c105
-rw-r--r--drivers/net/wireless/libertas/mesh.c2
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c57
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c31
-rw-r--r--drivers/net/wireless/orinoco/hw.c9
-rw-r--r--drivers/net/wireless/orinoco/wext.c11
-rw-r--r--drivers/net/wireless/p54/Kconfig18
-rw-r--r--drivers/net/wireless/p54/eeprom.c21
-rw-r--r--drivers/net/wireless/p54/fwio.c6
-rw-r--r--drivers/net/wireless/p54/main.c9
-rw-r--r--drivers/net/wireless/p54/p54spi.c9
-rw-r--r--drivers/net/wireless/p54/p54spi_eeprom.h2
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c25
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/ray_cs.c44
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c147
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c154
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c71
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h50
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c457
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h27
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c259
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c159
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h34
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c21
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c71
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c82
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h56
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c298
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c118
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c88
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c143
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c9
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig5
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h7
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h10
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.h8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_debugfs.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_debugfs.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_event.c33
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_event.h3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_io.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c55
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.h8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_reg.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_sdio.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.c26
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h3
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c11
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h1
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.h9
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c23
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_scan.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_sdio.c58
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_platform_data.c28
-rw-r--r--drivers/net/wireless/wl3501_cs.c11
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c5
-rw-r--r--drivers/net/xen-netfront.c14
-rw-r--r--drivers/net/xilinx_emaclite.c15
-rw-r--r--drivers/net/yellowfin.c2
-rw-r--r--drivers/s390/cio/qdio.h29
-rw-r--r--drivers/s390/cio/qdio_debug.c33
-rw-r--r--drivers/s390/cio/qdio_main.c138
-rw-r--r--drivers/s390/cio/qdio_setup.c1
-rw-r--r--drivers/s390/cio/qdio_thinint.c66
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/s390/net/qeth_core.h17
-rw-r--r--drivers/s390/net/qeth_core_main.c26
-rw-r--r--drivers/s390/net/qeth_l2_main.c173
-rw-r--r--drivers/s390/net/qeth_l3_main.c189
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c6
-rw-r--r--drivers/usb/atm/cxacru.c18
-rw-r--r--drivers/uwb/address.c5
-rw-r--r--drivers/uwb/wlp/wss-lc.c7
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/atmdev.h2
-rw-r--r--include/linux/dccp.h6
-rw-r--r--include/linux/etherdevice.h22
-rw-r--r--include/linux/ethtool.h188
-rw-r--r--include/linux/if.h2
-rw-r--r--include/linux/if_ether.h2
-rw-r--r--include/linux/if_macvlan.h9
-rw-r--r--include/linux/if_pppox.h52
-rw-r--r--include/linux/if_vlan.h5
-rw-r--r--include/linux/in.h19
-rw-r--r--include/linux/inetdevice.h14
-rw-r--r--include/linux/mlx4/cmd.h1
-rw-r--r--include/linux/mlx4/device.h7
-rw-r--r--include/linux/netdevice.h48
-rw-r--r--include/linux/nl80211.h170
-rw-r--r--include/linux/pci_ids.h3
-rw-r--r--include/linux/phonet.h2
-rw-r--r--include/linux/phy.h4
-rw-r--r--include/linux/pkt_cls.h1
-rw-r--r--include/linux/rds.h115
-rw-r--r--include/linux/rtnetlink.h20
-rw-r--r--include/linux/skbuff.h83
-rw-r--r--include/linux/ssb/ssb_regs.h1
-rw-r--r--include/linux/stmmac.h6
-rw-r--r--include/linux/tc_act/Kbuild1
-rw-r--r--include/linux/tc_act/tc_csum.h32
-rw-r--r--include/linux/tc_ematch/tc_em_meta.h1
-rw-r--r--include/linux/tcp.h1
-rw-r--r--include/linux/wl12xx.h (renamed from include/linux/spi/wl12xx.h)10
-rw-r--r--include/net/addrconf.h63
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/bluetooth/l2cap.h2
-rw-r--r--include/net/cfg80211.h240
-rw-r--r--include/net/gre.h18
-rw-r--r--include/net/inet_connection_sock.h1
-rw-r--r--include/net/inet_ecn.h2
-rw-r--r--include/net/ip.h6
-rw-r--r--include/net/ipv6.h35
-rw-r--r--include/net/irda/irlan_common.h1
-rw-r--r--include/net/irda/irlan_event.h2
-rw-r--r--include/net/irda/irlap.h2
-rw-r--r--include/net/irda/irlmp.h2
-rw-r--r--include/net/irda/irttp.h2
-rw-r--r--include/net/mac80211.h109
-rw-r--r--include/net/phonet/pep.h5
-rw-r--r--include/net/phonet/phonet.h5
-rw-r--r--include/net/phonet/pn_dev.h1
-rw-r--r--include/net/raw.h5
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/sctp.h60
-rw-r--r--include/net/sctp/sm.h10
-rw-r--r--include/net/sctp/structs.h2
-rw-r--r--include/net/sctp/tsnmap.h2
-rw-r--r--include/net/sock.h14
-rw-r--r--include/net/tc_act/tc_csum.h15
-rw-r--r--include/net/tcp.h9
-rw-r--r--include/net/tipc/tipc_msg.h10
-rw-r--r--net/802/fc.c2
-rw-r--r--net/802/fddi.c12
-rw-r--r--net/802/hippi.c2
-rw-r--r--net/802/tr.c2
-rw-r--r--net/8021q/vlan.c4
-rw-r--r--net/8021q/vlan.h2
-rw-r--r--net/8021q/vlan_core.c16
-rw-r--r--net/8021q/vlan_dev.c9
-rw-r--r--net/9p/client.c4
-rw-r--r--net/9p/trans_fd.c2
-rw-r--r--net/atm/common.c2
-rw-r--r--net/atm/lec.c1
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/ax25/ax25_route.c4
-rw-r--r--net/bluetooth/af_bluetooth.c5
-rw-r--r--net/bluetooth/rfcomm/core.c4
-rw-r--r--net/bridge/br_if.c29
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_netfilter.c107
-rw-r--r--net/caif/caif_dev.c24
-rw-r--r--net/caif/caif_socket.c26
-rw-r--r--net/caif/cfcnfg.c49
-rw-r--r--net/caif/cfctrl.c59
-rw-r--r--net/caif/cfdbgl.c4
-rw-r--r--net/caif/cfdgml.c11
-rw-r--r--net/caif/cffrml.c14
-rw-r--r--net/caif/cfmuxl.c14
-rw-r--r--net/caif/cfpkt_skbuff.c48
-rw-r--r--net/caif/cfrfml.c12
-rw-r--r--net/caif/cfserl.c4
-rw-r--r--net/caif/cfsrvl.c17
-rw-r--r--net/caif/cfutill.c12
-rw-r--r--net/caif/cfveil.c11
-rw-r--r--net/caif/cfvidl.c6
-rw-r--r--net/caif/chnl_net.c47
-rw-r--r--net/can/raw.c4
-rw-r--r--net/core/datagram.c5
-rw-r--r--net/core/dev.c285
-rw-r--r--net/core/ethtool.c88
-rw-r--r--net/core/flow.c82
-rw-r--r--net/core/gen_estimator.c4
-rw-r--r--net/core/iovec.c6
-rw-r--r--net/core/neighbour.c6
-rw-r--r--net/core/net-sysfs.c5
-rw-r--r--net/core/pktgen.c12
-rw-r--r--net/core/rtnetlink.c31
-rw-r--r--net/core/skbuff.c92
-rw-r--r--net/core/sock.c4
-rw-r--r--net/core/utils.c15
-rw-r--r--net/dccp/ccid.h46
-rw-r--r--net/dccp/ccids/Kconfig31
-rw-r--r--net/dccp/ccids/ccid2.c287
-rw-r--r--net/dccp/ccids/ccid2.h35
-rw-r--r--net/dccp/ccids/ccid3.c253
-rw-r--r--net/dccp/ccids/ccid3.h51
-rw-r--r--net/dccp/ccids/lib/loss_interval.c2
-rw-r--r--net/dccp/ccids/lib/packet_history.c39
-rw-r--r--net/dccp/ccids/lib/packet_history.h22
-rw-r--r--net/dccp/ccids/lib/tfrc.h1
-rw-r--r--net/dccp/ccids/lib/tfrc_equation.c14
-rw-r--r--net/dccp/options.c25
-rw-r--r--net/decnet/dn_nsp_out.c8
-rw-r--r--net/econet/af_econet.c6
-rw-r--r--net/ethernet/eth.c8
-rw-r--r--net/ipv4/Kconfig7
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/af_inet.c8
-rw-r--r--net/ipv4/arp.c228
-rw-r--r--net/ipv4/datagram.c2
-rw-r--r--net/ipv4/devinet.c4
-rw-r--r--net/ipv4/fib_trie.c55
-rw-r--r--net/ipv4/gre.c151
-rw-r--r--net/ipv4/icmp.c4
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/ip_fragment.c6
-rw-r--r--net/ipv4/ip_gre.c83
-rw-r--r--net/ipv4/ip_options.c3
-rw-r--r--net/ipv4/ip_output.c24
-rw-r--r--net/ipv4/ipip.c74
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter/arp_tables.c2
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c31
-rw-r--r--net/ipv4/protocol.c31
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c13
-rw-r--r--net/ipv4/tcp.c11
-rw-r--r--net/ipv4/tcp_input.c27
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/tcp_output.c31
-rw-r--r--net/ipv4/tcp_timer.c40
-rw-r--r--net/ipv4/tcp_westwood.c2
-rw-r--r--net/ipv4/tunnel4.c19
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv4/xfrm4_tunnel.c4
-rw-r--r--net/ipv6/addrconf.c5
-rw-r--r--net/ipv6/addrlabel.c5
-rw-r--r--net/ipv6/af_inet6.c6
-rw-r--r--net/ipv6/exthdrs_core.c4
-rw-r--r--net/ipv6/ip6_output.c6
-rw-r--r--net/ipv6/ip6_tunnel.c67
-rw-r--r--net/ipv6/ip6mr.c4
-rw-r--r--net/ipv6/ndisc.c26
-rw-r--r--net/ipv6/netfilter/ip6_tables.c14
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/protocol.c32
-rw-r--r--net/ipv6/raw.c12
-rw-r--r--net/ipv6/reassembly.c2
-rw-r--r--net/ipv6/route.c14
-rw-r--r--net/ipv6/sit.c71
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/ipv6/tunnel6.c17
-rw-r--r--net/ipv6/xfrm6_policy.c2
-rw-r--r--net/ipv6/xfrm6_tunnel.c4
-rw-r--r--net/irda/af_irda.c14
-rw-r--r--net/irda/discovery.c2
-rw-r--r--net/irda/ircomm/ircomm_tty.c4
-rw-r--r--net/irda/irlan/irlan_eth.c32
-rw-r--r--net/irda/irlan/irlan_event.c2
-rw-r--r--net/irda/irlmp.c2
-rw-r--r--net/irda/irlmp_frame.c2
-rw-r--r--net/irda/irnet/irnet_irda.c22
-rw-r--r--net/irda/irnet/irnet_ppp.c8
-rw-r--r--net/irda/irnet/irnet_ppp.h3
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/l2tp/l2tp_eth.c1
-rw-r--r--net/l2tp/l2tp_ppp.c2
-rw-r--r--net/mac80211/aes_ccm.c6
-rw-r--r--net/mac80211/aes_cmac.c6
-rw-r--r--net/mac80211/agg-rx.c22
-rw-r--r--net/mac80211/cfg.c145
-rw-r--r--net/mac80211/chan.c2
-rw-r--r--net/mac80211/debugfs.c6
-rw-r--r--net/mac80211/debugfs_key.c55
-rw-r--r--net/mac80211/driver-ops.h14
-rw-r--r--net/mac80211/driver-trace.h42
-rw-r--r--net/mac80211/ht.c28
-rw-r--r--net/mac80211/ibss.c12
-rw-r--r--net/mac80211/ieee80211_i.h68
-rw-r--r--net/mac80211/iface.c401
-rw-r--r--net/mac80211/key.c113
-rw-r--r--net/mac80211/key.h10
-rw-r--r--net/mac80211/main.c179
-rw-r--r--net/mac80211/mlme.c106
-rw-r--r--net/mac80211/offchannel.c26
-rw-r--r--net/mac80211/pm.c3
-rw-r--r--net/mac80211/rate.c11
-rw-r--r--net/mac80211/rc80211_pid_debugfs.c2
-rw-r--r--net/mac80211/rx.c522
-rw-r--r--net/mac80211/scan.c66
-rw-r--r--net/mac80211/sta_info.c25
-rw-r--r--net/mac80211/sta_info.h16
-rw-r--r--net/mac80211/status.c11
-rw-r--r--net/mac80211/tx.c68
-rw-r--r--net/mac80211/util.c35
-rw-r--r--net/mac80211/wep.c2
-rw-r--r--net/mac80211/work.c39
-rw-r--r--net/mac80211/wpa.c32
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c17
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c13
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c22
-rw-r--r--net/netfilter/xt_hashlimit.c15
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/phonet/af_phonet.c17
-rw-r--r--net/phonet/datagram.c13
-rw-r--r--net/phonet/pep.c35
-rw-r--r--net/phonet/pn_dev.c5
-rw-r--r--net/phonet/socket.c190
-rw-r--r--net/rds/af_rds.c26
-rw-r--r--net/rds/bind.c82
-rw-r--r--net/rds/cong.c8
-rw-r--r--net/rds/connection.c157
-rw-r--r--net/rds/ib.c194
-rw-r--r--net/rds/ib.h100
-rw-r--r--net/rds/ib_cm.c184
-rw-r--r--net/rds/ib_rdma.c318
-rw-r--r--net/rds/ib_recv.c549
-rw-r--r--net/rds/ib_send.c682
-rw-r--r--net/rds/ib_stats.c2
-rw-r--r--net/rds/ib_sysctl.c17
-rw-r--r--net/rds/info.c12
-rw-r--r--net/rds/iw.c4
-rw-r--r--net/rds/iw.h11
-rw-r--r--net/rds/iw_cm.c14
-rw-r--r--net/rds/iw_rdma.c5
-rw-r--r--net/rds/iw_recv.c24
-rw-r--r--net/rds/iw_send.c93
-rw-r--r--net/rds/iw_sysctl.c4
-rw-r--r--net/rds/loop.c31
-rw-r--r--net/rds/message.c118
-rw-r--r--net/rds/page.c5
-rw-r--r--net/rds/rdma.c339
-rw-r--r--net/rds/rdma.h85
-rw-r--r--net/rds/rdma_transport.c42
-rw-r--r--net/rds/rds.h187
-rw-r--r--net/rds/recv.c9
-rw-r--r--net/rds/send.c544
-rw-r--r--net/rds/stats.c6
-rw-r--r--net/rds/sysctl.c4
-rw-r--r--net/rds/tcp.c8
-rw-r--r--net/rds/tcp.h9
-rw-r--r--net/rds/tcp_connect.c2
-rw-r--r--net/rds/tcp_listen.c6
-rw-r--r--net/rds/tcp_recv.c14
-rw-r--r--net/rds/tcp_send.c66
-rw-r--r--net/rds/threads.c69
-rw-r--r--net/rds/transport.c19
-rw-r--r--net/rds/xlist.h80
-rw-r--r--net/rfkill/input.c2
-rw-r--r--net/rose/rose_link.c4
-rw-r--r--net/sched/Kconfig10
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/act_csum.c595
-rw-r--r--net/sched/cls_flow.c74
-rw-r--r--net/sched/em_meta.c6
-rw-r--r--net/sched/sch_api.c2
-rw-r--r--net/sched/sch_sfq.c33
-rw-r--r--net/sctp/associola.c2
-rw-r--r--net/sctp/chunk.c2
-rw-r--r--net/sctp/inqueue.c2
-rw-r--r--net/sctp/ipv6.c4
-rw-r--r--net/sctp/objcnt.c5
-rw-r--r--net/sctp/output.c2
-rw-r--r--net/sctp/outqueue.c34
-rw-r--r--net/sctp/probe.c4
-rw-r--r--net/sctp/protocol.c19
-rw-r--r--net/sctp/sm_make_chunk.c2
-rw-r--r--net/sctp/sm_sideeffect.c21
-rw-r--r--net/sctp/sm_statefuns.c20
-rw-r--r--net/sctp/sm_statetable.c42
-rw-r--r--net/sctp/socket.c85
-rw-r--r--net/sctp/transport.c9
-rw-r--r--net/socket.c30
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/sunrpc/auth_gss/gss_generic_token.c44
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c2
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c2
-rw-r--r--net/sunrpc/sched.c2
-rw-r--r--net/tipc/addr.c2
-rw-r--r--net/tipc/bcast.c41
-rw-r--r--net/tipc/bearer.c2
-rw-r--r--net/tipc/core.c6
-rw-r--r--net/tipc/dbg.c4
-rw-r--r--net/tipc/discover.c8
-rw-r--r--net/tipc/eth_media.c48
-rw-r--r--net/tipc/link.c31
-rw-r--r--net/tipc/link.h16
-rw-r--r--net/tipc/msg.h6
-rw-r--r--net/tipc/name_table.c50
-rw-r--r--net/tipc/net.c1
-rw-r--r--net/tipc/node.c28
-rw-r--r--net/tipc/node.h2
-rw-r--r--net/tipc/port.c19
-rw-r--r--net/tipc/port.h2
-rw-r--r--net/tipc/socket.c83
-rw-r--r--net/tipc/subscr.c2
-rw-r--r--net/unix/af_unix.c5
-rw-r--r--net/wireless/core.c66
-rw-r--r--net/wireless/core.h32
-rw-r--r--net/wireless/mlme.c152
-rw-r--r--net/wireless/nl80211.c208
-rw-r--r--net/wireless/nl80211.h14
-rw-r--r--net/wireless/radiotap.c3
-rw-r--r--net/wireless/reg.c22
-rw-r--r--net/wireless/sme.c9
-rw-r--r--net/wireless/sysfs.c9
-rw-r--r--net/wireless/util.c28
-rw-r--r--net/wireless/wext-core.c2
-rw-r--r--net/wireless/wext-sme.c2
-rw-r--r--net/x25/af_x25.c34
973 files changed, 75255 insertions, 23359 deletions
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
new file mode 100644
index 000000000000..19a1210c2530
--- /dev/null
+++ b/Documentation/DocBook/80211.tmpl
@@ -0,0 +1,495 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE set PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+<set>
+ <setinfo>
+ <title>The 802.11 subsystems &ndash; for kernel developers</title>
+ <subtitle>
+ Explaining wireless 802.11 networking in the Linux kernel
+ </subtitle>
+
+ <copyright>
+ <year>2007-2009</year>
+ <holder>Johannes Berg</holder>
+ </copyright>
+
+ <authorgroup>
+ <author>
+ <firstname>Johannes</firstname>
+ <surname>Berg</surname>
+ <affiliation>
+ <address><email>johannes@sipsolutions.net</email></address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License version 2 as published by the Free Software Foundation.
+ </para>
+ <para>
+ This documentation is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this documentation; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+
+ <abstract>
+ <para>
+ These books attempt to give a description of the
+ various subsystems that play a role in 802.11 wireless
+ networking in Linux. Since these books are for kernel
+ developers they attempt to document the structures
+ and functions used in the kernel as well as giving a
+ higher-level overview.
+ </para>
+ <para>
+ The reader is expected to be familiar with the 802.11
+ standard as published by the IEEE in 802.11-2007 (or
+ possibly later versions). References to this standard
+ will be given as "802.11-2007 8.1.5".
+ </para>
+ </abstract>
+ </setinfo>
+ <book id="cfg80211-developers-guide">
+ <bookinfo>
+ <title>The cfg80211 subsystem</title>
+
+ <abstract>
+!Pinclude/net/cfg80211.h Introduction
+ </abstract>
+ </bookinfo>
+ <chapter>
+ <title>Device registration</title>
+!Pinclude/net/cfg80211.h Device registration
+!Finclude/net/cfg80211.h ieee80211_band
+!Finclude/net/cfg80211.h ieee80211_channel_flags
+!Finclude/net/cfg80211.h ieee80211_channel
+!Finclude/net/cfg80211.h ieee80211_rate_flags
+!Finclude/net/cfg80211.h ieee80211_rate
+!Finclude/net/cfg80211.h ieee80211_sta_ht_cap
+!Finclude/net/cfg80211.h ieee80211_supported_band
+!Finclude/net/cfg80211.h cfg80211_signal_type
+!Finclude/net/cfg80211.h wiphy_params_flags
+!Finclude/net/cfg80211.h wiphy_flags
+!Finclude/net/cfg80211.h wiphy
+!Finclude/net/cfg80211.h wireless_dev
+!Finclude/net/cfg80211.h wiphy_new
+!Finclude/net/cfg80211.h wiphy_register
+!Finclude/net/cfg80211.h wiphy_unregister
+!Finclude/net/cfg80211.h wiphy_free
+
+!Finclude/net/cfg80211.h wiphy_name
+!Finclude/net/cfg80211.h wiphy_dev
+!Finclude/net/cfg80211.h wiphy_priv
+!Finclude/net/cfg80211.h priv_to_wiphy
+!Finclude/net/cfg80211.h set_wiphy_dev
+!Finclude/net/cfg80211.h wdev_priv
+ </chapter>
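
For orientation, here is a minimal sketch of the registration flow the chapter above documents: a driver allocates a wiphy, attaches it to its bus device and registers it. The mydrv_* names and the empty ops table are illustrative placeholders, not part of the cfg80211 API.

    #include <net/cfg80211.h>

    /* Driver-private data, embedded in the wiphy allocation. */
    struct mydrv_priv {
            struct wiphy *wiphy;
    };

    static struct cfg80211_ops mydrv_cfg_ops = {
            /* .scan, .connect etc. callbacks would be filled in here */
    };

    static struct wiphy *mydrv_wiphy_setup(struct device *dev)
    {
            struct wiphy *wiphy;
            struct mydrv_priv *priv;

            /* Allocate the wiphy with room for our private data. */
            wiphy = wiphy_new(&mydrv_cfg_ops, sizeof(*priv));
            if (!wiphy)
                    return NULL;

            priv = wiphy_priv(wiphy);
            priv->wiphy = wiphy;

            /* Tie the wiphy to the underlying bus device. */
            set_wiphy_dev(wiphy, dev);

            /* Bands and supported modes must be filled in before this. */
            if (wiphy_register(wiphy) < 0) {
                    wiphy_free(wiphy);
                    return NULL;
            }
            return wiphy;
    }
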
+ <chapter>
+ <title>Actions and configuration</title>
+!Pinclude/net/cfg80211.h Actions and configuration
+!Finclude/net/cfg80211.h cfg80211_ops
+!Finclude/net/cfg80211.h vif_params
+!Finclude/net/cfg80211.h key_params
+!Finclude/net/cfg80211.h survey_info_flags
+!Finclude/net/cfg80211.h survey_info
+!Finclude/net/cfg80211.h beacon_parameters
+!Finclude/net/cfg80211.h plink_actions
+!Finclude/net/cfg80211.h station_parameters
+!Finclude/net/cfg80211.h station_info_flags
+!Finclude/net/cfg80211.h rate_info_flags
+!Finclude/net/cfg80211.h rate_info
+!Finclude/net/cfg80211.h station_info
+!Finclude/net/cfg80211.h monitor_flags
+!Finclude/net/cfg80211.h mpath_info_flags
+!Finclude/net/cfg80211.h mpath_info
+!Finclude/net/cfg80211.h bss_parameters
+!Finclude/net/cfg80211.h ieee80211_txq_params
+!Finclude/net/cfg80211.h cfg80211_crypto_settings
+!Finclude/net/cfg80211.h cfg80211_auth_request
+!Finclude/net/cfg80211.h cfg80211_assoc_request
+!Finclude/net/cfg80211.h cfg80211_deauth_request
+!Finclude/net/cfg80211.h cfg80211_disassoc_request
+!Finclude/net/cfg80211.h cfg80211_ibss_params
+!Finclude/net/cfg80211.h cfg80211_connect_params
+!Finclude/net/cfg80211.h cfg80211_pmksa
+!Finclude/net/cfg80211.h cfg80211_send_rx_auth
+!Finclude/net/cfg80211.h cfg80211_send_auth_timeout
+!Finclude/net/cfg80211.h __cfg80211_auth_canceled
+!Finclude/net/cfg80211.h cfg80211_send_rx_assoc
+!Finclude/net/cfg80211.h cfg80211_send_assoc_timeout
+!Finclude/net/cfg80211.h cfg80211_send_deauth
+!Finclude/net/cfg80211.h __cfg80211_send_deauth
+!Finclude/net/cfg80211.h cfg80211_send_disassoc
+!Finclude/net/cfg80211.h __cfg80211_send_disassoc
+!Finclude/net/cfg80211.h cfg80211_ibss_joined
+!Finclude/net/cfg80211.h cfg80211_connect_result
+!Finclude/net/cfg80211.h cfg80211_roamed
+!Finclude/net/cfg80211.h cfg80211_disconnected
+!Finclude/net/cfg80211.h cfg80211_ready_on_channel
+!Finclude/net/cfg80211.h cfg80211_remain_on_channel_expired
+!Finclude/net/cfg80211.h cfg80211_new_sta
+!Finclude/net/cfg80211.h cfg80211_rx_mgmt
+!Finclude/net/cfg80211.h cfg80211_mgmt_tx_status
+!Finclude/net/cfg80211.h cfg80211_cqm_rssi_notify
+!Finclude/net/cfg80211.h cfg80211_michael_mic_failure
+ </chapter>
+ <chapter>
+ <title>Scanning and BSS list handling</title>
+!Pinclude/net/cfg80211.h Scanning and BSS list handling
+!Finclude/net/cfg80211.h cfg80211_ssid
+!Finclude/net/cfg80211.h cfg80211_scan_request
+!Finclude/net/cfg80211.h cfg80211_scan_done
+!Finclude/net/cfg80211.h cfg80211_bss
+!Finclude/net/cfg80211.h cfg80211_inform_bss_frame
+!Finclude/net/cfg80211.h cfg80211_inform_bss
+!Finclude/net/cfg80211.h cfg80211_unlink_bss
+!Finclude/net/cfg80211.h cfg80211_find_ie
+!Finclude/net/cfg80211.h ieee80211_bss_get_ie
+ </chapter>
+ <chapter>
+ <title>Utility functions</title>
+!Pinclude/net/cfg80211.h Utility functions
+!Finclude/net/cfg80211.h ieee80211_channel_to_frequency
+!Finclude/net/cfg80211.h ieee80211_frequency_to_channel
+!Finclude/net/cfg80211.h ieee80211_get_channel
+!Finclude/net/cfg80211.h ieee80211_get_response_rate
+!Finclude/net/cfg80211.h ieee80211_hdrlen
+!Finclude/net/cfg80211.h ieee80211_get_hdrlen_from_skb
+!Finclude/net/cfg80211.h ieee80211_radiotap_iterator
+ </chapter>
+ <chapter>
+ <title>Data path helpers</title>
+!Pinclude/net/cfg80211.h Data path helpers
+!Finclude/net/cfg80211.h ieee80211_data_to_8023
+!Finclude/net/cfg80211.h ieee80211_data_from_8023
+!Finclude/net/cfg80211.h ieee80211_amsdu_to_8023s
+!Finclude/net/cfg80211.h cfg80211_classify8021d
+ </chapter>
+ <chapter>
+ <title>Regulatory enforcement infrastructure</title>
+!Pinclude/net/cfg80211.h Regulatory enforcement infrastructure
+!Finclude/net/cfg80211.h regulatory_hint
+!Finclude/net/cfg80211.h wiphy_apply_custom_regulatory
+!Finclude/net/cfg80211.h freq_reg_info
+ </chapter>
+ <chapter>
+ <title>RFkill integration</title>
+!Pinclude/net/cfg80211.h RFkill integration
+!Finclude/net/cfg80211.h wiphy_rfkill_set_hw_state
+!Finclude/net/cfg80211.h wiphy_rfkill_start_polling
+!Finclude/net/cfg80211.h wiphy_rfkill_stop_polling
+ </chapter>
+ <chapter>
+ <title>Test mode</title>
+!Pinclude/net/cfg80211.h Test mode
+!Finclude/net/cfg80211.h cfg80211_testmode_alloc_reply_skb
+!Finclude/net/cfg80211.h cfg80211_testmode_reply
+!Finclude/net/cfg80211.h cfg80211_testmode_alloc_event_skb
+!Finclude/net/cfg80211.h cfg80211_testmode_event
+ </chapter>
+ </book>
+ <book id="mac80211-developers-guide">
+ <bookinfo>
+ <title>The mac80211 subsystem</title>
+ <abstract>
+!Pinclude/net/mac80211.h Introduction
+!Pinclude/net/mac80211.h Warning
+ </abstract>
+ </bookinfo>
+
+ <toc></toc>
+
+ <!--
+ Generally, this document shall be ordered by increasing complexity.
+ It is important to note that readers should be able to read only
+ the first few sections to get a working driver and only advanced
+ usage should require reading the full document.
+ -->
+
+ <part>
+ <title>The basic mac80211 driver interface</title>
+ <partintro>
+ <para>
+ You should read and understand the information contained
+ within this part of the book while implementing a driver.
+ In some chapters, advanced usage is noted, which may be
+ skipped at first.
+ </para>
+ <para>
+ This part of the book only covers station and monitor mode
+ functionality; additional information required to implement
+ the other modes is covered in the second part of the book.
+ </para>
+ </partintro>
+
+ <chapter id="basics">
+ <title>Basic hardware handling</title>
+ <para>TBD</para>
+ <para>
+ This chapter shall contain information on getting a hw
+ struct allocated and registered with mac80211.
+ </para>
+ <para>
+ Since it is required to allocate rates/modes before registering
+ a hw struct, this chapter shall also contain information on setting
+ up the rate/mode structs.
+ </para>
+ <para>
+ Additionally, some discussion about the callbacks and
+ the general programming model should be in here, including
+ the definition of ieee80211_ops which will be referred to
+ a lot.
+ </para>
+ <para>
+ Finally, a discussion of hardware capabilities should be done
+ with references to other parts of the book.
+ </para>
+ <!-- intentionally multiple !F lines to get proper order -->
+!Finclude/net/mac80211.h ieee80211_hw
+!Finclude/net/mac80211.h ieee80211_hw_flags
+!Finclude/net/mac80211.h SET_IEEE80211_DEV
+!Finclude/net/mac80211.h SET_IEEE80211_PERM_ADDR
+!Finclude/net/mac80211.h ieee80211_ops
+!Finclude/net/mac80211.h ieee80211_alloc_hw
+!Finclude/net/mac80211.h ieee80211_register_hw
+!Finclude/net/mac80211.h ieee80211_get_tx_led_name
+!Finclude/net/mac80211.h ieee80211_get_rx_led_name
+!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
+!Finclude/net/mac80211.h ieee80211_get_radio_led_name
+!Finclude/net/mac80211.h ieee80211_unregister_hw
+!Finclude/net/mac80211.h ieee80211_free_hw
+ </chapter>
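
Until the TBD text above is written, a rough sketch of the allocation and registration sequence this chapter covers may help. The mydrv_* names are placeholders; the ops table is left empty for brevity, while a real driver must provide the mandatory callbacks named in the comment.

    #include <net/mac80211.h>

    struct mydrv_priv {
            /* driver-private state lives here */
    };

    static const struct ieee80211_ops mydrv_ops = {
            /* A real driver fills in at least .tx, .start, .stop,
             * .add_interface, .remove_interface, .config and
             * .configure_filter. */
    };

    static int mydrv_probe(struct device *dev)
    {
            struct ieee80211_hw *hw;
            int err;

            /* Allocate the hardware descriptor plus private data. */
            hw = ieee80211_alloc_hw(sizeof(struct mydrv_priv), &mydrv_ops);
            if (!hw)
                    return -ENOMEM;

            SET_IEEE80211_DEV(hw, dev);
            /* hw->wiphy bands, hw->flags, queue counts etc. must be
             * set up here, before registration. */

            err = ieee80211_register_hw(hw);
            if (err)
                    ieee80211_free_hw(hw);
            return err;
    }
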
+
+ <chapter id="phy-handling">
+ <title>PHY configuration</title>
+ <para>TBD</para>
+ <para>
+ This chapter should describe PHY handling including
+ start/stop callbacks and the various structures used.
+ </para>
+!Finclude/net/mac80211.h ieee80211_conf
+!Finclude/net/mac80211.h ieee80211_conf_flags
+ </chapter>
+
+ <chapter id="iface-handling">
+ <title>Virtual interfaces</title>
+ <para>TBD</para>
+ <para>
+ This chapter should describe virtual interface basics
+ that are relevant to the driver (VLANs, MGMT etc. are not).
+ It should explain the use of the add_iface/remove_iface
+ callbacks as well as the interface configuration callbacks.
+ </para>
+ <para>Things related to AP mode should be discussed there.</para>
+ <para>
+ Things related to supporting multiple interfaces should be
+ in the appropriate chapter; a BIG FAT note should be here about
+ this though and the recommendation to allow only a single
+ interface in STA mode at first!
+ </para>
+!Finclude/net/mac80211.h ieee80211_vif
+ </chapter>
+
+ <chapter id="rx-tx">
+ <title>Receive and transmit processing</title>
+ <sect1>
+ <title>what should be here</title>
+ <para>TBD</para>
+ <para>
+ This should describe the receive and transmit
+ paths in mac80211/the drivers as well as
+ transmit status handling.
+ </para>
+ </sect1>
+ <sect1>
+ <title>Frame format</title>
+!Pinclude/net/mac80211.h Frame format
+ </sect1>
+ <sect1>
+ <title>Packet alignment</title>
+!Pnet/mac80211/rx.c Packet alignment
+ </sect1>
+ <sect1>
+ <title>Calling into mac80211 from interrupts</title>
+!Pinclude/net/mac80211.h Calling mac80211 from interrupts
+ </sect1>
+ <sect1>
+ <title>functions/definitions</title>
+!Finclude/net/mac80211.h ieee80211_rx_status
+!Finclude/net/mac80211.h mac80211_rx_flags
+!Finclude/net/mac80211.h ieee80211_tx_info
+!Finclude/net/mac80211.h ieee80211_rx
+!Finclude/net/mac80211.h ieee80211_rx_irqsafe
+!Finclude/net/mac80211.h ieee80211_tx_status
+!Finclude/net/mac80211.h ieee80211_tx_status_irqsafe
+!Finclude/net/mac80211.h ieee80211_rts_get
+!Finclude/net/mac80211.h ieee80211_rts_duration
+!Finclude/net/mac80211.h ieee80211_ctstoself_get
+!Finclude/net/mac80211.h ieee80211_ctstoself_duration
+!Finclude/net/mac80211.h ieee80211_generic_frame_duration
+!Finclude/net/mac80211.h ieee80211_wake_queue
+!Finclude/net/mac80211.h ieee80211_stop_queue
+!Finclude/net/mac80211.h ieee80211_wake_queues
+!Finclude/net/mac80211.h ieee80211_stop_queues
+ </sect1>
+ </chapter>
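
As a concrete illustration of the interrupt-safe receive path listed above, a driver might hand a frame to mac80211 from its interrupt handler roughly as follows. The function name and the status values are invented for the example; only the ieee80211_* and IEEE80211_* identifiers are real API.

    #include <net/mac80211.h>

    static void mydrv_rx_frame(struct ieee80211_hw *hw, struct sk_buff *skb,
                               int freq_mhz, int signal_dbm)
    {
            struct ieee80211_rx_status status;

            memset(&status, 0, sizeof(status));
            status.band = IEEE80211_BAND_2GHZ;
            status.freq = freq_mhz;
            status.signal = signal_dbm;   /* meaningful with IEEE80211_HW_SIGNAL_DBM */

            /* The status travels in the skb control buffer. */
            memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));

            /* Safe to call in hard-IRQ context, unlike ieee80211_rx(). */
            ieee80211_rx_irqsafe(hw, skb);
    }
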
+
+ <chapter id="filters">
+ <title>Frame filtering</title>
+!Pinclude/net/mac80211.h Frame filtering
+!Finclude/net/mac80211.h ieee80211_filter_flags
+ </chapter>
+ </part>
+
+ <part id="advanced">
+ <title>Advanced driver interface</title>
+ <partintro>
+ <para>
+ Information contained within this part of the book is
+ of interest only for advanced interaction of mac80211
+ with drivers to exploit more hardware capabilities and
+ improve performance.
+ </para>
+ </partintro>
+
+ <chapter id="hardware-crypto-offload">
+ <title>Hardware crypto acceleration</title>
+!Pinclude/net/mac80211.h Hardware crypto acceleration
+ <!-- intentionally multiple !F lines to get proper order -->
+!Finclude/net/mac80211.h set_key_cmd
+!Finclude/net/mac80211.h ieee80211_key_conf
+!Finclude/net/mac80211.h ieee80211_key_flags
+ </chapter>
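
A common pattern for the set_key callback this chapter documents is to program only the keys the hardware can offload and return -EOPNOTSUPP otherwise, which makes mac80211 fall back to software crypto for that key. A hedged sketch, with mydrv_set_key as a placeholder name:

    static int mydrv_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                             struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta,
                             struct ieee80211_key_conf *key)
    {
            switch (cmd) {
            case SET_KEY:
                    /* Program key->key into the hardware key cache here;
                     * returning -EOPNOTSUPP instead tells mac80211 to use
                     * software crypto for this key. */
                    return -EOPNOTSUPP;
            case DISABLE_KEY:
                    /* Remove the key from the hardware key cache. */
                    return 0;
            }
            return -EINVAL;
    }
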
+
+ <chapter id="powersave">
+ <title>Powersave support</title>
+!Pinclude/net/mac80211.h Powersave support
+ </chapter>
+
+ <chapter id="beacon-filter">
+ <title>Beacon filter support</title>
+!Pinclude/net/mac80211.h Beacon filter support
+!Finclude/net/mac80211.h ieee80211_beacon_loss
+ </chapter>
+
+ <chapter id="qos">
+ <title>Multiple queues and QoS support</title>
+ <para>TBD</para>
+!Finclude/net/mac80211.h ieee80211_tx_queue_params
+ </chapter>
+
+ <chapter id="AP">
+ <title>Access point mode support</title>
+ <para>TBD</para>
+ <para>Some parts of the if_conf should be discussed here instead</para>
+ <para>
+ Insert notes about VLAN interfaces with hw crypto here or
+ in the hw crypto chapter.
+ </para>
+!Finclude/net/mac80211.h ieee80211_get_buffered_bc
+!Finclude/net/mac80211.h ieee80211_beacon_get
+ </chapter>
+
+ <chapter id="multi-iface">
+ <title>Supporting multiple virtual interfaces</title>
+ <para>TBD</para>
+ <para>
+ Note: WDS with identical MAC address should almost always be OK
+ </para>
+ <para>
+ Insert notes about having multiple virtual interfaces with
+ different MAC addresses here, note which configurations are
+ supported by mac80211, add notes about supporting hw crypto
+ with it.
+ </para>
+ </chapter>
+
+ <chapter id="hardware-scan-offload">
+ <title>Hardware scan offload</title>
+ <para>TBD</para>
+!Finclude/net/mac80211.h ieee80211_scan_completed
+ </chapter>
+ </part>
+
+ <part id="rate-control">
+ <title>Rate control interface</title>
+ <partintro>
+ <para>TBD</para>
+ <para>
+ This part of the book describes the rate control algorithm
+ interface and how it relates to mac80211 and drivers.
+ </para>
+ </partintro>
+ <chapter id="dummy">
+ <title>dummy chapter</title>
+ <para>TBD</para>
+ </chapter>
+ </part>
+
+ <part id="internal">
+ <title>Internals</title>
+ <partintro>
+ <para>TBD</para>
+ <para>
+ This part of the book describes mac80211 internals.
+ </para>
+ </partintro>
+
+ <chapter id="key-handling">
+ <title>Key handling</title>
+ <sect1>
+ <title>Key handling basics</title>
+!Pnet/mac80211/key.c Key handling basics
+ </sect1>
+ <sect1>
+ <title>MORE TBD</title>
+ <para>TBD</para>
+ </sect1>
+ </chapter>
+
+ <chapter id="rx-processing">
+ <title>Receive processing</title>
+ <para>TBD</para>
+ </chapter>
+
+ <chapter id="tx-processing">
+ <title>Transmit processing</title>
+ <para>TBD</para>
+ </chapter>
+
+ <chapter id="sta-info">
+ <title>Station info handling</title>
+ <sect1>
+ <title>Programming information</title>
+!Fnet/mac80211/sta_info.h sta_info
+!Fnet/mac80211/sta_info.h ieee80211_sta_info_flags
+ </sect1>
+ <sect1>
+ <title>STA information lifetime rules</title>
+!Pnet/mac80211/sta_info.c STA information lifetime rules
+ </sect1>
+ </chapter>
+
+ <chapter id="synchronisation">
+ <title>Synchronisation</title>
+ <para>TBD</para>
+ <para>Locking, lots of RCU</para>
+ </chapter>
+ </part>
+ </book>
+</set>
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 34929f24c284..8b6e00a71034 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
  kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
  gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
  genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
- mac80211.xml debugobjects.xml sh.xml regulator.xml \
+ 80211.xml debugobjects.xml sh.xml regulator.xml \
  alsa-driver-api.xml writing-an-alsa-driver.xml \
  tracepoint.xml media.xml drm.xml
 
diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl
deleted file mode 100644
index affb15a344a1..000000000000
--- a/Documentation/DocBook/mac80211.tmpl
+++ /dev/null
@@ -1,337 +0,0 @@
1<?xml version="1.0" encoding="UTF-8"?>
2<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
3 "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
4
5<book id="mac80211-developers-guide">
6 <bookinfo>
7 <title>The mac80211 subsystem for kernel developers</title>
8
9 <authorgroup>
10 <author>
11 <firstname>Johannes</firstname>
12 <surname>Berg</surname>
13 <affiliation>
14 <address><email>johannes@sipsolutions.net</email></address>
15 </affiliation>
16 </author>
17 </authorgroup>
18
19 <copyright>
20 <year>2007-2009</year>
21 <holder>Johannes Berg</holder>
22 </copyright>
23
24 <legalnotice>
25 <para>
26 This documentation is free software; you can redistribute
27 it and/or modify it under the terms of the GNU General Public
28 License version 2 as published by the Free Software Foundation.
29 </para>
30
31 <para>
32 This documentation is distributed in the hope that it will be
33 useful, but WITHOUT ANY WARRANTY; without even the implied
34 warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
35 See the GNU General Public License for more details.
36 </para>
37
38 <para>
39 You should have received a copy of the GNU General Public
40 License along with this documentation; if not, write to the Free
41 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
42 MA 02111-1307 USA
43 </para>
44
45 <para>
46 For more details see the file COPYING in the source
47 distribution of Linux.
48 </para>
49 </legalnotice>
50
51 <abstract>
52!Pinclude/net/mac80211.h Introduction
53!Pinclude/net/mac80211.h Warning
54 </abstract>
55 </bookinfo>
56
57 <toc></toc>
58
59<!--
60Generally, this document shall be ordered by increasing complexity.
61It is important to note that readers should be able to read only
62the first few sections to get a working driver and only advanced
63usage should require reading the full document.
64-->
65
66 <part>
67 <title>The basic mac80211 driver interface</title>
68 <partintro>
69 <para>
70 You should read and understand the information contained
71 within this part of the book while implementing a driver.
72 In some chapters, advanced usage is noted, that may be
73 skipped at first.
74 </para>
75 <para>
76 This part of the book only covers station and monitor mode
77 functionality, additional information required to implement
78 the other modes is covered in the second part of the book.
79 </para>
80 </partintro>
81
82 <chapter id="basics">
83 <title>Basic hardware handling</title>
84 <para>TBD</para>
85 <para>
86 This chapter shall contain information on getting a hw
87 struct allocated and registered with mac80211.
88 </para>
89 <para>
90 Since it is required to allocate rates/modes before registering
91 a hw struct, this chapter shall also contain information on setting
92 up the rate/mode structs.
93 </para>
94 <para>
95 Additionally, some discussion about the callbacks and
96 the general programming model should be in here, including
97 the definition of ieee80211_ops which will be referred to
98 a lot.
99 </para>
100 <para>
101 Finally, a discussion of hardware capabilities should be done
102 with references to other parts of the book.
103 </para>
104<!-- intentionally multiple !F lines to get proper order -->
105!Finclude/net/mac80211.h ieee80211_hw
106!Finclude/net/mac80211.h ieee80211_hw_flags
107!Finclude/net/mac80211.h SET_IEEE80211_DEV
108!Finclude/net/mac80211.h SET_IEEE80211_PERM_ADDR
109!Finclude/net/mac80211.h ieee80211_ops
110!Finclude/net/mac80211.h ieee80211_alloc_hw
111!Finclude/net/mac80211.h ieee80211_register_hw
112!Finclude/net/mac80211.h ieee80211_get_tx_led_name
113!Finclude/net/mac80211.h ieee80211_get_rx_led_name
114!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
115!Finclude/net/mac80211.h ieee80211_get_radio_led_name
116!Finclude/net/mac80211.h ieee80211_unregister_hw
117!Finclude/net/mac80211.h ieee80211_free_hw
118 </chapter>
119
120 <chapter id="phy-handling">
121 <title>PHY configuration</title>
122 <para>TBD</para>
123 <para>
124 This chapter should describe PHY handling including
125 start/stop callbacks and the various structures used.
126 </para>
127!Finclude/net/mac80211.h ieee80211_conf
128!Finclude/net/mac80211.h ieee80211_conf_flags
129 </chapter>
130
131 <chapter id="iface-handling">
132 <title>Virtual interfaces</title>
133 <para>TBD</para>
134 <para>
135 This chapter should describe virtual interface basics
136 that are relevant to the driver (VLANs, MGMT etc are not.)
137 It should explain the use of the add_iface/remove_iface
138 callbacks as well as the interface configuration callbacks.
139 </para>
140 <para>Things related to AP mode should be discussed there.</para>
141 <para>
142 Things related to supporting multiple interfaces should be
143 in the appropriate chapter, a BIG FAT note should be here about
144 this though and the recommendation to allow only a single
145 interface in STA mode at first!
146 </para>
147!Finclude/net/mac80211.h ieee80211_vif
148 </chapter>
149
150 <chapter id="rx-tx">
151 <title>Receive and transmit processing</title>
152 <sect1>
153 <title>what should be here</title>
154 <para>TBD</para>
155 <para>
156 This should describe the receive and transmit
157 paths in mac80211/the drivers as well as
158 transmit status handling.
159 </para>
160 </sect1>
161 <sect1>
162 <title>Frame format</title>
163!Pinclude/net/mac80211.h Frame format
164 </sect1>
165 <sect1>
166 <title>Packet alignment</title>
167!Pnet/mac80211/rx.c Packet alignment
168 </sect1>
169 <sect1>
170 <title>Calling into mac80211 from interrupts</title>
171!Pinclude/net/mac80211.h Calling mac80211 from interrupts
172 </sect1>
173 <sect1>
174 <title>functions/definitions</title>
175!Finclude/net/mac80211.h ieee80211_rx_status
176!Finclude/net/mac80211.h mac80211_rx_flags
177!Finclude/net/mac80211.h ieee80211_tx_info
178!Finclude/net/mac80211.h ieee80211_rx
179!Finclude/net/mac80211.h ieee80211_rx_irqsafe
180!Finclude/net/mac80211.h ieee80211_tx_status
181!Finclude/net/mac80211.h ieee80211_tx_status_irqsafe
182!Finclude/net/mac80211.h ieee80211_rts_get
183!Finclude/net/mac80211.h ieee80211_rts_duration
184!Finclude/net/mac80211.h ieee80211_ctstoself_get
185!Finclude/net/mac80211.h ieee80211_ctstoself_duration
186!Finclude/net/mac80211.h ieee80211_generic_frame_duration
187!Finclude/net/mac80211.h ieee80211_wake_queue
188!Finclude/net/mac80211.h ieee80211_stop_queue
189!Finclude/net/mac80211.h ieee80211_wake_queues
190!Finclude/net/mac80211.h ieee80211_stop_queues
191 </sect1>
192 </chapter>
193
194 <chapter id="filters">
195 <title>Frame filtering</title>
196!Pinclude/net/mac80211.h Frame filtering
197!Finclude/net/mac80211.h ieee80211_filter_flags
198 </chapter>
199 </part>
200
201 <part id="advanced">
202 <title>Advanced driver interface</title>
203 <partintro>
204 <para>
205 Information contained within this part of the book is
206 of interest only for advanced interaction of mac80211
207 with drivers to exploit more hardware capabilities and
208 improve performance.
209 </para>
210 </partintro>
211
212 <chapter id="hardware-crypto-offload">
213 <title>Hardware crypto acceleration</title>
214!Pinclude/net/mac80211.h Hardware crypto acceleration
215<!-- intentionally multiple !F lines to get proper order -->
216!Finclude/net/mac80211.h set_key_cmd
217!Finclude/net/mac80211.h ieee80211_key_conf
218!Finclude/net/mac80211.h ieee80211_key_alg
219!Finclude/net/mac80211.h ieee80211_key_flags
220 </chapter>
221
222 <chapter id="powersave">
223 <title>Powersave support</title>
224!Pinclude/net/mac80211.h Powersave support
225 </chapter>
226
227 <chapter id="beacon-filter">
228 <title>Beacon filter support</title>
229!Pinclude/net/mac80211.h Beacon filter support
230!Finclude/net/mac80211.h ieee80211_beacon_loss
231 </chapter>
232
233 <chapter id="qos">
234 <title>Multiple queues and QoS support</title>
235 <para>TBD</para>
236!Finclude/net/mac80211.h ieee80211_tx_queue_params
237 </chapter>
238
239 <chapter id="AP">
240 <title>Access point mode support</title>
241 <para>TBD</para>
242 <para>Some parts of the if_conf should be discussed here instead</para>
243 <para>
244 Insert notes about VLAN interfaces with hw crypto here or
245 in the hw crypto chapter.
246 </para>
247!Finclude/net/mac80211.h ieee80211_get_buffered_bc
248!Finclude/net/mac80211.h ieee80211_beacon_get
249 </chapter>
250
251 <chapter id="multi-iface">
252 <title>Supporting multiple virtual interfaces</title>
253 <para>TBD</para>
254 <para>
255 Note: WDS with identical MAC address should almost always be OK
256 </para>
257 <para>
258 Insert notes about having multiple virtual interfaces with
259 different MAC addresses here, note which configurations are
260 supported by mac80211, add notes about supporting hw crypto
261 with it.
262 </para>
263 </chapter>
264
265 <chapter id="hardware-scan-offload">
266 <title>Hardware scan offload</title>
267 <para>TBD</para>
268!Finclude/net/mac80211.h ieee80211_scan_completed
269 </chapter>
270 </part>
271
272 <part id="rate-control">
273 <title>Rate control interface</title>
274 <partintro>
275 <para>TBD</para>
276 <para>
277 This part of the book describes the rate control algorithm
278 interface and how it relates to mac80211 and drivers.
279 </para>
280 </partintro>
281 <chapter id="dummy">
282 <title>dummy chapter</title>
283 <para>TBD</para>
284 </chapter>
285 </part>
286
287 <part id="internal">
288 <title>Internals</title>
289 <partintro>
290 <para>TBD</para>
291 <para>
292 This part of the book describes mac80211 internals.
293 </para>
294 </partintro>
295
296 <chapter id="key-handling">
297 <title>Key handling</title>
298 <sect1>
299 <title>Key handling basics</title>
300!Pnet/mac80211/key.c Key handling basics
301 </sect1>
302 <sect1>
303 <title>MORE TBD</title>
304 <para>TBD</para>
305 </sect1>
306 </chapter>
307
308 <chapter id="rx-processing">
309 <title>Receive processing</title>
310 <para>TBD</para>
311 </chapter>
312
313 <chapter id="tx-processing">
314 <title>Transmit processing</title>
315 <para>TBD</para>
316 </chapter>
317
318 <chapter id="sta-info">
319 <title>Station info handling</title>
320 <sect1>
321 <title>Programming information</title>
322!Fnet/mac80211/sta_info.h sta_info
323!Fnet/mac80211/sta_info.h ieee80211_sta_info_flags
324 </sect1>
325 <sect1>
326 <title>STA information lifetime rules</title>
327!Pnet/mac80211/sta_info.c STA information lifetime rules
328 </sect1>
329 </chapter>
330
331 <chapter id="synchronisation">
332 <title>Synchronisation</title>
333 <para>TBD</para>
334 <para>Locking, lots of RCU</para>
335 </chapter>
336 </part>
337</book>
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index a62fdf7a6bff..271d524a4c8d 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -1,18 +1,20 @@
1DCCP protocol 1DCCP protocol
2============ 2=============
3 3
4 4
5Contents 5Contents
6======== 6========
7
8- Introduction 7- Introduction
9- Missing features 8- Missing features
10- Socket options 9- Socket options
10- Sysctl variables
11- IOCTLs
12- Other tunables
11- Notes 13- Notes
12 14
15
13Introduction 16Introduction
14============ 17============
15
16Datagram Congestion Control Protocol (DCCP) is an unreliable, connection 18Datagram Congestion Control Protocol (DCCP) is an unreliable, connection
17oriented protocol designed to solve issues present in UDP and TCP, particularly 19oriented protocol designed to solve issues present in UDP and TCP, particularly
18for real-time and multimedia (streaming) traffic. 20for real-time and multimedia (streaming) traffic.
@@ -29,9 +31,9 @@ It has a base protocol and pluggable congestion control IDs (CCIDs).
29DCCP is a Proposed Standard (RFC 2026), and the homepage for DCCP as a protocol 31DCCP is a Proposed Standard (RFC 2026), and the homepage for DCCP as a protocol
30is at http://www.ietf.org/html.charters/dccp-charter.html 32is at http://www.ietf.org/html.charters/dccp-charter.html
31 33
34
32Missing features 35Missing features
33================ 36================
34
35The Linux DCCP implementation does not currently support all the features that are 37The Linux DCCP implementation does not currently support all the features that are
36specified in RFCs 4340...42. 38specified in RFCs 4340...42.
37 39
@@ -45,7 +47,6 @@ http://linux-net.osdl.org/index.php/DCCP_Testing#Experimental_DCCP_source_tree
45 47
46Socket options 48Socket options
47============== 49==============
48
49DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of 50DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
50service codes (RFC 4340, sec. 8.1.2); if this socket option is not set, 51service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
51the socket will fall back to 0 (which means that no meaningful service code 52the socket will fall back to 0 (which means that no meaningful service code
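A minimal userspace sketch of this option, assuming SOCK_DCCP, IPPROTO_DCCP,
SOL_DCCP and DCCP_SOCKOPT_SERVICE are provided by the system headers; the
helper name and error handling are illustrative only:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <unistd.h>
    #include <stdint.h>

    /* Sketch: set a DCCP service code before connect()/listen(). */
    int dccp_socket_with_service(uint32_t service)
    {
            int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

            if (fd < 0)
                    return -1;
            /* Must be set before the connection is established
             * (RFC 4340, sec. 8.1.2). */
            if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
                           &service, sizeof(service)) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }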
@@ -112,6 +113,7 @@ DCCP_SOCKOPT_CCID_TX_INFO
112On unidirectional connections it is useful to close the unused half-connection 113On unidirectional connections it is useful to close the unused half-connection
113via shutdown (SHUT_WR or SHUT_RD): this will reduce per-packet processing costs. 114via shutdown (SHUT_WR or SHUT_RD): this will reduce per-packet processing costs.
114 115
116
115Sysctl variables 117Sysctl variables
116================ 118================
117Several DCCP default parameters can be managed by the following sysctls 119Several DCCP default parameters can be managed by the following sysctls
@@ -155,15 +157,30 @@ sync_ratelimit = 125 ms
155 sequence-invalid packets on the same socket (RFC 4340, 7.5.4). The unit 157 sequence-invalid packets on the same socket (RFC 4340, 7.5.4). The unit
156 of this parameter is milliseconds; a value of 0 disables rate-limiting. 158 of this parameter is milliseconds; a value of 0 disables rate-limiting.
157 159
160
158IOCTLS 161IOCTLS
159====== 162======
160FIONREAD 163FIONREAD
161 Works as in udp(7): returns in the `int' argument pointer the size of 164 Works as in udp(7): returns in the `int' argument pointer the size of
162 the next pending datagram in bytes, or 0 when no datagram is pending. 165 the next pending datagram in bytes, or 0 when no datagram is pending.
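A minimal userspace sketch of this ioctl; the helper name and error handling
are illustrative:

    #include <sys/ioctl.h>

    /* Returns the size of the next pending datagram in bytes, 0 when
     * none is pending, or -1 with errno set by ioctl(). */
    int next_dgram_size(int fd)
    {
            int pending = 0;

            if (ioctl(fd, FIONREAD, &pending) < 0)
                    return -1;
            return pending;
    }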
163 166
167
168Other tunables
169==============
170Per-route rto_min support
171 CCID-2 supports the RTAX_RTO_MIN per-route setting for the minimum value
172 of the RTO timer. This setting can be modified via the 'rto_min' option
173 of iproute2; for example:
174 > ip route change 10.0.0.0/24 rto_min 250j dev wlan0
175 > ip route add 10.0.0.254/32 rto_min 800j dev wlan0
176 > ip route show dev wlan0
177 CCID-3 also supports the rto_min setting: it is used to define the lower
178 bound for the expiry of the nofeedback timer. This can be useful on LANs
179 with very low RTTs (e.g., loopback, Gbit ethernet).
180
181
164Notes 182Notes
165===== 183=====
166
167DCCP does not travel through NAT successfully at present on many boxes. This is 184DCCP does not travel through NAT successfully at present on many boxes. This is
168because the checksum covers the pseudo-header as per TCP and UDP. Linux NAT 185because the checksum covers the pseudo-header as per TCP and UDP. Linux NAT
169support for DCCP has been added. 186support for DCCP has been added.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f350c69b2bb4..c7165f4cb792 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1014,6 +1014,12 @@ conf/interface/*:
1014accept_ra - BOOLEAN 1014accept_ra - BOOLEAN
1015 Accept Router Advertisements; autoconfigure using them. 1015 Accept Router Advertisements; autoconfigure using them.
1016 1016
1017 Possible values are:
1018 0 Do not accept Router Advertisements.
1019 1 Accept Router Advertisements if forwarding is disabled.
1020 2 Overrule forwarding behaviour. Accept Router Advertisements
1021 even if forwarding is enabled.
1022
1017 Functional default: enabled if local forwarding is disabled. 1023 Functional default: enabled if local forwarding is disabled.
1018 disabled if local forwarding is enabled. 1024 disabled if local forwarding is enabled.
1019 1025
@@ -1075,7 +1081,12 @@ forwarding - BOOLEAN
1075 Note: It is recommended to have the same setting on all 1081 Note: It is recommended to have the same setting on all
1076 interfaces; mixed router/host scenarios are rather uncommon. 1082 interfaces; mixed router/host scenarios are rather uncommon.
1077 1083
1078 FALSE: 1084 Possible values are:
1085 0 Forwarding disabled
1086 1 Forwarding enabled
1087 2 Forwarding enabled (Hybrid Mode)
1088
1089 FALSE (0):
1079 1090
1080 By default, Host behaviour is assumed. This means: 1091 By default, Host behaviour is assumed. This means:
1081 1092
@@ -1085,18 +1096,24 @@ forwarding - BOOLEAN
1085 Advertisements (and do autoconfiguration). 1096 Advertisements (and do autoconfiguration).
1086 4. If accept_redirects is TRUE (default), accept Redirects. 1097 4. If accept_redirects is TRUE (default), accept Redirects.
1087 1098
1088 TRUE: 1099 TRUE (1):
1089 1100
1090 If local forwarding is enabled, Router behaviour is assumed. 1101 If local forwarding is enabled, Router behaviour is assumed.
1091 This means exactly the reverse from the above: 1102 This means exactly the reverse from the above:
1092 1103
1093 1. IsRouter flag is set in Neighbour Advertisements. 1104 1. IsRouter flag is set in Neighbour Advertisements.
1094 2. Router Solicitations are not sent. 1105 2. Router Solicitations are not sent.
1095 3. Router Advertisements are ignored. 1106 3. Router Advertisements are ignored unless accept_ra is 2.
1096 4. Redirects are ignored. 1107 4. Redirects are ignored.
1097 1108
1098 Default: FALSE if global forwarding is disabled (default), 1109 TRUE (2):
1099 otherwise TRUE. 1110
1111 Hybrid mode. Same behaviour as TRUE, except for:
1112
1113 2. Router Solicitations are being sent when necessary.
1114
1115 Default: 0 (disabled) if global forwarding is disabled (default),
1116 otherwise 1 (enabled).
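For illustration, one way to drive this knob from C — a sketch assuming
procfs is mounted at /proc; the helper name is ours, not from the document:

    #include <stdio.h>

    /* Write 0, 1 or 2 (see above) to one interface's forwarding knob. */
    static int set_ipv6_forwarding(const char *ifname, int mode)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/proc/sys/net/ipv6/conf/%s/forwarding", ifname);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%d\n", mode);
            return fclose(f);
    }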
1100 1117
1101hop_limit - INTEGER 1118hop_limit - INTEGER
1102 Default Hop Limit to set. 1119 Default Hop Limit to set.
diff --git a/Documentation/networking/phonet.txt b/Documentation/networking/phonet.txt
index 6e8ce09f9c73..cf76608a2d35 100644
--- a/Documentation/networking/phonet.txt
+++ b/Documentation/networking/phonet.txt
@@ -112,6 +112,22 @@ However, connect() and getpeername() are not supported, as they did
112not seem useful with Phonet usages (could be added easily). 112not seem useful with Phonet usages (could be added easily).
113 113
114 114
115Resource subscription
116---------------------
117
 118A Phonet datagram socket can be subscribed to any number of 8-bit
 119Phonet resources, as follows:
120
121 uint32_t res = 0xXX;
122 ioctl(fd, SIOCPNADDRESOURCE, &res);
123
124Subscription is similarly cancelled using the SIOCPNDELRESOURCE I/O
125control request, or when the socket is closed.
126
 127Note that no more than one socket can be subscribed to any given
 128resource at a time; a conflicting request makes ioctl() fail with EBUSY.
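A slightly fuller sketch of the subscribe/release pair; the helper names
are ours and error handling is illustrative:

    #include <linux/phonet.h>
    #include <sys/ioctl.h>
    #include <stdint.h>

    int pn_take_resource(int fd, uint32_t res)
    {
            /* Fails with EBUSY if another socket already holds 'res'. */
            return ioctl(fd, SIOCPNADDRESOURCE, &res);
    }

    int pn_drop_resource(int fd, uint32_t res)
    {
            /* The resource is also released when the socket is closed. */
            return ioctl(fd, SIOCPNDELRESOURCE, &res);
    }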
129
130
115Phonet Pipe protocol 131Phonet Pipe protocol
116-------------------- 132--------------------
117 133
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index e8c8f4f06c67..98097d8cb910 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -172,15 +172,19 @@ struct skb_shared_hwtstamps {
172}; 172};
173 173
174Time stamps for outgoing packets are to be generated as follows: 174Time stamps for outgoing packets are to be generated as follows:
175- In hard_start_xmit(), check if skb_tx(skb)->hardware is set no-zero. 175- In hard_start_xmit(), check if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
176 If yes, then the driver is expected to do hardware time stamping. 176 is set. If so, the driver is expected to do hardware time
177 stamping.
177- If this is possible for the skb and requested, then declare 178- If this is possible for the skb and requested, then declare
178 that the driver is doing the time stamping by setting the field 179 that the driver is doing the time stamping by setting the flag
179 skb_tx(skb)->in_progress non-zero. You might want to keep a pointer 180 SKBTX_IN_PROGRESS in skb_shinfo(skb)->tx_flags, e.g. with
180 to the associated skb for the next step and not free the skb. A driver 181
181 not supporting hardware time stamping doesn't do that. A driver must 182 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
182 never touch sk_buff::tstamp! It is used to store software generated 183
183 time stamps by the network subsystem. 184 You might want to keep a pointer to the associated skb for the next step
185 and not free the skb. A driver not supporting hardware time stamping doesn't
186 do that. A driver must never touch sk_buff::tstamp! It is used to store
187 software generated time stamps by the network subsystem.
184- As soon as the driver has sent the packet and/or obtained a 188- As soon as the driver has sent the packet and/or obtained a
185 hardware time stamp for it, it passes the time stamp back by 189 hardware time stamp for it, it passes the time stamp back by
186 calling skb_hwtstamp_tx() with the original skb, the raw 190 calling skb_hwtstamp_tx() with the original skb, the raw
@@ -191,6 +195,6 @@ Time stamps for outgoing packets are to be generated as follows:
191 this would occur at a later time in the processing pipeline than other 195 this would occur at a later time in the processing pipeline than other
192 software time stamping and therefore could lead to unexpected deltas 196 software time stamping and therefore could lead to unexpected deltas
193 between time stamps. 197 between time stamps.
194- If the driver did not call set skb_tx(skb)->in_progress, then 198- If the driver did not set the SKBTX_IN_PROGRESS flag (see above), then
195 dev_hard_start_xmit() checks whether software time stamping 199 dev_hard_start_xmit() checks whether software time stamping
196 is wanted as fallback and potentially generates the time stamp. 200 is wanted as fallback and potentially generates the time stamp.
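Tying the steps above together, a driver-side sketch — the foo_* names and
the private-struct field are hypothetical and the hardware hand-off is
elided; only the tx_flags handling follows the rules above:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct foo_priv {                       /* hypothetical private data */
            struct sk_buff *tstamp_skb;
    };

    static netdev_tx_t foo_hard_start_xmit(struct sk_buff *skb,
                                           struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);

            if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
                    /* Declare hardware time stamping in progress and keep
                     * a reference until the stamp has been retrieved. */
                    skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                    priv->tstamp_skb = skb_get(skb);
            }
            /* ... queue the packet to the hardware, never touching
             * sk_buff::tstamp; a completion handler would later pass the
             * hardware stamp back as described above ... */
            return NETDEV_TX_OK;
    }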
diff --git a/MAINTAINERS b/MAINTAINERS
index 44e659530910..3168d0cbd358 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1120,6 +1120,13 @@ W: http://wireless.kernel.org/en/users/Drivers/ar9170
1120S: Maintained 1120S: Maintained
1121F: drivers/net/wireless/ath/ar9170/ 1121F: drivers/net/wireless/ath/ar9170/
1122 1122
1123CARL9170 LINUX COMMUNITY WIRELESS DRIVER
1124M: Christian Lamparter <chunkeey@googlemail.com>
1125L: linux-wireless@vger.kernel.org
1126W: http://wireless.kernel.org/en/users/Drivers/carl9170
1127S: Maintained
1128F: drivers/net/wireless/ath/carl9170/
1129
1123ATK0110 HWMON DRIVER 1130ATK0110 HWMON DRIVER
1124M: Luca Tettamanti <kronos.it@gmail.com> 1131M: Luca Tettamanti <kronos.it@gmail.com>
1125L: lm-sensors@lm-sensors.org 1132L: lm-sensors@lm-sensors.org
@@ -1398,6 +1405,13 @@ L: linux-scsi@vger.kernel.org
1398S: Supported 1405S: Supported
1399F: drivers/scsi/bfa/ 1406F: drivers/scsi/bfa/
1400 1407
1408BROCADE BNA 10 GIGABIT ETHERNET DRIVER
1409M: Rasesh Mody <rmody@brocade.com>
1410M: Debashis Dutt <ddutt@brocade.com>
1411L: netdev@vger.kernel.org
1412S: Supported
1413F: drivers/net/bna/
1414
1401BSG (block layer generic sg v4 driver) 1415BSG (block layer generic sg v4 driver)
1402M: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> 1416M: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
1403L: linux-scsi@vger.kernel.org 1417L: linux-scsi@vger.kernel.org
@@ -2881,6 +2895,12 @@ M: Brian King <brking@us.ibm.com>
2881S: Supported 2895S: Supported
2882F: drivers/scsi/ipr.* 2896F: drivers/scsi/ipr.*
2883 2897
2898IBM Power Virtual Ethernet Device Driver
2899M: Santiago Leon <santil@linux.vnet.ibm.com>
2900L: netdev@vger.kernel.org
2901S: Supported
2902F: drivers/net/ibmveth.*
2903
2884IBM ServeRAID RAID DRIVER 2904IBM ServeRAID RAID DRIVER
2885P: Jack Hammer 2905P: Jack Hammer
2886M: Dave Jeffery <ipslinux@adaptec.com> 2906M: Dave Jeffery <ipslinux@adaptec.com>
@@ -4328,13 +4348,12 @@ F: Documentation/filesystems/dlmfs.txt
4328F: fs/ocfs2/ 4348F: fs/ocfs2/
4329 4349
4330ORINOCO DRIVER 4350ORINOCO DRIVER
4331M: Pavel Roskin <proski@gnu.org>
4332M: David Gibson <hermes@gibson.dropbear.id.au>
4333L: linux-wireless@vger.kernel.org 4351L: linux-wireless@vger.kernel.org
4334L: orinoco-users@lists.sourceforge.net 4352L: orinoco-users@lists.sourceforge.net
4335L: orinoco-devel@lists.sourceforge.net 4353L: orinoco-devel@lists.sourceforge.net
4354W: http://linuxwireless.org/en/users/Drivers/orinoco
4336W: http://www.nongnu.org/orinoco/ 4355W: http://www.nongnu.org/orinoco/
4337S: Maintained 4356S: Orphan
4338F: drivers/net/wireless/orinoco/ 4357F: drivers/net/wireless/orinoco/
4339 4358
4340OSD LIBRARY and FILESYSTEM 4359OSD LIBRARY and FILESYSTEM
@@ -6400,7 +6419,7 @@ S: Maintained
6400F: drivers/input/misc/wistron_btns.c 6419F: drivers/input/misc/wistron_btns.c
6401 6420
6402WL1251 WIRELESS DRIVER 6421WL1251 WIRELESS DRIVER
6403M: Kalle Valo <kalle.valo@iki.fi> 6422M: Kalle Valo <kvalo@adurom.com>
6404L: linux-wireless@vger.kernel.org 6423L: linux-wireless@vger.kernel.org
6405W: http://wireless.kernel.org 6424W: http://wireless.kernel.org
6406T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git 6425T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
@@ -6415,6 +6434,7 @@ W: http://wireless.kernel.org
6415T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git 6434T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
6416S: Maintained 6435S: Maintained
6417F: drivers/net/wireless/wl12xx/wl1271* 6436F: drivers/net/wireless/wl12xx/wl1271*
6437F: include/linux/wl12xx.h
6418 6438
6419WL3501 WIRELESS PCMCIA CARD DRIVER 6439WL3501 WIRELESS PCMCIA CARD DRIVER
6420M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 6440M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
@@ -6559,6 +6579,20 @@ M: "Maciej W. Rozycki" <macro@linux-mips.org>
6559S: Maintained 6579S: Maintained
6560F: drivers/serial/zs.* 6580F: drivers/serial/zs.*
6561 6581
6582GRE DEMULTIPLEXER DRIVER
6583M: Dmitry Kozlov <xeb@mail.ru>
6584L: netdev@vger.kernel.org
6585S: Maintained
6586F: net/ipv4/gre.c
6587F: include/net/gre.h
6588
6589PPTP DRIVER
6590M: Dmitry Kozlov <xeb@mail.ru>
6591L: netdev@vger.kernel.org
6592S: Maintained
6593F: drivers/net/pptp.c
6594W: http://sourceforge.net/projects/accel-pptp
6595
6562THE REST 6596THE REST
6563M: Linus Torvalds <torvalds@linux-foundation.org> 6597M: Linus Torvalds <torvalds@linux-foundation.org>
6564L: linux-kernel@vger.kernel.org 6598L: linux-kernel@vger.kernel.org
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index dd3af2be13be..7ea1eb4a26b4 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -25,7 +25,7 @@
25#include <linux/spi/ads7846.h> 25#include <linux/spi/ads7846.h>
26#include <linux/regulator/machine.h> 26#include <linux/regulator/machine.h>
27#include <linux/i2c/twl.h> 27#include <linux/i2c/twl.h>
28#include <linux/spi/wl12xx.h> 28#include <linux/wl12xx.h>
29#include <linux/mtd/partitions.h> 29#include <linux/mtd/partitions.h>
30#include <linux/mtd/nand.h> 30#include <linux/mtd/nand.h>
31#include <linux/leds.h> 31#include <linux/leds.h>
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 9a5eb87425fc..ce28a851dcd3 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -14,7 +14,7 @@
14#include <linux/input.h> 14#include <linux/input.h>
15#include <linux/input/matrix_keypad.h> 15#include <linux/input/matrix_keypad.h>
16#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
17#include <linux/spi/wl12xx.h> 17#include <linux/wl12xx.h>
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/i2c/twl.h> 19#include <linux/i2c/twl.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 6b3984964cc5..6aa0728fa15d 100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -16,6 +16,8 @@
16#include <linux/gpio.h> 16#include <linux/gpio.h>
17#include <linux/i2c/twl.h> 17#include <linux/i2c/twl.h>
18#include <linux/regulator/machine.h> 18#include <linux/regulator/machine.h>
19#include <linux/regulator/fixed.h>
20#include <linux/wl12xx.h>
19 21
20#include <asm/mach-types.h> 22#include <asm/mach-types.h>
21#include <asm/mach/arch.h> 23#include <asm/mach/arch.h>
@@ -27,6 +29,9 @@
27#include "mux.h" 29#include "mux.h"
28#include "hsmmc.h" 30#include "hsmmc.h"
29 31
32#define OMAP_ZOOM_WLAN_PMENA_GPIO (101)
33#define OMAP_ZOOM_WLAN_IRQ_GPIO (162)
34
30/* Zoom2 has Qwerty keyboard*/ 35/* Zoom2 has Qwerty keyboard*/
31static int board_keymap[] = { 36static int board_keymap[] = {
32 KEY(0, 0, KEY_E), 37 KEY(0, 0, KEY_E),
@@ -106,6 +111,11 @@ static struct regulator_consumer_supply zoom_vmmc2_supply = {
106 .supply = "vmmc", 111 .supply = "vmmc",
107}; 112};
108 113
114static struct regulator_consumer_supply zoom_vmmc3_supply = {
115 .supply = "vmmc",
116 .dev_name = "mmci-omap-hs.2",
117};
118
109/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */ 119/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
110static struct regulator_init_data zoom_vmmc1 = { 120static struct regulator_init_data zoom_vmmc1 = {
111 .constraints = { 121 .constraints = {
@@ -151,6 +161,38 @@ static struct regulator_init_data zoom_vsim = {
151 .consumer_supplies = &zoom_vsim_supply, 161 .consumer_supplies = &zoom_vsim_supply,
152}; 162};
153 163
164static struct regulator_init_data zoom_vmmc3 = {
165 .constraints = {
166 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
167 },
168 .num_consumer_supplies = 1,
169 .consumer_supplies = &zoom_vmmc3_supply,
170};
171
172static struct fixed_voltage_config zoom_vwlan = {
173 .supply_name = "vwl1271",
174 .microvolts = 1800000, /* 1.8V */
175 .gpio = OMAP_ZOOM_WLAN_PMENA_GPIO,
176 .startup_delay = 70000, /* 70msec */
177 .enable_high = 1,
178 .enabled_at_boot = 0,
179 .init_data = &zoom_vmmc3,
180};
181
182static struct platform_device omap_vwlan_device = {
183 .name = "reg-fixed-voltage",
184 .id = 1,
185 .dev = {
186 .platform_data = &zoom_vwlan,
187 },
188};
189
190struct wl12xx_platform_data omap_zoom_wlan_data __initdata = {
191 .irq = OMAP_GPIO_IRQ(OMAP_ZOOM_WLAN_IRQ_GPIO),
192 /* ZOOM ref clock is 26 MHz */
193 .board_ref_clock = 1,
194};
195
154static struct omap2_hsmmc_info mmc[] __initdata = { 196static struct omap2_hsmmc_info mmc[] __initdata = {
155 { 197 {
156 .name = "external", 198 .name = "external",
@@ -168,6 +210,14 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
168 .nonremovable = true, 210 .nonremovable = true,
169 .power_saving = true, 211 .power_saving = true,
170 }, 212 },
213 {
214 .name = "wl1271",
215 .mmc = 3,
216 .wires = 4,
217 .gpio_wp = -EINVAL,
218 .gpio_cd = -EINVAL,
219 .nonremovable = true,
220 },
171 {} /* Terminator */ 221 {} /* Terminator */
172}; 222};
173 223
@@ -279,7 +329,11 @@ static void enable_board_wakeup_source(void)
279 329
280void __init zoom_peripherals_init(void) 330void __init zoom_peripherals_init(void)
281{ 331{
332 if (wl12xx_set_platform_data(&omap_zoom_wlan_data))
333 pr_err("error setting wl12xx data\n");
334
282 omap_i2c_init(); 335 omap_i2c_init();
336 platform_device_register(&omap_vwlan_device);
283 usb_musb_init(&musb_board_data); 337 usb_musb_init(&musb_board_data);
284 enable_board_wakeup_source(); 338 enable_board_wakeup_source();
285} 339}
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 2ba630276295..46e96bc1f5a1 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -360,6 +360,7 @@ struct qdio_initialize {
360 unsigned int no_output_qs; 360 unsigned int no_output_qs;
361 qdio_handler_t *input_handler; 361 qdio_handler_t *input_handler;
362 qdio_handler_t *output_handler; 362 qdio_handler_t *output_handler;
363 void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
363 unsigned long int_parm; 364 unsigned long int_parm;
364 void **input_sbal_addr_array; 365 void **input_sbal_addr_array;
365 void **output_sbal_addr_array; 366 void **output_sbal_addr_array;
@@ -377,11 +378,13 @@ struct qdio_initialize {
377extern int qdio_allocate(struct qdio_initialize *); 378extern int qdio_allocate(struct qdio_initialize *);
378extern int qdio_establish(struct qdio_initialize *); 379extern int qdio_establish(struct qdio_initialize *);
379extern int qdio_activate(struct ccw_device *); 380extern int qdio_activate(struct ccw_device *);
380 381extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
381extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, 382 unsigned int);
382 int q_nr, unsigned int bufnr, unsigned int count); 383extern int qdio_start_irq(struct ccw_device *, int);
383extern int qdio_shutdown(struct ccw_device*, int); 384extern int qdio_stop_irq(struct ccw_device *, int);
385extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
386extern int qdio_shutdown(struct ccw_device *, int);
384extern int qdio_free(struct ccw_device *); 387extern int qdio_free(struct ccw_device *);
385extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*); 388extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
386 389
387#endif /* __QDIO_H__ */ 390#endif /* __QDIO_H__ */
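The queue_start_poll hook plus the qdio_start_irq()/qdio_stop_irq()/
qdio_get_next_buffers() declarations added above form a polling interface.
A rough, assumption-laden sketch of the shape a consumer might take — the
callback body, the meaning of the two int pointers, and all foo_* names are
guesses, not taken from this hunk:

    #include <asm/qdio.h>

    static void foo_queue_start_poll(struct ccw_device *cdev, int queue,
                                     unsigned long data)
    {
            /* Invoked instead of a full interrupt: kick a poll loop
             * (e.g. schedule NAPI) for 'queue'. */
    }

    static void foo_poll_once(struct ccw_device *cdev, int queue)
    {
            int bufnr, error, count;

            count = qdio_get_next_buffers(cdev, queue, &bufnr, &error);
            if (count > 0 && !error) {
                    /* process 'count' buffers starting at 'bufnr' */
            }
    }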
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 8717809787fb..5d86bb803e94 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -444,8 +444,8 @@ static inline void fs_kfree_skb (struct sk_buff * skb)
444#define ROUND_NEAREST 3 444#define ROUND_NEAREST 3
445/********** make rate (not quite as much fun as Horizon) **********/ 445/********** make rate (not quite as much fun as Horizon) **********/
446 446
447static unsigned int make_rate (unsigned int rate, int r, 447static int make_rate(unsigned int rate, int r,
448 u16 * bits, unsigned int * actual) 448 u16 *bits, unsigned int *actual)
449{ 449{
450 unsigned char exp = -1; /* hush gcc */ 450 unsigned char exp = -1; /* hush gcc */
451 unsigned int man = -1; /* hush gcc */ 451 unsigned int man = -1; /* hush gcc */
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 54720baa7363..a95790452a68 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -1645,10 +1645,8 @@ static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
1645 unsigned short d = 0; 1645 unsigned short d = 0;
1646 char * s = skb->data; 1646 char * s = skb->data;
1647 if (*s++ == 'D') { 1647 if (*s++ == 'D') {
1648 for (i = 0; i < 4; ++i) { 1648 for (i = 0; i < 4; ++i)
1649 d = (d<<4) | ((*s <= '9') ? (*s - '0') : (*s - 'a' + 10)); 1649 d = (d << 4) | hex_to_bin(*s++);
1650 ++s;
1651 }
1652 PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d); 1650 PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d);
1653 } 1651 }
1654 } 1652 }
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ee9ddeb53417..8b358d7d958f 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -220,7 +220,7 @@ static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
220 while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) { 220 while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
221 dev->ffL.tcq_rd += 2; 221 dev->ffL.tcq_rd += 2;
222 if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 222 if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
223 dev->ffL.tcq_rd = dev->ffL.tcq_st; 223 dev->ffL.tcq_rd = dev->ffL.tcq_st;
224 if (dev->ffL.tcq_rd == dev->host_tcq_wr) 224 if (dev->ffL.tcq_rd == dev->host_tcq_wr)
225 return 0xFFFF; 225 return 0xFFFF;
226 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd); 226 desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 33f8421c71cc..18fdd9703b48 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -8,7 +8,6 @@
8 8
9#include <linux/bug.h> 9#include <linux/bug.h>
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/ethtool.h>
12#include <linux/firewire.h> 11#include <linux/firewire.h>
13#include <linux/firewire-constants.h> 12#include <linux/firewire-constants.h>
14#include <linux/highmem.h> 13#include <linux/highmem.h>
@@ -1361,17 +1360,6 @@ static int fwnet_change_mtu(struct net_device *net, int new_mtu)
1361 return 0; 1360 return 0;
1362} 1361}
1363 1362
1364static void fwnet_get_drvinfo(struct net_device *net,
1365 struct ethtool_drvinfo *info)
1366{
1367 strcpy(info->driver, KBUILD_MODNAME);
1368 strcpy(info->bus_info, "ieee1394");
1369}
1370
1371static const struct ethtool_ops fwnet_ethtool_ops = {
1372 .get_drvinfo = fwnet_get_drvinfo,
1373};
1374
1375static const struct net_device_ops fwnet_netdev_ops = { 1363static const struct net_device_ops fwnet_netdev_ops = {
1376 .ndo_open = fwnet_open, 1364 .ndo_open = fwnet_open,
1377 .ndo_stop = fwnet_stop, 1365 .ndo_stop = fwnet_stop,
@@ -1390,7 +1378,6 @@ static void fwnet_init_dev(struct net_device *net)
1390 net->hard_header_len = FWNET_HLEN; 1378 net->hard_header_len = FWNET_HLEN;
1391 net->type = ARPHRD_IEEE1394; 1379 net->type = ARPHRD_IEEE1394;
1392 net->tx_queue_len = 10; 1380 net->tx_queue_len = 10;
1393 SET_ETHTOOL_OPS(net, &fwnet_ethtool_ops);
1394} 1381}
1395 1382
1396/* caller must hold fwnet_device_mutex */ 1383/* caller must hold fwnet_device_mutex */
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index bc289e367e30..63403822330e 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -58,7 +58,6 @@
58#include <linux/tcp.h> 58#include <linux/tcp.h>
59#include <linux/skbuff.h> 59#include <linux/skbuff.h>
60#include <linux/bitops.h> 60#include <linux/bitops.h>
61#include <linux/ethtool.h>
62#include <asm/uaccess.h> 61#include <asm/uaccess.h>
63#include <asm/delay.h> 62#include <asm/delay.h>
64#include <asm/unaligned.h> 63#include <asm/unaligned.h>
@@ -173,8 +172,6 @@ static netdev_tx_t ether1394_tx(struct sk_buff *skb,
173 struct net_device *dev); 172 struct net_device *dev);
174static void ether1394_iso(struct hpsb_iso *iso); 173static void ether1394_iso(struct hpsb_iso *iso);
175 174
176static const struct ethtool_ops ethtool_ops;
177
178static int ether1394_write(struct hpsb_host *host, int srcid, int destid, 175static int ether1394_write(struct hpsb_host *host, int srcid, int destid,
179 quadlet_t *data, u64 addr, size_t len, u16 flags); 176 quadlet_t *data, u64 addr, size_t len, u16 flags);
180static void ether1394_add_host(struct hpsb_host *host); 177static void ether1394_add_host(struct hpsb_host *host);
@@ -525,8 +522,6 @@ static void ether1394_init_dev(struct net_device *dev)
525 dev->header_ops = &ether1394_header_ops; 522 dev->header_ops = &ether1394_header_ops;
526 dev->netdev_ops = &ether1394_netdev_ops; 523 dev->netdev_ops = &ether1394_netdev_ops;
527 524
528 SET_ETHTOOL_OPS(dev, &ethtool_ops);
529
530 dev->watchdog_timeo = ETHER1394_TIMEOUT; 525 dev->watchdog_timeo = ETHER1394_TIMEOUT;
531 dev->flags = IFF_BROADCAST | IFF_MULTICAST; 526 dev->flags = IFF_BROADCAST | IFF_MULTICAST;
532 dev->features = NETIF_F_HIGHDMA; 527 dev->features = NETIF_F_HIGHDMA;
@@ -1695,17 +1690,6 @@ fail:
1695 return NETDEV_TX_OK; 1690 return NETDEV_TX_OK;
1696} 1691}
1697 1692
1698static void ether1394_get_drvinfo(struct net_device *dev,
1699 struct ethtool_drvinfo *info)
1700{
1701 strcpy(info->driver, driver_name);
1702 strcpy(info->bus_info, "ieee1394"); /* FIXME provide more detail? */
1703}
1704
1705static const struct ethtool_ops ethtool_ops = {
1706 .get_drvinfo = ether1394_get_drvinfo
1707};
1708
1709static int __init ether1394_init_module(void) 1693static int __init ether1394_init_module(void)
1710{ 1694{
1711 int err; 1695 int err;
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 2978bdaa6b88..e54e79d4e2c1 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -1515,8 +1515,13 @@ static int decodeFVteln(char *teln, unsigned long *bmaskp, int *activep)
1515 while (*s) { 1515 while (*s) {
1516 int digit1 = 0; 1516 int digit1 = 0;
1517 int digit2 = 0; 1517 int digit2 = 0;
1518 if (!isdigit(*s)) return -3; 1518 char *endp;
1519 while (isdigit(*s)) { digit1 = digit1*10 + (*s - '0'); s++; } 1519
1520 digit1 = simple_strtoul(s, &endp, 10);
1521 if (s == endp)
1522 return -3;
1523 s = endp;
1524
1520 if (digit1 <= 0 || digit1 > 30) return -4; 1525 if (digit1 <= 0 || digit1 > 30) return -4;
1521 if (*s == 0 || *s == ',' || *s == ' ') { 1526 if (*s == 0 || *s == ',' || *s == ' ') {
1522 bmask |= (1 << digit1); 1527 bmask |= (1 << digit1);
@@ -1526,8 +1531,12 @@ static int decodeFVteln(char *teln, unsigned long *bmaskp, int *activep)
1526 } 1531 }
1527 if (*s != '-') return -5; 1532 if (*s != '-') return -5;
1528 s++; 1533 s++;
1529 if (!isdigit(*s)) return -3; 1534
1530 while (isdigit(*s)) { digit2 = digit2*10 + (*s - '0'); s++; } 1535 digit2 = simple_strtoul(s, &endp, 10);
1536 if (s == endp)
1537 return -3;
1538 s = endp;
1539
1531 if (digit2 <= 0 || digit2 > 30) return -4; 1540 if (digit2 <= 0 || digit2 > 30) return -4;
1532 if (*s == 0 || *s == ',' || *s == ' ') { 1541 if (*s == 0 || *s == ',' || *s == ' ') {
1533 if (digit1 > digit2) 1542 if (digit1 > digit2)
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 70cf6bac7a5a..48e6d220f62c 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -77,7 +77,7 @@ static void deflect_timer_expire(ulong arg)
77 77
78 case DEFLECT_ALERT: 78 case DEFLECT_ALERT:
79 cs->ics.command = ISDN_CMD_REDIR; /* protocol */ 79 cs->ics.command = ISDN_CMD_REDIR; /* protocol */
80 strcpy(cs->ics.parm.setup.phone,cs->deflect_dest); 80 strlcpy(cs->ics.parm.setup.phone, cs->deflect_dest, sizeof(cs->ics.parm.setup.phone));
81 strcpy(cs->ics.parm.setup.eazmsn,"Testtext delayed"); 81 strcpy(cs->ics.parm.setup.eazmsn,"Testtext delayed");
82 divert_if.ll_cmd(&cs->ics); 82 divert_if.ll_cmd(&cs->ics);
83 spin_lock_irqsave(&divert_lock, flags); 83 spin_lock_irqsave(&divert_lock, flags);
@@ -251,7 +251,7 @@ int deflect_extern_action(u_char cmd, ulong callid, char *to_nr)
251 251
252 case 2: /* redir */ 252 case 2: /* redir */
253 del_timer(&cs->timer); 253 del_timer(&cs->timer);
254 strcpy(cs->ics.parm.setup.phone, to_nr); 254 strlcpy(cs->ics.parm.setup.phone, to_nr, sizeof(cs->ics.parm.setup.phone));
255 strcpy(cs->ics.parm.setup.eazmsn, "Testtext manual"); 255 strcpy(cs->ics.parm.setup.eazmsn, "Testtext manual");
256 ic.command = ISDN_CMD_REDIR; 256 ic.command = ISDN_CMD_REDIR;
257 if ((i = divert_if.ll_cmd(&ic))) 257 if ((i = divert_if.ll_cmd(&ic)))
@@ -480,7 +480,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
480 if (!cs->timer.expires) 480 if (!cs->timer.expires)
481 { strcpy(ic->parm.setup.eazmsn,"Testtext direct"); 481 { strcpy(ic->parm.setup.eazmsn,"Testtext direct");
482 ic->parm.setup.screen = dv->rule.screen; 482 ic->parm.setup.screen = dv->rule.screen;
483 strcpy(ic->parm.setup.phone,dv->rule.to_nr); 483 strlcpy(ic->parm.setup.phone, dv->rule.to_nr, sizeof(ic->parm.setup.phone));
484 cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */ 484 cs->akt_state = DEFLECT_AUTODEL; /* delete after timeout */
485 cs->timer.expires = jiffies + (HZ * AUTODEL_TIME); 485 cs->timer.expires = jiffies + (HZ * AUTODEL_TIME);
486 retval = 5; 486 retval = 5;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index be5faf4aa868..5aa138eb0b3c 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -234,13 +234,14 @@ read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max)
234 count++; 234 count++;
235 if (count > trans_max) 235 if (count > trans_max)
236 count = trans_max; /* limit length */ 236 count = trans_max; /* limit length */
237 if ((skb = dev_alloc_skb(count))) { 237 skb = dev_alloc_skb(count);
238 dst = skb_put(skb, count); 238 if (skb) {
239 while (count--) 239 dst = skb_put(skb, count);
240 while (count--)
240 *dst++ = Read_hfc(cs, HFCSX_FIF_DRD); 241 *dst++ = Read_hfc(cs, HFCSX_FIF_DRD);
241 return(skb); 242 return skb;
242 } 243 } else
243 else return(NULL); /* no memory */ 244 return NULL; /* no memory */
244 } 245 }
245 246
246 do { 247 do {
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 51dc60da333b..c463162843ba 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -14,7 +14,7 @@
14#include <linux/isdn.h> 14#include <linux/isdn.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/smp_lock.h> 17#include <linux/mutex.h>
18#include "isdn_common.h" 18#include "isdn_common.h"
19#include "isdn_tty.h" 19#include "isdn_tty.h"
20#ifdef CONFIG_ISDN_AUDIO 20#ifdef CONFIG_ISDN_AUDIO
@@ -28,6 +28,7 @@
28 28
29/* Prototypes */ 29/* Prototypes */
30 30
31static DEFINE_MUTEX(modem_info_mutex);
31static int isdn_tty_edit_at(const char *, int, modem_info *); 32static int isdn_tty_edit_at(const char *, int, modem_info *);
32static void isdn_tty_check_esc(const u_char *, u_char, int, int *, u_long *); 33static void isdn_tty_check_esc(const u_char *, u_char, int, int *, u_long *);
33static void isdn_tty_modem_reset_regs(modem_info *, int); 34static void isdn_tty_modem_reset_regs(modem_info *, int);
@@ -1354,14 +1355,14 @@ isdn_tty_tiocmget(struct tty_struct *tty, struct file *file)
1354 if (tty->flags & (1 << TTY_IO_ERROR)) 1355 if (tty->flags & (1 << TTY_IO_ERROR))
1355 return -EIO; 1356 return -EIO;
1356 1357
1357 lock_kernel(); 1358 mutex_lock(&modem_info_mutex);
1358#ifdef ISDN_DEBUG_MODEM_IOCTL 1359#ifdef ISDN_DEBUG_MODEM_IOCTL
1359 printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line); 1360 printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line);
1360#endif 1361#endif
1361 1362
1362 control = info->mcr; 1363 control = info->mcr;
1363 status = info->msr; 1364 status = info->msr;
1364 unlock_kernel(); 1365 mutex_unlock(&modem_info_mutex);
1365 return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) 1366 return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
1366 | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) 1367 | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
1367 | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) 1368 | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
@@ -1385,7 +1386,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
1385 printk(KERN_DEBUG "ttyI%d ioctl TIOCMxxx: %x %x\n", info->line, set, clear); 1386 printk(KERN_DEBUG "ttyI%d ioctl TIOCMxxx: %x %x\n", info->line, set, clear);
1386#endif 1387#endif
1387 1388
1388 lock_kernel(); 1389 mutex_lock(&modem_info_mutex);
1389 if (set & TIOCM_RTS) 1390 if (set & TIOCM_RTS)
1390 info->mcr |= UART_MCR_RTS; 1391 info->mcr |= UART_MCR_RTS;
1391 if (set & TIOCM_DTR) { 1392 if (set & TIOCM_DTR) {
@@ -1407,7 +1408,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
1407 isdn_tty_modem_hup(info, 1); 1408 isdn_tty_modem_hup(info, 1);
1408 } 1409 }
1409 } 1410 }
1410 unlock_kernel(); 1411 mutex_unlock(&modem_info_mutex);
1411 return 0; 1412 return 0;
1412} 1413}
1413 1414
@@ -3515,7 +3516,7 @@ isdn_tty_parse_at(modem_info * info)
3515{ 3516{
3516 atemu *m = &info->emu; 3517 atemu *m = &info->emu;
3517 char *p; 3518 char *p;
3518 char ds[40]; 3519 char ds[ISDN_MSNLEN];
3519 3520
3520#ifdef ISDN_DEBUG_AT 3521#ifdef ISDN_DEBUG_AT
3521 printk(KERN_DEBUG "AT: '%s'\n", m->mdmcmd); 3522 printk(KERN_DEBUG "AT: '%s'\n", m->mdmcmd);
@@ -3594,7 +3595,7 @@ isdn_tty_parse_at(modem_info * info)
3594 break; 3595 break;
3595 case '3': 3596 case '3':
3596 p++; 3597 p++;
3597 sprintf(ds, "\r\n%d", info->emu.charge); 3598 snprintf(ds, sizeof(ds), "\r\n%d", info->emu.charge);
3598 isdn_tty_at_cout(ds, info); 3599 isdn_tty_at_cout(ds, info);
3599 break; 3600 break;
3600 default:; 3601 default:;
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 713ef2b805a2..76d9e673b4e1 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -1237,6 +1237,7 @@ dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb)
1237 if (dsp->cmx_delay) 1237 if (dsp->cmx_delay)
1238 dsp->rx_W = (dsp->rx_R + dsp->cmx_delay) 1238 dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
1239 & CMX_BUFF_MASK; 1239 & CMX_BUFF_MASK;
1240 else
1240 dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1)) 1241 dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1))
1241 & CMX_BUFF_MASK; 1242 & CMX_BUFF_MASK;
1242 } else { 1243 } else {
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 22f38e48ac4e..5b59796ed250 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -972,7 +972,7 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
972 if (debug & DEBUG_L1OIP_SOCKET) 972 if (debug & DEBUG_L1OIP_SOCKET)
973 printk(KERN_DEBUG "%s: got new ip address from user " 973 printk(KERN_DEBUG "%s: got new ip address from user "
974 "space.\n", __func__); 974 "space.\n", __func__);
975 l1oip_socket_open(hc); 975 l1oip_socket_open(hc);
976 break; 976 break;
977 case MISDN_CTRL_UNSETPEER: 977 case MISDN_CTRL_UNSETPEER:
978 if (debug & DEBUG_L1OIP_SOCKET) 978 if (debug & DEBUG_L1OIP_SOCKET)
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index b159bd59e64e..a5b632e67552 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -18,7 +18,6 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/mISDNif.h> 19#include <linux/mISDNif.h>
20#include <linux/kthread.h> 20#include <linux/kthread.h>
21#include <linux/smp_lock.h>
22#include "core.h" 21#include "core.h"
23 22
24static u_int *debug; 23static u_int *debug;
@@ -205,13 +204,7 @@ mISDNStackd(void *data)
205 struct mISDNstack *st = data; 204 struct mISDNstack *st = data;
206 int err = 0; 205 int err = 0;
207 206
208#ifdef CONFIG_SMP
209 lock_kernel();
210#endif
211 sigfillset(&current->blocked); 207 sigfillset(&current->blocked);
212#ifdef CONFIG_SMP
213 unlock_kernel();
214#endif
215 if (*debug & DEBUG_MSG_THREAD) 208 if (*debug & DEBUG_MSG_THREAD)
216 printk(KERN_DEBUG "mISDNStackd %s started\n", 209 printk(KERN_DEBUG "mISDNStackd %s started\n",
217 dev_name(&st->dev->dev)); 210 dev_name(&st->dev->dev));
diff --git a/drivers/isdn/pcbit/edss1.c b/drivers/isdn/pcbit/edss1.c
index d5920ae22d73..80c9c16fd5ef 100644
--- a/drivers/isdn/pcbit/edss1.c
+++ b/drivers/isdn/pcbit/edss1.c
@@ -33,7 +33,7 @@
33#include "callbacks.h" 33#include "callbacks.h"
34 34
35 35
36char * isdn_state_table[] = { 36const char * const isdn_state_table[] = {
37 "Closed", 37 "Closed",
38 "Call initiated", 38 "Call initiated",
39 "Overlap sending", 39 "Overlap sending",
diff --git a/drivers/isdn/pcbit/edss1.h b/drivers/isdn/pcbit/edss1.h
index 0b64f97015d8..39f8346e28c5 100644
--- a/drivers/isdn/pcbit/edss1.h
+++ b/drivers/isdn/pcbit/edss1.h
@@ -90,7 +90,7 @@ struct fsm_timer_entry {
90 unsigned long timeout; /* in seconds */ 90 unsigned long timeout; /* in seconds */
91}; 91};
92 92
93extern char * isdn_state_table[]; 93extern const char * const isdn_state_table[];
94 94
95void pcbit_fsm_event(struct pcbit_dev *, struct pcbit_chan *, 95void pcbit_fsm_event(struct pcbit_dev *, struct pcbit_chan *,
96 unsigned short event, struct callb_data *); 96 unsigned short event, struct callb_data *);
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index baac246561b9..4777a1cbcd8d 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -337,10 +337,10 @@ el2_probe1(struct net_device *dev, int ioaddr)
337 /* Finish setting the board's parameters. */ 337 /* Finish setting the board's parameters. */
338 ei_status.stop_page = EL2_MB1_STOP_PG; 338 ei_status.stop_page = EL2_MB1_STOP_PG;
339 ei_status.word16 = wordlength; 339 ei_status.word16 = wordlength;
340 ei_status.reset_8390 = &el2_reset_8390; 340 ei_status.reset_8390 = el2_reset_8390;
341 ei_status.get_8390_hdr = &el2_get_8390_hdr; 341 ei_status.get_8390_hdr = el2_get_8390_hdr;
342 ei_status.block_input = &el2_block_input; 342 ei_status.block_input = el2_block_input;
343 ei_status.block_output = &el2_block_output; 343 ei_status.block_output = el2_block_output;
344 344
345 if (dev->irq == 2) 345 if (dev->irq == 2)
346 dev->irq = 9; 346 dev->irq = 9;
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 3bba835f1a21..cdf7226a7c43 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -662,7 +662,9 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
662 pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n"); 662 pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n");
663 663
664 { 664 {
665 char *ram_split[] = { "5:3", "3:1", "1:1", "3:5" }; 665 static const char * const ram_split[] = {
666 "5:3", "3:1", "1:1", "3:5"
667 };
666 __u32 config; 668 __u32 config;
667 EL3WINDOW(3); 669 EL3WINDOW(3);
668 vp->available_media = inw(ioaddr + Wn3_Options); 670 vp->available_media = inw(ioaddr + Wn3_Options);
@@ -734,7 +736,7 @@ static int corkscrew_open(struct net_device *dev)
734 init_timer(&vp->timer); 736 init_timer(&vp->timer);
735 vp->timer.expires = jiffies + media_tbl[dev->if_port].wait; 737 vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
736 vp->timer.data = (unsigned long) dev; 738 vp->timer.data = (unsigned long) dev;
737 vp->timer.function = &corkscrew_timer; /* timer handler */ 739 vp->timer.function = corkscrew_timer; /* timer handler */
738 add_timer(&vp->timer); 740 add_timer(&vp->timer);
739 } else 741 } else
740 dev->if_port = vp->default_media; 742 dev->if_port = vp->default_media;
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index a7b0e5e43a52..ca00f0a11217 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -463,7 +463,7 @@ static int __init do_elmc_probe(struct net_device *dev)
463 463
464 /* we didn't find any 3c523 in the slots we checked for */ 464 /* we didn't find any 3c523 in the slots we checked for */
465 if (slot == MCA_NOTFOUND) 465 if (slot == MCA_NOTFOUND)
466 return ((base_addr || irq) ? -ENXIO : -ENODEV); 466 return (base_addr || irq) ? -ENXIO : -ENODEV;
467 467
468 mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC"); 468 mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC");
469 mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev); 469 mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index fa42103b2874..ed964964fe1f 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1742,7 +1742,7 @@ vortex_open(struct net_device *dev)
1742 1742
1743 /* Use the now-standard shared IRQ implementation. */ 1743 /* Use the now-standard shared IRQ implementation. */
1744 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? 1744 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
1745 &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) { 1745 boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
1746 pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq); 1746 pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
1747 goto err; 1747 goto err;
1748 } 1748 }
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 4a4f6b81e32d..237d4ea5a416 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -561,7 +561,7 @@ rx_status_loop:
561 if (cp_rx_csum_ok(status)) 561 if (cp_rx_csum_ok(status))
562 skb->ip_summed = CHECKSUM_UNNECESSARY; 562 skb->ip_summed = CHECKSUM_UNNECESSARY;
563 else 563 else
564 skb->ip_summed = CHECKSUM_NONE; 564 skb_checksum_none_assert(skb);
565 565
566 skb_put(skb, len); 566 skb_put(skb, len);
567 567
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2cc81a54cbf3..ef683a993dce 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2515,6 +2515,18 @@ config S6GMAC
2515 2515
2516source "drivers/net/stmmac/Kconfig" 2516source "drivers/net/stmmac/Kconfig"
2517 2517
2518config PCH_GBE
2519 tristate "PCH Gigabit Ethernet"
2520 depends on PCI
2521 ---help---
 2522	  This is a Gigabit Ethernet driver for the Topcliff PCH.
 2523	  Topcliff PCH is the platform controller hub used in Intel's
 2524	  general embedded platforms.
 2525	  Topcliff PCH has a Gigabit Ethernet interface.
 2526	  Using this interface, the system can access devices connected
 2527	  to it over Gigabit Ethernet.
 2528	  This driver enables that Gigabit Ethernet function.
2529
2518endif # NETDEV_1000 2530endif # NETDEV_1000
2519 2531
2520# 2532#
@@ -2869,6 +2881,20 @@ config QLGE
2869 To compile this driver as a module, choose M here: the module 2881 To compile this driver as a module, choose M here: the module
2870 will be called qlge. 2882 will be called qlge.
2871 2883
2884config BNA
2885 tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
2886 depends on PCI
2887 ---help---
2888 This driver supports Brocade 1010/1020 10Gb CEE-capable Ethernet
2889 cards.
2890 To compile this driver as a module, choose M here: the module
2891 will be called bna.
2892
2893 For general information and support, go to the Brocade support
2894 website at:
2895
2896 <http://support.brocade.com>
2897
2872source "drivers/net/sfc/Kconfig" 2898source "drivers/net/sfc/Kconfig"
2873 2899
2874source "drivers/net/benet/Kconfig" 2900source "drivers/net/benet/Kconfig"
@@ -3202,6 +3228,17 @@ config PPPOE
3202 which contains instruction on how to use this driver (under 3228 which contains instruction on how to use this driver (under
3203 the heading "Kernel mode PPPoE"). 3229 the heading "Kernel mode PPPoE").
3204 3230
3231config PPTP
3232 tristate "PPP over IPv4 (PPTP) (EXPERIMENTAL)"
3233 depends on EXPERIMENTAL && PPP && NET_IPGRE_DEMUX
3234 help
3235 Support for PPP over IPv4 (Point-to-Point Tunneling Protocol).
3236
3237 This driver requires the pppd plugin to work in client mode, or a
3238 modified pptpd (poptop) to work in server mode.
3239 See http://accel-pptp.sourceforge.net/ for information on how to
3240 use this module.
3241
3205config PPPOATM 3242config PPPOATM
3206 tristate "PPP over ATM" 3243 tristate "PPP over ATM"
3207 depends on ATM && PPP 3244 depends on ATM && PPP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3e8f150c4b14..b8bf93d4a132 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_ENIC) += enic/
34obj-$(CONFIG_JME) += jme.o 34obj-$(CONFIG_JME) += jme.o
35obj-$(CONFIG_BE2NET) += benet/ 35obj-$(CONFIG_BE2NET) += benet/
36obj-$(CONFIG_VMXNET3) += vmxnet3/ 36obj-$(CONFIG_VMXNET3) += vmxnet3/
37obj-$(CONFIG_BNA) += bna/
37 38
38gianfar_driver-objs := gianfar.o \ 39gianfar_driver-objs := gianfar.o \
39 gianfar_ethtool.o \ 40 gianfar_ethtool.o \
@@ -162,6 +163,7 @@ obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
162obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o 163obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
163obj-$(CONFIG_PPPOE) += pppox.o pppoe.o 164obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
164obj-$(CONFIG_PPPOL2TP) += pppox.o 165obj-$(CONFIG_PPPOL2TP) += pppox.o
166obj-$(CONFIG_PPTP) += pppox.o pptp.o
165 167
166obj-$(CONFIG_SLIP) += slip.o 168obj-$(CONFIG_SLIP) += slip.o
167obj-$(CONFIG_SLHC) += slhc.o 169obj-$(CONFIG_SLHC) += slhc.o
@@ -296,3 +298,4 @@ obj-$(CONFIG_WIMAX) += wimax/
296obj-$(CONFIG_CAIF) += caif/ 298obj-$(CONFIG_CAIF) += caif/
297 299
298obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ 300obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
301obj-$(CONFIG_PCH_GBE) += pch_gbe/
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index b9a591604e5b..41d9911202d0 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2033,7 +2033,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
2033 skb->csum = htons(csum); 2033 skb->csum = htons(csum);
2034 skb->ip_summed = CHECKSUM_COMPLETE; 2034 skb->ip_summed = CHECKSUM_COMPLETE;
2035 } else { 2035 } else {
2036 skb->ip_summed = CHECKSUM_NONE; 2036 skb_checksum_none_assert(skb);
2037 } 2037 }
2038 2038
2039 /* send it up */ 2039 /* send it up */
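[Editor's note] For context on the acenic hunk above: skb->ip_summed tells the stack how much checksum work the hardware already did, and CHECKSUM_COMPLETE means the raw checksum of the packet is supplied in skb->csum. A sketch mirroring the receive-side logic shown above, with hw_csum_ok and csum as illustrative names only:

	#include <linux/skbuff.h>

	static void example_rx_csum(struct sk_buff *skb, bool hw_csum_ok, u16 csum)
	{
		if (hw_csum_ok) {
			skb->csum = htons(csum);        /* raw hardware checksum */
			skb->ip_summed = CHECKSUM_COMPLETE;
		} else {
			skb_checksum_none_assert(skb);  /* still CHECKSUM_NONE */
		}
	}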
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 585c25f4b60c..58a0ab4923ee 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -396,7 +396,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
396 event_count = coal_conf->rx_event_count; 396 event_count = coal_conf->rx_event_count;
397 if( timeout > MAX_TIMEOUT || 397 if( timeout > MAX_TIMEOUT ||
398 event_count > MAX_EVENT_COUNT ) 398 event_count > MAX_EVENT_COUNT )
399 return -EINVAL; 399 return -EINVAL;
400 400
401 timeout = timeout * DELAY_TIMER_CONV; 401 timeout = timeout * DELAY_TIMER_CONV;
402 writel(VAL0|STINTEN, mmio+INTEN0); 402 writel(VAL0|STINTEN, mmio+INTEN0);
@@ -409,7 +409,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
409 event_count = coal_conf->tx_event_count; 409 event_count = coal_conf->tx_event_count;
410 if( timeout > MAX_TIMEOUT || 410 if( timeout > MAX_TIMEOUT ||
411 event_count > MAX_EVENT_COUNT ) 411 event_count > MAX_EVENT_COUNT )
412 return -EINVAL; 412 return -EINVAL;
413 413
414 414
415 timeout = timeout * DELAY_TIMER_CONV; 415 timeout = timeout * DELAY_TIMER_CONV;
@@ -903,18 +903,18 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
903} 903}
904 904
905/* 905/*
906This function reads the mib registers and returns the hardware statistics. It updates previous internal driver statistics with new values. 906 * This function reads the mib registers and returns the hardware statistics.
907*/ 907 * It updates previous internal driver statistics with new values.
908static struct net_device_stats *amd8111e_get_stats(struct net_device * dev) 908 */
909static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
909{ 910{
910 struct amd8111e_priv *lp = netdev_priv(dev); 911 struct amd8111e_priv *lp = netdev_priv(dev);
911 void __iomem *mmio = lp->mmio; 912 void __iomem *mmio = lp->mmio;
912 unsigned long flags; 913 unsigned long flags;
913 /* struct net_device_stats *prev_stats = &lp->prev_stats; */ 914 struct net_device_stats *new_stats = &dev->stats;
914 struct net_device_stats* new_stats = &lp->stats;
915 915
916 if(!lp->opened) 916 if (!lp->opened)
917 return &lp->stats; 917 return new_stats;
918 spin_lock_irqsave (&lp->lock, flags); 918 spin_lock_irqsave (&lp->lock, flags);
919 919
920 /* stats.rx_packets */ 920 /* stats.rx_packets */
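[Editor's note] The amd8111e change above is one instance of a cleanup repeated throughout this series (am79c961, ep93xx, ether1, ether3 and atl1c below): drivers stop keeping a private struct net_device_stats copy and use the stats member already embedded in struct net_device. A minimal sketch of the resulting pattern, with illustrative function names:

	#include <linux/netdevice.h>

	/* counters are bumped directly on the embedded dev->stats ... */
	static void example_count_rx(struct net_device *dev, unsigned int len)
	{
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}

	/* ... so ndo_get_stats, if kept at all, reduces to returning it */
	static struct net_device_stats *example_get_stats(struct net_device *dev)
	{
		return &dev->stats;
	}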
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index ac36eb6981e3..b5926af03a7e 100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
@@ -787,7 +787,6 @@ struct amd8111e_priv{
787 struct vlan_group *vlgrp; 787 struct vlan_group *vlgrp;
788#endif 788#endif
789 char opened; 789 char opened;
790 struct net_device_stats stats;
791 unsigned int drv_rx_errors; 790 unsigned int drv_rx_errors;
792 struct amd8111e_coalesce_conf coal_conf; 791 struct amd8111e_coalesce_conf coal_conf;
793 792
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 0362c8d31a08..10d0dba572c2 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -244,7 +244,7 @@ static int ipddp_delete(struct ipddp_route *rt)
244 } 244 }
245 245
246 spin_unlock_bh(&ipddp_route_lock); 246 spin_unlock_bh(&ipddp_route_lock);
247 return (-ENOENT); 247 return -ENOENT;
248} 248}
249 249
250/* 250/*
@@ -259,10 +259,10 @@ static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
259 if(f->ip == rt->ip && 259 if(f->ip == rt->ip &&
260 f->at.s_net == rt->at.s_net && 260 f->at.s_net == rt->at.s_net &&
261 f->at.s_node == rt->at.s_node) 261 f->at.s_node == rt->at.s_node)
262 return (f); 262 return f;
263 } 263 }
264 264
265 return (NULL); 265 return NULL;
266} 266}
267 267
268static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 268static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -279,7 +279,7 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
279 switch(cmd) 279 switch(cmd)
280 { 280 {
281 case SIOCADDIPDDPRT: 281 case SIOCADDIPDDPRT:
282 return (ipddp_create(&rcp)); 282 return ipddp_create(&rcp);
283 283
284 case SIOCFINDIPDDPRT: 284 case SIOCFINDIPDDPRT:
285 spin_lock_bh(&ipddp_route_lock); 285 spin_lock_bh(&ipddp_route_lock);
@@ -297,7 +297,7 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
297 return -ENOENT; 297 return -ENOENT;
298 298
299 case SIOCDELIPDDPRT: 299 case SIOCDELIPDDPRT:
300 return (ipddp_delete(&rcp)); 300 return ipddp_delete(&rcp);
301 301
302 default: 302 default:
303 return -EINVAL; 303 return -EINVAL;
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index adc07551739e..e69eead12ec7 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -727,7 +727,7 @@ static int sendup_buffer (struct net_device *dev)
727 727
728 if (ltc->command != LT_RCVLAP) { 728 if (ltc->command != LT_RCVLAP) {
729 printk("unknown command 0x%02x from ltpc card\n",ltc->command); 729 printk("unknown command 0x%02x from ltpc card\n",ltc->command);
730 return(-1); 730 return -1;
731 } 731 }
732 dnode = ltc->dnode; 732 dnode = ltc->dnode;
733 snode = ltc->snode; 733 snode = ltc->snode;
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 8c496fb1ac9e..62f21106efec 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -300,8 +300,6 @@ am79c961_open(struct net_device *dev)
300 struct dev_priv *priv = netdev_priv(dev); 300 struct dev_priv *priv = netdev_priv(dev);
301 int ret; 301 int ret;
302 302
303 memset (&priv->stats, 0, sizeof (priv->stats));
304
305 ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev); 303 ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
306 if (ret) 304 if (ret)
307 return ret; 305 return ret;
@@ -347,8 +345,7 @@ am79c961_close(struct net_device *dev)
347 */ 345 */
348static struct net_device_stats *am79c961_getstats (struct net_device *dev) 346static struct net_device_stats *am79c961_getstats (struct net_device *dev)
349{ 347{
350 struct dev_priv *priv = netdev_priv(dev); 348 return &dev->stats;
351 return &priv->stats;
352} 349}
353 350
354static void am79c961_mc_hash(char *addr, unsigned short *hash) 351static void am79c961_mc_hash(char *addr, unsigned short *hash)
@@ -510,14 +507,14 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
510 507
511 if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) { 508 if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
512 am_writeword (dev, hdraddr + 2, RMD_OWN); 509 am_writeword (dev, hdraddr + 2, RMD_OWN);
513 priv->stats.rx_errors ++; 510 dev->stats.rx_errors++;
514 if (status & RMD_ERR) { 511 if (status & RMD_ERR) {
515 if (status & RMD_FRAM) 512 if (status & RMD_FRAM)
516 priv->stats.rx_frame_errors ++; 513 dev->stats.rx_frame_errors++;
517 if (status & RMD_CRC) 514 if (status & RMD_CRC)
518 priv->stats.rx_crc_errors ++; 515 dev->stats.rx_crc_errors++;
519 } else if (status & RMD_STP) 516 } else if (status & RMD_STP)
520 priv->stats.rx_length_errors ++; 517 dev->stats.rx_length_errors++;
521 continue; 518 continue;
522 } 519 }
523 520
@@ -531,12 +528,12 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
531 am_writeword(dev, hdraddr + 2, RMD_OWN); 528 am_writeword(dev, hdraddr + 2, RMD_OWN);
532 skb->protocol = eth_type_trans(skb, dev); 529 skb->protocol = eth_type_trans(skb, dev);
533 netif_rx(skb); 530 netif_rx(skb);
534 priv->stats.rx_bytes += len; 531 dev->stats.rx_bytes += len;
535 priv->stats.rx_packets ++; 532 dev->stats.rx_packets++;
536 } else { 533 } else {
537 am_writeword (dev, hdraddr + 2, RMD_OWN); 534 am_writeword (dev, hdraddr + 2, RMD_OWN);
538 printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name); 535 printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
539 priv->stats.rx_dropped ++; 536 dev->stats.rx_dropped++;
540 break; 537 break;
541 } 538 }
542 } while (1); 539 } while (1);
@@ -565,7 +562,7 @@ am79c961_tx(struct net_device *dev, struct dev_priv *priv)
565 if (status & TMD_ERR) { 562 if (status & TMD_ERR) {
566 u_int status2; 563 u_int status2;
567 564
568 priv->stats.tx_errors ++; 565 dev->stats.tx_errors++;
569 566
570 status2 = am_readword (dev, hdraddr + 6); 567 status2 = am_readword (dev, hdraddr + 6);
571 568
@@ -575,18 +572,18 @@ am79c961_tx(struct net_device *dev, struct dev_priv *priv)
575 am_writeword (dev, hdraddr + 6, 0); 572 am_writeword (dev, hdraddr + 6, 0);
576 573
577 if (status2 & TST_RTRY) 574 if (status2 & TST_RTRY)
578 priv->stats.collisions += 16; 575 dev->stats.collisions += 16;
579 if (status2 & TST_LCOL) 576 if (status2 & TST_LCOL)
580 priv->stats.tx_window_errors ++; 577 dev->stats.tx_window_errors++;
581 if (status2 & TST_LCAR) 578 if (status2 & TST_LCAR)
582 priv->stats.tx_carrier_errors ++; 579 dev->stats.tx_carrier_errors++;
583 if (status2 & TST_UFLO) 580 if (status2 & TST_UFLO)
584 priv->stats.tx_fifo_errors ++; 581 dev->stats.tx_fifo_errors++;
585 continue; 582 continue;
586 } 583 }
587 priv->stats.tx_packets ++; 584 dev->stats.tx_packets++;
588 len = am_readword (dev, hdraddr + 4); 585 len = am_readword (dev, hdraddr + 4);
589 priv->stats.tx_bytes += -len; 586 dev->stats.tx_bytes += -len;
590 } while (priv->txtail != priv->txhead); 587 } while (priv->txtail != priv->txhead);
591 588
592 netif_wake_queue(dev); 589 netif_wake_queue(dev);
@@ -616,7 +613,7 @@ am79c961_interrupt(int irq, void *dev_id)
616 } 613 }
617 if (status & CSR0_MISS) { 614 if (status & CSR0_MISS) {
618 handled = 1; 615 handled = 1;
619 priv->stats.rx_dropped ++; 616 dev->stats.rx_dropped++;
620 } 617 }
621 if (status & CSR0_CERR) { 618 if (status & CSR0_CERR) {
622 handled = 1; 619 handled = 1;
diff --git a/drivers/net/arm/am79c961a.h b/drivers/net/arm/am79c961a.h
index 483009fe6ec2..fd634d32756b 100644
--- a/drivers/net/arm/am79c961a.h
+++ b/drivers/net/arm/am79c961a.h
@@ -130,7 +130,6 @@
130#define ISALED0_LNKST 0x8000 130#define ISALED0_LNKST 0x8000
131 131
132struct dev_priv { 132struct dev_priv {
133 struct net_device_stats stats;
134 unsigned long rxbuffer[RX_BUFFERS]; 133 unsigned long rxbuffer[RX_BUFFERS];
135 unsigned long txbuffer[TX_BUFFERS]; 134 unsigned long txbuffer[TX_BUFFERS];
136 unsigned char txhead; 135 unsigned char txhead;
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 4a5ec9470aa1..5a77001b6d10 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -175,8 +175,6 @@ struct ep93xx_priv
175 struct net_device *dev; 175 struct net_device *dev;
176 struct napi_struct napi; 176 struct napi_struct napi;
177 177
178 struct net_device_stats stats;
179
180 struct mii_if_info mii; 178 struct mii_if_info mii;
181 u8 mdc_divisor; 179 u8 mdc_divisor;
182}; 180};
@@ -230,12 +228,6 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d
230 pr_info("mdio write timed out\n"); 228 pr_info("mdio write timed out\n");
231} 229}
232 230
233static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
234{
235 struct ep93xx_priv *ep = netdev_priv(dev);
236 return &(ep->stats);
237}
238
239static int ep93xx_rx(struct net_device *dev, int processed, int budget) 231static int ep93xx_rx(struct net_device *dev, int processed, int budget)
240{ 232{
241 struct ep93xx_priv *ep = netdev_priv(dev); 233 struct ep93xx_priv *ep = netdev_priv(dev);
@@ -267,15 +259,15 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
267 pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1); 259 pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
268 260
269 if (!(rstat0 & RSTAT0_RWE)) { 261 if (!(rstat0 & RSTAT0_RWE)) {
270 ep->stats.rx_errors++; 262 dev->stats.rx_errors++;
271 if (rstat0 & RSTAT0_OE) 263 if (rstat0 & RSTAT0_OE)
272 ep->stats.rx_fifo_errors++; 264 dev->stats.rx_fifo_errors++;
273 if (rstat0 & RSTAT0_FE) 265 if (rstat0 & RSTAT0_FE)
274 ep->stats.rx_frame_errors++; 266 dev->stats.rx_frame_errors++;
275 if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA)) 267 if (rstat0 & (RSTAT0_RUNT | RSTAT0_EDATA))
276 ep->stats.rx_length_errors++; 268 dev->stats.rx_length_errors++;
277 if (rstat0 & RSTAT0_CRCE) 269 if (rstat0 & RSTAT0_CRCE)
278 ep->stats.rx_crc_errors++; 270 dev->stats.rx_crc_errors++;
279 goto err; 271 goto err;
280 } 272 }
281 273
@@ -300,10 +292,10 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
300 292
301 netif_receive_skb(skb); 293 netif_receive_skb(skb);
302 294
303 ep->stats.rx_packets++; 295 dev->stats.rx_packets++;
304 ep->stats.rx_bytes += length; 296 dev->stats.rx_bytes += length;
305 } else { 297 } else {
306 ep->stats.rx_dropped++; 298 dev->stats.rx_dropped++;
307 } 299 }
308 300
309err: 301err:
@@ -359,7 +351,7 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
359 int entry; 351 int entry;
360 352
361 if (unlikely(skb->len > MAX_PKT_SIZE)) { 353 if (unlikely(skb->len > MAX_PKT_SIZE)) {
362 ep->stats.tx_dropped++; 354 dev->stats.tx_dropped++;
363 dev_kfree_skb(skb); 355 dev_kfree_skb(skb);
364 return NETDEV_TX_OK; 356 return NETDEV_TX_OK;
365 } 357 }
@@ -415,17 +407,17 @@ static void ep93xx_tx_complete(struct net_device *dev)
415 if (tstat0 & TSTAT0_TXWE) { 407 if (tstat0 & TSTAT0_TXWE) {
416 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff; 408 int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
417 409
418 ep->stats.tx_packets++; 410 dev->stats.tx_packets++;
419 ep->stats.tx_bytes += length; 411 dev->stats.tx_bytes += length;
420 } else { 412 } else {
421 ep->stats.tx_errors++; 413 dev->stats.tx_errors++;
422 } 414 }
423 415
424 if (tstat0 & TSTAT0_OW) 416 if (tstat0 & TSTAT0_OW)
425 ep->stats.tx_window_errors++; 417 dev->stats.tx_window_errors++;
426 if (tstat0 & TSTAT0_TXU) 418 if (tstat0 & TSTAT0_TXU)
427 ep->stats.tx_fifo_errors++; 419 dev->stats.tx_fifo_errors++;
428 ep->stats.collisions += (tstat0 >> 16) & 0x1f; 420 dev->stats.collisions += (tstat0 >> 16) & 0x1f;
429 421
430 ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1); 422 ep->tx_clean_pointer = (entry + 1) & (TX_QUEUE_ENTRIES - 1);
431 if (ep->tx_pending == TX_QUEUE_ENTRIES) 423 if (ep->tx_pending == TX_QUEUE_ENTRIES)
@@ -758,7 +750,6 @@ static const struct net_device_ops ep93xx_netdev_ops = {
758 .ndo_open = ep93xx_open, 750 .ndo_open = ep93xx_open,
759 .ndo_stop = ep93xx_close, 751 .ndo_stop = ep93xx_close,
760 .ndo_start_xmit = ep93xx_xmit, 752 .ndo_start_xmit = ep93xx_xmit,
761 .ndo_get_stats = ep93xx_get_stats,
762 .ndo_do_ioctl = ep93xx_ioctl, 753 .ndo_do_ioctl = ep93xx_ioctl,
763 .ndo_validate_addr = eth_validate_addr, 754 .ndo_validate_addr = eth_validate_addr,
764 .ndo_change_mtu = eth_change_mtu, 755 .ndo_change_mtu = eth_change_mtu,
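[Editor's note] ep93xx_eth can delete its .ndo_get_stats hook entirely because the networking core falls back to the embedded counters when a driver does not provide one. A simplified sketch of that fallback logic; the exact signature of dev_get_stats() varies across kernel versions, so this is not the verbatim core source:

	static const struct net_device_stats *
	sketch_dev_get_stats(struct net_device *dev)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_get_stats)
			return ops->ndo_get_stats(dev);  /* driver override */
		return &dev->stats;                      /* embedded default */
	}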
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index b17ab5153f51..b00781c02d5d 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -68,7 +68,6 @@ static int ether1_open(struct net_device *dev);
68static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); 68static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
69static irqreturn_t ether1_interrupt(int irq, void *dev_id); 69static irqreturn_t ether1_interrupt(int irq, void *dev_id);
70static int ether1_close(struct net_device *dev); 70static int ether1_close(struct net_device *dev);
71static struct net_device_stats *ether1_getstats(struct net_device *dev);
72static void ether1_setmulticastlist(struct net_device *dev); 71static void ether1_setmulticastlist(struct net_device *dev);
73static void ether1_timeout(struct net_device *dev); 72static void ether1_timeout(struct net_device *dev);
74 73
@@ -649,8 +648,6 @@ ether1_open (struct net_device *dev)
649 if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev)) 648 if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
650 return -EAGAIN; 649 return -EAGAIN;
651 650
652 memset (&priv(dev)->stats, 0, sizeof (struct net_device_stats));
653
654 if (ether1_init_for_open (dev)) { 651 if (ether1_init_for_open (dev)) {
655 free_irq (dev->irq, dev); 652 free_irq (dev->irq, dev);
656 return -EAGAIN; 653 return -EAGAIN;
@@ -673,7 +670,7 @@ ether1_timeout(struct net_device *dev)
673 if (ether1_init_for_open (dev)) 670 if (ether1_init_for_open (dev))
674 printk (KERN_ERR "%s: unable to restart interface\n", dev->name); 671 printk (KERN_ERR "%s: unable to restart interface\n", dev->name);
675 672
676 priv(dev)->stats.tx_errors++; 673 dev->stats.tx_errors++;
677 netif_wake_queue(dev); 674 netif_wake_queue(dev);
678} 675}
679 676
@@ -802,21 +799,21 @@ again:
802 799
803 while (nop.nop_status & STAT_COMPLETE) { 800 while (nop.nop_status & STAT_COMPLETE) {
804 if (nop.nop_status & STAT_OK) { 801 if (nop.nop_status & STAT_OK) {
805 priv(dev)->stats.tx_packets ++; 802 dev->stats.tx_packets++;
806 priv(dev)->stats.collisions += (nop.nop_status & STAT_COLLISIONS); 803 dev->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
807 } else { 804 } else {
808 priv(dev)->stats.tx_errors ++; 805 dev->stats.tx_errors++;
809 806
810 if (nop.nop_status & STAT_COLLAFTERTX) 807 if (nop.nop_status & STAT_COLLAFTERTX)
811 priv(dev)->stats.collisions ++; 808 dev->stats.collisions++;
812 if (nop.nop_status & STAT_NOCARRIER) 809 if (nop.nop_status & STAT_NOCARRIER)
813 priv(dev)->stats.tx_carrier_errors ++; 810 dev->stats.tx_carrier_errors++;
814 if (nop.nop_status & STAT_TXLOSTCTS) 811 if (nop.nop_status & STAT_TXLOSTCTS)
815 printk (KERN_WARNING "%s: cts lost\n", dev->name); 812 printk (KERN_WARNING "%s: cts lost\n", dev->name);
816 if (nop.nop_status & STAT_TXSLOWDMA) 813 if (nop.nop_status & STAT_TXSLOWDMA)
817 priv(dev)->stats.tx_fifo_errors ++; 814 dev->stats.tx_fifo_errors++;
818 if (nop.nop_status & STAT_COLLEXCESSIVE) 815 if (nop.nop_status & STAT_COLLEXCESSIVE)
819 priv(dev)->stats.collisions += 16; 816 dev->stats.collisions += 16;
820 } 817 }
821 818
822 if (nop.nop_link == caddr) { 819 if (nop.nop_link == caddr) {
@@ -879,13 +876,13 @@ ether1_recv_done (struct net_device *dev)
879 876
880 skb->protocol = eth_type_trans (skb, dev); 877 skb->protocol = eth_type_trans (skb, dev);
881 netif_rx (skb); 878 netif_rx (skb);
882 priv(dev)->stats.rx_packets ++; 879 dev->stats.rx_packets++;
883 } else 880 } else
884 priv(dev)->stats.rx_dropped ++; 881 dev->stats.rx_dropped++;
885 } else { 882 } else {
886 printk(KERN_WARNING "%s: %s\n", dev->name, 883 printk(KERN_WARNING "%s: %s\n", dev->name,
887 (rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid"); 884 (rbd.rbd_status & RBD_EOF) ? "oversized packet" : "acnt not valid");
888 priv(dev)->stats.rx_dropped ++; 885 dev->stats.rx_dropped++;
889 } 886 }
890 887
891 nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS); 888 nexttail = ether1_readw(dev, priv(dev)->rx_tail, rfd_t, rfd_link, NORMALIRQS);
@@ -939,7 +936,7 @@ ether1_interrupt (int irq, void *dev_id)
939 printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name); 936 printk (KERN_WARNING "%s: RU went not ready: RU suspended\n", dev->name);
940 ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS); 937 ether1_writew(dev, SCB_CMDRXRESUME, SCB_ADDR, scb_t, scb_command, NORMALIRQS);
941 writeb(CTRL_CA, REG_CONTROL); 938 writeb(CTRL_CA, REG_CONTROL);
942 priv(dev)->stats.rx_dropped ++; /* we suspended due to lack of buffer space */ 939 dev->stats.rx_dropped++; /* we suspended due to lack of buffer space */
943 } else 940 } else
944 printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name, 941 printk(KERN_WARNING "%s: RU went not ready: %04X\n", dev->name,
945 ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS)); 942 ether1_readw(dev, SCB_ADDR, scb_t, scb_status, NORMALIRQS));
@@ -962,12 +959,6 @@ ether1_close (struct net_device *dev)
962 return 0; 959 return 0;
963} 960}
964 961
965static struct net_device_stats *
966ether1_getstats (struct net_device *dev)
967{
968 return &priv(dev)->stats;
969}
970
971/* 962/*
972 * Set or clear the multicast filter for this adaptor. 963 * Set or clear the multicast filter for this adaptor.
973 * num_addrs == -1 Promiscuous mode, receive all packets. 964 * num_addrs == -1 Promiscuous mode, receive all packets.
@@ -994,7 +985,6 @@ static const struct net_device_ops ether1_netdev_ops = {
994 .ndo_open = ether1_open, 985 .ndo_open = ether1_open,
995 .ndo_stop = ether1_close, 986 .ndo_stop = ether1_close,
996 .ndo_start_xmit = ether1_sendpacket, 987 .ndo_start_xmit = ether1_sendpacket,
997 .ndo_get_stats = ether1_getstats,
998 .ndo_set_multicast_list = ether1_setmulticastlist, 988 .ndo_set_multicast_list = ether1_setmulticastlist,
999 .ndo_tx_timeout = ether1_timeout, 989 .ndo_tx_timeout = ether1_timeout,
1000 .ndo_validate_addr = eth_validate_addr, 990 .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/arm/ether1.h b/drivers/net/arm/ether1.h
index c8a4b2389d85..3a5830ab3dc7 100644
--- a/drivers/net/arm/ether1.h
+++ b/drivers/net/arm/ether1.h
@@ -38,7 +38,6 @@
38 38
39struct ether1_priv { 39struct ether1_priv {
40 void __iomem *base; 40 void __iomem *base;
41 struct net_device_stats stats;
42 unsigned int tx_link; 41 unsigned int tx_link;
43 unsigned int tx_head; 42 unsigned int tx_head;
44 volatile unsigned int tx_tail; 43 volatile unsigned int tx_tail;
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 1361b7367c28..44a8746f4014 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -81,7 +81,6 @@ static int ether3_open (struct net_device *dev);
81static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev); 81static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
82static irqreturn_t ether3_interrupt (int irq, void *dev_id); 82static irqreturn_t ether3_interrupt (int irq, void *dev_id);
83static int ether3_close (struct net_device *dev); 83static int ether3_close (struct net_device *dev);
84static struct net_device_stats *ether3_getstats (struct net_device *dev);
85static void ether3_setmulticastlist (struct net_device *dev); 84static void ether3_setmulticastlist (struct net_device *dev);
86static void ether3_timeout(struct net_device *dev); 85static void ether3_timeout(struct net_device *dev);
87 86
@@ -323,8 +322,6 @@ ether3_init_for_open(struct net_device *dev)
323{ 322{
324 int i; 323 int i;
325 324
326 memset(&priv(dev)->stats, 0, sizeof(struct net_device_stats));
327
328 /* Reset the chip */ 325 /* Reset the chip */
329 ether3_outw(CFG2_RESET, REG_CONFIG2); 326 ether3_outw(CFG2_RESET, REG_CONFIG2);
330 udelay(4); 327 udelay(4);
@@ -442,15 +439,6 @@ ether3_close(struct net_device *dev)
442} 439}
443 440
444/* 441/*
445 * Get the current statistics. This may be called with the card open or
446 * closed.
447 */
448static struct net_device_stats *ether3_getstats(struct net_device *dev)
449{
450 return &priv(dev)->stats;
451}
452
453/*
454 * Set or clear promiscuous/multicast mode filter for this adaptor. 442 * Set or clear promiscuous/multicast mode filter for this adaptor.
455 * 443 *
456 * We don't attempt any packet filtering. The card may have a SEEQ 8004 444 * We don't attempt any packet filtering. The card may have a SEEQ 8004
@@ -490,7 +478,7 @@ static void ether3_timeout(struct net_device *dev)
490 local_irq_restore(flags); 478 local_irq_restore(flags);
491 479
492 priv(dev)->regs.config2 |= CFG2_CTRLO; 480 priv(dev)->regs.config2 |= CFG2_CTRLO;
493 priv(dev)->stats.tx_errors += 1; 481 dev->stats.tx_errors += 1;
494 ether3_outw(priv(dev)->regs.config2, REG_CONFIG2); 482 ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
495 priv(dev)->tx_head = priv(dev)->tx_tail = 0; 483 priv(dev)->tx_head = priv(dev)->tx_tail = 0;
496 484
@@ -509,7 +497,7 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
509 497
510 if (priv(dev)->broken) { 498 if (priv(dev)->broken) {
511 dev_kfree_skb(skb); 499 dev_kfree_skb(skb);
512 priv(dev)->stats.tx_dropped ++; 500 dev->stats.tx_dropped++;
513 netif_start_queue(dev); 501 netif_start_queue(dev);
514 return NETDEV_TX_OK; 502 return NETDEV_TX_OK;
515 } 503 }
@@ -673,7 +661,7 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
673 } else 661 } else
674 goto dropping; 662 goto dropping;
675 } else { 663 } else {
676 struct net_device_stats *stats = &priv(dev)->stats; 664 struct net_device_stats *stats = &dev->stats;
677 ether3_outw(next_ptr >> 8, REG_RECVEND); 665 ether3_outw(next_ptr >> 8, REG_RECVEND);
678 if (status & RXSTAT_OVERSIZE) stats->rx_over_errors ++; 666 if (status & RXSTAT_OVERSIZE) stats->rx_over_errors ++;
679 if (status & RXSTAT_CRCERROR) stats->rx_crc_errors ++; 667 if (status & RXSTAT_CRCERROR) stats->rx_crc_errors ++;
@@ -685,14 +673,14 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
685 while (-- maxcnt); 673 while (-- maxcnt);
686 674
687done: 675done:
688 priv(dev)->stats.rx_packets += received; 676 dev->stats.rx_packets += received;
689 priv(dev)->rx_head = next_ptr; 677 priv(dev)->rx_head = next_ptr;
690 /* 678 /*
691 * If rx went off line, then that means that the buffer may be full. We 679 * If rx went off line, then that means that the buffer may be full. We
692 * have dropped at least one packet. 680 * have dropped at least one packet.
693 */ 681 */
694 if (!(ether3_inw(REG_STATUS) & STAT_RXON)) { 682 if (!(ether3_inw(REG_STATUS) & STAT_RXON)) {
695 priv(dev)->stats.rx_dropped ++; 683 dev->stats.rx_dropped++;
696 ether3_outw(next_ptr, REG_RECVPTR); 684 ether3_outw(next_ptr, REG_RECVPTR);
697 ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND); 685 ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
698 } 686 }
@@ -710,7 +698,7 @@ dropping:{
710 last_warned = jiffies; 698 last_warned = jiffies;
711 printk("%s: memory squeeze, dropping packet.\n", dev->name); 699 printk("%s: memory squeeze, dropping packet.\n", dev->name);
712 } 700 }
713 priv(dev)->stats.rx_dropped ++; 701 dev->stats.rx_dropped++;
714 goto done; 702 goto done;
715 } 703 }
716} 704}
@@ -743,13 +731,13 @@ static void ether3_tx(struct net_device *dev)
743 * Update errors 731 * Update errors
744 */ 732 */
745 if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS))) 733 if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS)))
746 priv(dev)->stats.tx_packets++; 734 dev->stats.tx_packets++;
747 else { 735 else {
748 priv(dev)->stats.tx_errors ++; 736 dev->stats.tx_errors++;
749 if (status & TXSTAT_16COLLISIONS) 737 if (status & TXSTAT_16COLLISIONS)
750 priv(dev)->stats.collisions += 16; 738 dev->stats.collisions += 16;
751 if (status & TXSTAT_BABBLED) 739 if (status & TXSTAT_BABBLED)
752 priv(dev)->stats.tx_fifo_errors ++; 740 dev->stats.tx_fifo_errors++;
753 } 741 }
754 742
755 tx_tail = (tx_tail + 1) & 15; 743 tx_tail = (tx_tail + 1) & 15;
@@ -773,7 +761,6 @@ static const struct net_device_ops ether3_netdev_ops = {
773 .ndo_open = ether3_open, 761 .ndo_open = ether3_open,
774 .ndo_stop = ether3_close, 762 .ndo_stop = ether3_close,
775 .ndo_start_xmit = ether3_sendpacket, 763 .ndo_start_xmit = ether3_sendpacket,
776 .ndo_get_stats = ether3_getstats,
777 .ndo_set_multicast_list = ether3_setmulticastlist, 764 .ndo_set_multicast_list = ether3_setmulticastlist,
778 .ndo_tx_timeout = ether3_timeout, 765 .ndo_tx_timeout = ether3_timeout,
779 .ndo_validate_addr = eth_validate_addr, 766 .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/arm/ether3.h b/drivers/net/arm/ether3.h
index 1921a3a07da7..2db63b08bdf3 100644
--- a/drivers/net/arm/ether3.h
+++ b/drivers/net/arm/ether3.h
@@ -164,7 +164,6 @@ struct dev_priv {
164 unsigned char tx_head; /* buffer nr to insert next packet */ 164 unsigned char tx_head; /* buffer nr to insert next packet */
165 unsigned char tx_tail; /* buffer nr of transmitting packet */ 165 unsigned char tx_tail; /* buffer nr of transmitting packet */
166 unsigned int rx_head; /* address to fetch next packet from */ 166 unsigned int rx_head; /* address to fetch next packet from */
167 struct net_device_stats stats;
168 struct timer_list timer; 167 struct timer_list timer;
169 int broken; /* 0 = ok, 1 = something went wrong */ 168 int broken; /* 0 = ok, 1 = something went wrong */
170}; 169};
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index b57d7dee389a..3134e5326231 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -362,7 +362,7 @@ static void *slow_memcpy( void *dst, const void *src, size_t len )
362 *cto++ = *cfrom++; 362 *cto++ = *cfrom++;
363 MFPDELAY(); 363 MFPDELAY();
364 } 364 }
365 return( dst ); 365 return dst;
366} 366}
367 367
368 368
@@ -449,7 +449,7 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag,
449 vbr[2] = save_berr; 449 vbr[2] = save_berr;
450 local_irq_restore(flags); 450 local_irq_restore(flags);
451 451
452 return( ret ); 452 return ret;
453} 453}
454 454
455static const struct net_device_ops lance_netdev_ops = { 455static const struct net_device_ops lance_netdev_ops = {
@@ -526,7 +526,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
526 goto probe_ok; 526 goto probe_ok;
527 527
528 probe_fail: 528 probe_fail:
529 return( 0 ); 529 return 0;
530 530
531 probe_ok: 531 probe_ok:
532 lp = netdev_priv(dev); 532 lp = netdev_priv(dev);
@@ -556,7 +556,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
556 if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, 556 if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO,
557 "PAM/Riebl-ST Ethernet", dev)) { 557 "PAM/Riebl-ST Ethernet", dev)) {
558 printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); 558 printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
559 return( 0 ); 559 return 0;
560 } 560 }
561 dev->irq = (unsigned short)IRQ_AUTO_5; 561 dev->irq = (unsigned short)IRQ_AUTO_5;
562 } 562 }
@@ -568,12 +568,12 @@ static unsigned long __init lance_probe1( struct net_device *dev,
568 unsigned long irq = atari_register_vme_int(); 568 unsigned long irq = atari_register_vme_int();
569 if (!irq) { 569 if (!irq) {
570 printk( "Lance: request for VME interrupt failed\n" ); 570 printk( "Lance: request for VME interrupt failed\n" );
571 return( 0 ); 571 return 0;
572 } 572 }
573 if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO, 573 if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
574 "Riebl-VME Ethernet", dev)) { 574 "Riebl-VME Ethernet", dev)) {
575 printk( "Lance: request for irq %ld failed\n", irq ); 575 printk( "Lance: request for irq %ld failed\n", irq );
576 return( 0 ); 576 return 0;
577 } 577 }
578 dev->irq = irq; 578 dev->irq = irq;
579 } 579 }
@@ -637,7 +637,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
637 /* XXX MSch */ 637 /* XXX MSch */
638 dev->watchdog_timeo = TX_TIMEOUT; 638 dev->watchdog_timeo = TX_TIMEOUT;
639 639
640 return( 1 ); 640 return 1;
641} 641}
642 642
643 643
@@ -666,7 +666,7 @@ static int lance_open( struct net_device *dev )
666 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", 666 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
667 dev->name, i, DREG )); 667 dev->name, i, DREG ));
668 DREG = CSR0_STOP; 668 DREG = CSR0_STOP;
669 return( -EIO ); 669 return -EIO;
670 } 670 }
671 DREG = CSR0_IDON; 671 DREG = CSR0_IDON;
672 DREG = CSR0_STRT; 672 DREG = CSR0_STRT;
@@ -676,7 +676,7 @@ static int lance_open( struct net_device *dev )
676 676
677 DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG )); 677 DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
678 678
679 return( 0 ); 679 return 0;
680} 680}
681 681
682 682
@@ -1126,13 +1126,13 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
1126 int i; 1126 int i;
1127 1127
1128 if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL) 1128 if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL)
1129 return( -EOPNOTSUPP ); 1129 return -EOPNOTSUPP;
1130 1130
1131 if (netif_running(dev)) { 1131 if (netif_running(dev)) {
1132 /* Only possible while card isn't started */ 1132 /* Only possible while card isn't started */
1133 DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n", 1133 DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n",
1134 dev->name )); 1134 dev->name ));
1135 return( -EIO ); 1135 return -EIO;
1136 } 1136 }
1137 1137
1138 memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len ); 1138 memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
@@ -1142,7 +1142,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
1142 /* set also the magic for future sessions */ 1142 /* set also the magic for future sessions */
1143 *RIEBL_MAGIC_ADDR = RIEBL_MAGIC; 1143 *RIEBL_MAGIC_ADDR = RIEBL_MAGIC;
1144 1144
1145 return( 0 ); 1145 return 0;
1146} 1146}
1147 1147
1148 1148
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 52abbbdf8a08..ef4115b897bf 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -559,7 +559,6 @@ struct atl1c_adapter {
559 struct napi_struct napi; 559 struct napi_struct napi;
560 struct atl1c_hw hw; 560 struct atl1c_hw hw;
561 struct atl1c_hw_stats hw_stats; 561 struct atl1c_hw_stats hw_stats;
562 struct net_device_stats net_stats;
563 struct mii_if_info mii; /* MII interface info */ 562 struct mii_if_info mii; /* MII interface info */
564 u16 rx_buffer_len; 563 u16 rx_buffer_len;
565 564
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index d8501f060957..919080b2c3a5 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -480,7 +480,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
480 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D); 480 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D);
481 } 481 }
482 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2 482 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2
483 || hw->nic_type == athr_l2c || hw->nic_type == athr_l2c) { 483 || hw->nic_type == athr_l2c) {
484 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); 484 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
485 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD); 485 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
486 } 486 }
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index c7b8ef507ebd..553230eb365c 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -1562,7 +1562,7 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
1562{ 1562{
1563 struct atl1c_adapter *adapter = netdev_priv(netdev); 1563 struct atl1c_adapter *adapter = netdev_priv(netdev);
1564 struct atl1c_hw_stats *hw_stats = &adapter->hw_stats; 1564 struct atl1c_hw_stats *hw_stats = &adapter->hw_stats;
1565 struct net_device_stats *net_stats = &adapter->net_stats; 1565 struct net_device_stats *net_stats = &netdev->stats;
1566 1566
1567 atl1c_update_hw_stats(adapter); 1567 atl1c_update_hw_stats(adapter);
1568 net_stats->rx_packets = hw_stats->rx_ok; 1568 net_stats->rx_packets = hw_stats->rx_ok;
@@ -1590,7 +1590,7 @@ static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
1590 net_stats->tx_aborted_errors = hw_stats->tx_abort_col; 1590 net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
1591 net_stats->tx_window_errors = hw_stats->tx_late_col; 1591 net_stats->tx_window_errors = hw_stats->tx_late_col;
1592 1592
1593 return &adapter->net_stats; 1593 return net_stats;
1594} 1594}
1595 1595
1596static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter) 1596static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
@@ -1700,7 +1700,7 @@ static irqreturn_t atl1c_intr(int irq, void *data)
1700 1700
1701 /* link event */ 1701 /* link event */
1702 if (status & (ISR_GPHY | ISR_MANUAL)) { 1702 if (status & (ISR_GPHY | ISR_MANUAL)) {
1703 adapter->net_stats.tx_carrier_errors++; 1703 netdev->stats.tx_carrier_errors++;
1704 atl1c_link_chg_event(adapter); 1704 atl1c_link_chg_event(adapter);
1705 break; 1705 break;
1706 } 1706 }
@@ -1719,7 +1719,7 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
1719 * cannot figure out if the packet is fragmented or not, 1719 * cannot figure out if the packet is fragmented or not,
1720 * so we tell the KERNEL CHECKSUM_NONE 1720 * so we tell the KERNEL CHECKSUM_NONE
1721 */ 1721 */
1722 skb->ip_summed = CHECKSUM_NONE; 1722 skb_checksum_none_assert(skb);
1723} 1723}
1724 1724
1725static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid) 1725static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid)
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 1acea5774e89..56ace3fbe40d 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1331,7 +1331,7 @@ static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
1331 u16 pkt_flags; 1331 u16 pkt_flags;
1332 u16 err_flags; 1332 u16 err_flags;
1333 1333
1334 skb->ip_summed = CHECKSUM_NONE; 1334 skb_checksum_none_assert(skb);
1335 pkt_flags = prrs->pkt_flag; 1335 pkt_flags = prrs->pkt_flag;
1336 err_flags = prrs->err_flag; 1336 err_flags = prrs->err_flag;
1337 if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) && 1337 if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
@@ -2316,7 +2316,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2316 netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64); 2316 netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
2317 2317
2318 init_timer(&adapter->phy_config_timer); 2318 init_timer(&adapter->phy_config_timer);
2319 adapter->phy_config_timer.function = &atl1e_phy_config; 2319 adapter->phy_config_timer.function = atl1e_phy_config;
2320 adapter->phy_config_timer.data = (unsigned long) adapter; 2320 adapter->phy_config_timer.data = (unsigned long) adapter;
2321 2321
2322 /* get user settings */ 2322 /* get user settings */
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index c73be2848319..b8c053f76878 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1811,7 +1811,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1811 * the higher layers and let it be sorted out there. 1811 * the higher layers and let it be sorted out there.
1812 */ 1812 */
1813 1813
1814 skb->ip_summed = CHECKSUM_NONE; 1814 skb_checksum_none_assert(skb);
1815 1815
1816 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { 1816 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
1817 if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | 1817 if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
@@ -2100,9 +2100,9 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
2100{ 2100{
2101 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); 2101 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
2102 u16 next_to_use = atomic_read(&tpd_ring->next_to_use); 2102 u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
2103 return ((next_to_clean > next_to_use) ? 2103 return (next_to_clean > next_to_use) ?
2104 next_to_clean - next_to_use - 1 : 2104 next_to_clean - next_to_use - 1 :
2105 tpd_ring->count + next_to_clean - next_to_use - 1); 2105 tpd_ring->count + next_to_clean - next_to_use - 1;
2106} 2106}
2107 2107
2108static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, 2108static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
@@ -3043,7 +3043,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
3043 netif_carrier_off(netdev); 3043 netif_carrier_off(netdev);
3044 netif_stop_queue(netdev); 3044 netif_stop_queue(netdev);
3045 3045
3046 setup_timer(&adapter->phy_config_timer, &atl1_phy_config, 3046 setup_timer(&adapter->phy_config_timer, atl1_phy_config,
3047 (unsigned long)adapter); 3047 (unsigned long)adapter);
3048 adapter->phy_timer_pending = false; 3048 adapter->phy_timer_pending = false;
3049 3049
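[Editor's note] atl1 reaches the same end state as the init_timer() hunks elsewhere in this patch via setup_timer(), which bundles the three assignments into one call. A sketch of the helper as it existed in this era (pre-4.15 timer API), reconstructed rather than quoted from this tree:

	static inline void sketch_setup_timer(struct timer_list *timer,
					      void (*function)(unsigned long),
					      unsigned long data)
	{
		timer->function = function;
		timer->data = data;
		init_timer(timer);
	}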
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 8da87383fb39..29c0265ccc5d 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -51,10 +51,10 @@
51 51
52#define ATL2_DRV_VERSION "2.2.3" 52#define ATL2_DRV_VERSION "2.2.3"
53 53
54static char atl2_driver_name[] = "atl2"; 54static const char atl2_driver_name[] = "atl2";
55static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver"; 55static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
56static char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation."; 56static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
57static char atl2_driver_version[] = ATL2_DRV_VERSION; 57static const char atl2_driver_version[] = ATL2_DRV_VERSION;
58 58
59MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>"); 59MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
60MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver"); 60MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
@@ -1444,11 +1444,11 @@ static int __devinit atl2_probe(struct pci_dev *pdev,
1444 atl2_check_options(adapter); 1444 atl2_check_options(adapter);
1445 1445
1446 init_timer(&adapter->watchdog_timer); 1446 init_timer(&adapter->watchdog_timer);
1447 adapter->watchdog_timer.function = &atl2_watchdog; 1447 adapter->watchdog_timer.function = atl2_watchdog;
1448 adapter->watchdog_timer.data = (unsigned long) adapter; 1448 adapter->watchdog_timer.data = (unsigned long) adapter;
1449 1449
1450 init_timer(&adapter->phy_config_timer); 1450 init_timer(&adapter->phy_config_timer);
1451 adapter->phy_config_timer.function = &atl2_phy_config; 1451 adapter->phy_config_timer.function = atl2_phy_config;
1452 adapter->phy_config_timer.data = (unsigned long) adapter; 1452 adapter->phy_config_timer.data = (unsigned long) adapter;
1453 1453
1454 INIT_WORK(&adapter->reset_task, atl2_reset_task); 1454 INIT_WORK(&adapter->reset_task, atl2_reset_task);
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index bd2f9d331dac..dfd96b20547f 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -445,7 +445,7 @@ static int net_open(struct net_device *dev)
445 init_timer(&lp->timer); 445 init_timer(&lp->timer);
446 lp->timer.expires = jiffies + TIMED_CHECKER; 446 lp->timer.expires = jiffies + TIMED_CHECKER;
447 lp->timer.data = (unsigned long)dev; 447 lp->timer.data = (unsigned long)dev;
448 lp->timer.function = &atp_timed_checker; /* timer handler */ 448 lp->timer.function = atp_timed_checker; /* timer handler */
449 add_timer(&lp->timer); 449 add_timer(&lp->timer);
450 450
451 netif_start_queue(dev); 451 netif_start_queue(dev);
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 15ae6df2ff00..43489f89c142 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -13,7 +13,7 @@
13 * converted to use linux-2.6.x's PHY framework 13 * converted to use linux-2.6.x's PHY framework
14 * 14 *
15 * Author: MontaVista Software, Inc. 15 * Author: MontaVista Software, Inc.
16 * ppopov@mvista.com or source@mvista.com 16 * ppopov@mvista.com or source@mvista.com
17 * 17 *
18 * ######################################################################## 18 * ########################################################################
19 * 19 *
@@ -34,6 +34,8 @@
34 * 34 *
35 * 35 *
36 */ 36 */
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
37#include <linux/capability.h> 39#include <linux/capability.h>
38#include <linux/dma-mapping.h> 40#include <linux/dma-mapping.h>
39#include <linux/module.h> 41#include <linux/module.h>
@@ -56,11 +58,11 @@
56#include <linux/crc32.h> 58#include <linux/crc32.h>
57#include <linux/phy.h> 59#include <linux/phy.h>
58#include <linux/platform_device.h> 60#include <linux/platform_device.h>
61#include <linux/cpu.h>
62#include <linux/io.h>
59 63
60#include <asm/cpu.h>
61#include <asm/mipsregs.h> 64#include <asm/mipsregs.h>
62#include <asm/irq.h> 65#include <asm/irq.h>
63#include <asm/io.h>
64#include <asm/processor.h> 66#include <asm/processor.h>
65 67
66#include <au1000.h> 68#include <au1000.h>
@@ -152,11 +154,11 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
152 154
153 spin_lock_irqsave(&aup->lock, flags); 155 spin_lock_irqsave(&aup->lock, flags);
154 156
155 if(force_reset || (!aup->mac_enabled)) { 157 if (force_reset || (!aup->mac_enabled)) {
156 *aup->enable = MAC_EN_CLOCK_ENABLE; 158 writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
157 au_sync_delay(2); 159 au_sync_delay(2);
158 *aup->enable = (MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 160 writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
159 | MAC_EN_CLOCK_ENABLE); 161 | MAC_EN_CLOCK_ENABLE), &aup->enable);
160 au_sync_delay(2); 162 au_sync_delay(2);
161 163
162 aup->mac_enabled = 1; 164 aup->mac_enabled = 1;
@@ -171,12 +173,12 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
171static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg) 173static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
172{ 174{
173 struct au1000_private *aup = netdev_priv(dev); 175 struct au1000_private *aup = netdev_priv(dev);
174 volatile u32 *const mii_control_reg = &aup->mac->mii_control; 176 u32 *const mii_control_reg = &aup->mac->mii_control;
175 volatile u32 *const mii_data_reg = &aup->mac->mii_data; 177 u32 *const mii_data_reg = &aup->mac->mii_data;
176 u32 timedout = 20; 178 u32 timedout = 20;
177 u32 mii_control; 179 u32 mii_control;
178 180
179 while (*mii_control_reg & MAC_MII_BUSY) { 181 while (readl(mii_control_reg) & MAC_MII_BUSY) {
180 mdelay(1); 182 mdelay(1);
181 if (--timedout == 0) { 183 if (--timedout == 0) {
182 netdev_err(dev, "read_MII busy timeout!!\n"); 184 netdev_err(dev, "read_MII busy timeout!!\n");
@@ -187,29 +189,29 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
187 mii_control = MAC_SET_MII_SELECT_REG(reg) | 189 mii_control = MAC_SET_MII_SELECT_REG(reg) |
188 MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ; 190 MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
189 191
190 *mii_control_reg = mii_control; 192 writel(mii_control, mii_control_reg);
191 193
192 timedout = 20; 194 timedout = 20;
193 while (*mii_control_reg & MAC_MII_BUSY) { 195 while (readl(mii_control_reg) & MAC_MII_BUSY) {
194 mdelay(1); 196 mdelay(1);
195 if (--timedout == 0) { 197 if (--timedout == 0) {
196 netdev_err(dev, "mdio_read busy timeout!!\n"); 198 netdev_err(dev, "mdio_read busy timeout!!\n");
197 return -1; 199 return -1;
198 } 200 }
199 } 201 }
200 return (int)*mii_data_reg; 202 return readl(mii_data_reg);
201} 203}
202 204
203static void au1000_mdio_write(struct net_device *dev, int phy_addr, 205static void au1000_mdio_write(struct net_device *dev, int phy_addr,
204 int reg, u16 value) 206 int reg, u16 value)
205{ 207{
206 struct au1000_private *aup = netdev_priv(dev); 208 struct au1000_private *aup = netdev_priv(dev);
207 volatile u32 *const mii_control_reg = &aup->mac->mii_control; 209 u32 *const mii_control_reg = &aup->mac->mii_control;
208 volatile u32 *const mii_data_reg = &aup->mac->mii_data; 210 u32 *const mii_data_reg = &aup->mac->mii_data;
209 u32 timedout = 20; 211 u32 timedout = 20;
210 u32 mii_control; 212 u32 mii_control;
211 213
212 while (*mii_control_reg & MAC_MII_BUSY) { 214 while (readl(mii_control_reg) & MAC_MII_BUSY) {
213 mdelay(1); 215 mdelay(1);
214 if (--timedout == 0) { 216 if (--timedout == 0) {
215 netdev_err(dev, "mdio_write busy timeout!!\n"); 217 netdev_err(dev, "mdio_write busy timeout!!\n");
@@ -220,18 +222,22 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr,
220 mii_control = MAC_SET_MII_SELECT_REG(reg) | 222 mii_control = MAC_SET_MII_SELECT_REG(reg) |
221 MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE; 223 MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
222 224
223 *mii_data_reg = value; 225 writel(value, mii_data_reg);
224 *mii_control_reg = mii_control; 226 writel(mii_control, mii_control_reg);
225} 227}
226 228
227static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) 229static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
228{ 230{
229 /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does 231 /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does
230 * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */ 232 * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus)
233 */
231 struct net_device *const dev = bus->priv; 234 struct net_device *const dev = bus->priv;
232 235
233 au1000_enable_mac(dev, 0); /* make sure the MAC associated with this 236 /* make sure the MAC associated with this
234 * mii_bus is enabled */ 237 * mii_bus is enabled
238 */
239 au1000_enable_mac(dev, 0);
240
235 return au1000_mdio_read(dev, phy_addr, regnum); 241 return au1000_mdio_read(dev, phy_addr, regnum);
236} 242}
237 243
@@ -240,8 +246,11 @@ static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
240{ 246{
241 struct net_device *const dev = bus->priv; 247 struct net_device *const dev = bus->priv;
242 248
243 au1000_enable_mac(dev, 0); /* make sure the MAC associated with this 249 /* make sure the MAC associated with this
244 * mii_bus is enabled */ 250 * mii_bus is enabled
251 */
252 au1000_enable_mac(dev, 0);
253
245 au1000_mdio_write(dev, phy_addr, regnum, value); 254 au1000_mdio_write(dev, phy_addr, regnum, value);
246 return 0; 255 return 0;
247} 256}
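[Editor's note] The au1000 hunks above and below replace volatile pointer dereferences of memory-mapped registers with readl()/writel(). The accessors guarantee correctly sized, correctly ordered device accesses, whereas a plain volatile dereference leans on compiler behavior. A before/after sketch with an illustrative register pointer:

	#include <linux/io.h>

	/* before: relies on 'volatile' to keep the access intact */
	static u32 mmio_read_old(volatile u32 *reg)
	{
		return *reg;
	}

	/* after: readl()/writel() carry the required ordering semantics */
	static u32 mmio_read_new(u32 __iomem *reg)
	{
		return readl(reg);
	}

	static void mmio_write_new(u32 val, u32 __iomem *reg)
	{
		writel(val, reg);
	}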
@@ -250,28 +259,37 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
250{ 259{
251 struct net_device *const dev = bus->priv; 260 struct net_device *const dev = bus->priv;
252 261
253 au1000_enable_mac(dev, 0); /* make sure the MAC associated with this 262 /* make sure the MAC associated with this
254 * mii_bus is enabled */ 263 * mii_bus is enabled
264 */
265 au1000_enable_mac(dev, 0);
266
255 return 0; 267 return 0;
256} 268}
257 269
258static void au1000_hard_stop(struct net_device *dev) 270static void au1000_hard_stop(struct net_device *dev)
259{ 271{
260 struct au1000_private *aup = netdev_priv(dev); 272 struct au1000_private *aup = netdev_priv(dev);
273 u32 reg;
261 274
262 netif_dbg(aup, drv, dev, "hard stop\n"); 275 netif_dbg(aup, drv, dev, "hard stop\n");
263 276
264 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); 277 reg = readl(&aup->mac->control);
278 reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
279 writel(reg, &aup->mac->control);
265 au_sync_delay(10); 280 au_sync_delay(10);
266} 281}
267 282
268static void au1000_enable_rx_tx(struct net_device *dev) 283static void au1000_enable_rx_tx(struct net_device *dev)
269{ 284{
270 struct au1000_private *aup = netdev_priv(dev); 285 struct au1000_private *aup = netdev_priv(dev);
286 u32 reg;
271 287
272 netif_dbg(aup, hw, dev, "enable_rx_tx\n"); 288 netif_dbg(aup, hw, dev, "enable_rx_tx\n");
273 289
274 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE); 290 reg = readl(&aup->mac->control);
291 reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
292 writel(reg, &aup->mac->control);
275 au_sync_delay(10); 293 au_sync_delay(10);
276} 294}
277 295
@@ -281,6 +299,7 @@ au1000_adjust_link(struct net_device *dev)
281 struct au1000_private *aup = netdev_priv(dev); 299 struct au1000_private *aup = netdev_priv(dev);
282 struct phy_device *phydev = aup->phy_dev; 300 struct phy_device *phydev = aup->phy_dev;
283 unsigned long flags; 301 unsigned long flags;
302 u32 reg;
284 303
285 int status_change = 0; 304 int status_change = 0;
286 305
@@ -312,14 +331,15 @@ au1000_adjust_link(struct net_device *dev)
312 /* switching duplex mode requires to disable rx and tx! */ 331 /* switching duplex mode requires to disable rx and tx! */
313 au1000_hard_stop(dev); 332 au1000_hard_stop(dev);
314 333
315 if (DUPLEX_FULL == phydev->duplex) 334 reg = readl(&aup->mac->control);
316 aup->mac->control = ((aup->mac->control 335 if (DUPLEX_FULL == phydev->duplex) {
317 | MAC_FULL_DUPLEX) 336 reg |= MAC_FULL_DUPLEX;
318 & ~MAC_DISABLE_RX_OWN); 337 reg &= ~MAC_DISABLE_RX_OWN;
319 else 338 } else {
320 aup->mac->control = ((aup->mac->control 339 reg &= ~MAC_FULL_DUPLEX;
321 & ~MAC_FULL_DUPLEX) 340 reg |= MAC_DISABLE_RX_OWN;
322 | MAC_DISABLE_RX_OWN); 341 }
342 writel(reg, &aup->mac->control);
323 au_sync_delay(1); 343 au_sync_delay(1);
324 344
325 au1000_enable_rx_tx(dev); 345 au1000_enable_rx_tx(dev);
@@ -353,10 +373,11 @@ au1000_adjust_link(struct net_device *dev)
353 } 373 }
354} 374}
355 375
356static int au1000_mii_probe (struct net_device *dev) 376static int au1000_mii_probe(struct net_device *dev)
357{ 377{
358 struct au1000_private *const aup = netdev_priv(dev); 378 struct au1000_private *const aup = netdev_priv(dev);
359 struct phy_device *phydev = NULL; 379 struct phy_device *phydev = NULL;
380 int phy_addr;
360 381
361 if (aup->phy_static_config) { 382 if (aup->phy_static_config) {
362 BUG_ON(aup->mac_id < 0 || aup->mac_id > 1); 383 BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
@@ -366,42 +387,46 @@ static int au1000_mii_probe (struct net_device *dev)
366 else 387 else
367 netdev_info(dev, "using PHY-less setup\n"); 388 netdev_info(dev, "using PHY-less setup\n");
368 return 0; 389 return 0;
369 } else { 390 }
370 int phy_addr;
371
372 /* find the first (lowest address) PHY on the current MAC's MII bus */
373 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
374 if (aup->mii_bus->phy_map[phy_addr]) {
375 phydev = aup->mii_bus->phy_map[phy_addr];
376 if (!aup->phy_search_highest_addr)
377 break; /* break out with first one found */
378 }
379
380 if (aup->phy1_search_mac0) {
381 /* try harder to find a PHY */
382 if (!phydev && (aup->mac_id == 1)) {
383 /* no PHY found, maybe we have a dual PHY? */
384 dev_info(&dev->dev, ": no PHY found on MAC1, "
385 "let's see if it's attached to MAC0...\n");
386
387 /* find the first (lowest address) non-attached PHY on
388 * the MAC0 MII bus */
389 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
390 struct phy_device *const tmp_phydev =
391 aup->mii_bus->phy_map[phy_addr];
392
393 if (aup->mac_id == 1)
394 break;
395
396 if (!tmp_phydev)
397 continue; /* no PHY here... */
398 391
399 if (tmp_phydev->attached_dev) 392 /* find the first (lowest address) PHY
400 continue; /* already claimed by MAC0 */ 393 * on the current MAC's MII bus
394 */
395 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
396 if (aup->mii_bus->phy_map[phy_addr]) {
397 phydev = aup->mii_bus->phy_map[phy_addr];
398 if (!aup->phy_search_highest_addr)
399 /* break out with first one found */
400 break;
401 }
401 402
402 phydev = tmp_phydev; 403 if (aup->phy1_search_mac0) {
403 break; /* found it */ 404 /* try harder to find a PHY */
404 } 405 if (!phydev && (aup->mac_id == 1)) {
406 /* no PHY found, maybe we have a dual PHY? */
407 dev_info(&dev->dev, ": no PHY found on MAC1, "
408 "let's see if it's attached to MAC0...\n");
409
410 /* find the first (lowest address) non-attached
411 * PHY on the MAC0 MII bus
412 */
413 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
414 struct phy_device *const tmp_phydev =
415 aup->mii_bus->phy_map[phy_addr];
416
417 if (aup->mac_id == 1)
418 break;
419
420 /* no PHY here... */
421 if (!tmp_phydev)
422 continue;
423
424 /* already claimed by MAC0 */
425 if (tmp_phydev->attached_dev)
426 continue;
427
428 phydev = tmp_phydev;
429 break; /* found it */
405 } 430 }
406 } 431 }
407 } 432 }
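Editor's note: the reindented probe keeps the same scan policy: walk the bus's phy_map, take the first populated slot, or keep scanning so the highest-address PHY wins when phy_search_highest_addr is set (the phy1_search_mac0 fallback is not shown here). A compilable sketch of just that selection logic, with a mock table standing in for mii_bus->phy_map:

#include <stdio.h>
#include <stddef.h>

#define PHY_MAX_ADDR 32

struct phy_device { int addr; };

/* pick the first populated slot, or the last one when search_highest is set */
static struct phy_device *scan_phy_map(struct phy_device *map[PHY_MAX_ADDR],
				       int search_highest)
{
	struct phy_device *found = NULL;
	int phy_addr;

	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
		if (map[phy_addr]) {
			found = map[phy_addr];
			if (!search_highest)
				break;	/* first one found wins */
		}
	return found;
}

int main(void)
{
	struct phy_device a = { 1 }, b = { 7 };
	struct phy_device *map[PHY_MAX_ADDR] = { NULL };

	map[1] = &a;
	map[7] = &b;
	printf("lowest: %d\n", scan_phy_map(map, 0)->addr);
	printf("highest: %d\n", scan_phy_map(map, 1)->addr);
	return 0;
}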
@@ -452,20 +477,20 @@ static int au1000_mii_probe (struct net_device *dev)
452 * has the virtual and dma address of a buffer suitable for 477 * has the virtual and dma address of a buffer suitable for
453 * both, receive and transmit operations. 478 * both, receive and transmit operations.
454 */ 479 */
455static db_dest_t *au1000_GetFreeDB(struct au1000_private *aup) 480static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
456{ 481{
457 db_dest_t *pDB; 482 struct db_dest *pDB;
458 pDB = aup->pDBfree; 483 pDB = aup->pDBfree;
459 484
460 if (pDB) { 485 if (pDB)
461 aup->pDBfree = pDB->pnext; 486 aup->pDBfree = pDB->pnext;
462 } 487
463 return pDB; 488 return pDB;
464} 489}
465 490
466void au1000_ReleaseDB(struct au1000_private *aup, db_dest_t *pDB) 491void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
467{ 492{
468 db_dest_t *pDBfree = aup->pDBfree; 493 struct db_dest *pDBfree = aup->pDBfree;
469 if (pDBfree) 494 if (pDBfree)
470 pDBfree->pnext = pDB; 495 pDBfree->pnext = pDB;
471 aup->pDBfree = pDB; 496 aup->pDBfree = pDB;
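Editor's note: au1000_GetFreeDB()/au1000_ReleaseDB() manage the buffer descriptors as a singly linked free list threaded through pnext. The driver's release side keeps its own historical linking; a conventional LIFO push/pop over the same structure looks like this self-contained sketch:

#include <stdio.h>
#include <stddef.h>

struct db_dest {
	struct db_dest *pnext;
	int id;			/* payload fields elided */
};

static struct db_dest *pop_db(struct db_dest **free_list)
{
	struct db_dest *pdb = *free_list;

	if (pdb)
		*free_list = pdb->pnext;	/* unlink the head */
	return pdb;
}

static void push_db(struct db_dest **free_list, struct db_dest *pdb)
{
	pdb->pnext = *free_list;	/* old head becomes successor */
	*free_list = pdb;
}

int main(void)
{
	struct db_dest a = { NULL, 0 }, b = { NULL, 1 };
	struct db_dest *free_list = NULL;

	push_db(&free_list, &a);
	push_db(&free_list, &b);
	printf("popped %d\n", pop_db(&free_list)->id);	/* 1: LIFO order */
	return 0;
}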
@@ -478,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
478 503
479 au1000_hard_stop(dev); 504 au1000_hard_stop(dev);
480 505
481	*aup->enable = MAC_EN_CLOCK_ENABLE;			 506	writel(MAC_EN_CLOCK_ENABLE, aup->enable);
482 au_sync_delay(2); 507 au_sync_delay(2);
483	*aup->enable = 0;					 508	writel(0, aup->enable);
484 au_sync_delay(2); 509 au_sync_delay(2);
485 510
486 aup->tx_full = 0; 511 aup->tx_full = 0;
@@ -507,7 +532,7 @@ static void au1000_reset_mac(struct net_device *dev)
507 532
508 spin_lock_irqsave(&aup->lock, flags); 533 spin_lock_irqsave(&aup->lock, flags);
509 534
510 au1000_reset_mac_unlocked (dev); 535 au1000_reset_mac_unlocked(dev);
511 536
512 spin_unlock_irqrestore(&aup->lock, flags); 537 spin_unlock_irqrestore(&aup->lock, flags);
513} 538}
@@ -524,11 +549,13 @@ au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
524 549
525 for (i = 0; i < NUM_RX_DMA; i++) { 550 for (i = 0; i < NUM_RX_DMA; i++) {
526 aup->rx_dma_ring[i] = 551 aup->rx_dma_ring[i] =
527 (volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i); 552 (struct rx_dma *)
553 (rx_base + sizeof(struct rx_dma)*i);
528 } 554 }
529 for (i = 0; i < NUM_TX_DMA; i++) { 555 for (i = 0; i < NUM_TX_DMA; i++) {
530 aup->tx_dma_ring[i] = 556 aup->tx_dma_ring[i] =
531 (volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i); 557 (struct tx_dma *)
558 (tx_base + sizeof(struct tx_dma)*i);
532 } 559 }
533} 560}
534 561
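Editor's note: au1000_setup_hw_rings() lays the descriptor rings out back-to-back at fixed offsets from a base address; slot i lives at base + i * sizeof(descriptor). A small sketch of that address computation, with plain memory standing in for the memory-mapped window:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define NUM_RX_DMA 4

struct rx_dma {
	uint32_t status;
	uint32_t buff_stat;
	uint32_t pad[2];
};

int main(void)
{
	static unsigned char window[NUM_RX_DMA * sizeof(struct rx_dma)];
	uintptr_t rx_base = (uintptr_t)window;
	struct rx_dma *ring[NUM_RX_DMA];
	int i;

	for (i = 0; i < NUM_RX_DMA; i++)
		ring[i] = (struct rx_dma *)(rx_base + sizeof(struct rx_dma) * i);

	for (i = 0; i < NUM_RX_DMA; i++)
		printf("slot %d at offset %zu\n", i,
		       (size_t)((uintptr_t)ring[i] - rx_base));
	return 0;
}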
@@ -616,18 +643,21 @@ static int au1000_init(struct net_device *dev)
616 643
617 spin_lock_irqsave(&aup->lock, flags); 644 spin_lock_irqsave(&aup->lock, flags);
618 645
619 aup->mac->control = 0; 646 writel(0, &aup->mac->control);
620 aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2; 647 aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
621 aup->tx_tail = aup->tx_head; 648 aup->tx_tail = aup->tx_head;
622 aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2; 649 aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
623 650
624 aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4]; 651 writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
625 aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 | 652 &aup->mac->mac_addr_high);
626 dev->dev_addr[1]<<8 | dev->dev_addr[0]; 653 writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
654 dev->dev_addr[1]<<8 | dev->dev_addr[0],
655 &aup->mac->mac_addr_low);
627 656
628 for (i = 0; i < NUM_RX_DMA; i++) { 657
658 for (i = 0; i < NUM_RX_DMA; i++)
629 aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE; 659 aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
630 } 660
631 au_sync(); 661 au_sync();
632 662
633 control = MAC_RX_ENABLE | MAC_TX_ENABLE; 663 control = MAC_RX_ENABLE | MAC_TX_ENABLE;
@@ -643,8 +673,8 @@ static int au1000_init(struct net_device *dev)
643 control |= MAC_FULL_DUPLEX; 673 control |= MAC_FULL_DUPLEX;
644 } 674 }
645 675
646 aup->mac->control = control; 676 writel(control, &aup->mac->control);
647 aup->mac->vlan1_tag = 0x8100; /* activate vlan support */ 677 writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
648 au_sync(); 678 au_sync();
649 679
650 spin_unlock_irqrestore(&aup->lock, flags); 680 spin_unlock_irqrestore(&aup->lock, flags);
@@ -681,9 +711,9 @@ static int au1000_rx(struct net_device *dev)
681{ 711{
682 struct au1000_private *aup = netdev_priv(dev); 712 struct au1000_private *aup = netdev_priv(dev);
683 struct sk_buff *skb; 713 struct sk_buff *skb;
684 volatile rx_dma_t *prxd; 714 struct rx_dma *prxd;
685 u32 buff_stat, status; 715 u32 buff_stat, status;
686 db_dest_t *pDB; 716 struct db_dest *pDB;
687 u32 frmlen; 717 u32 frmlen;
688 718
689 netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head); 719 netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
@@ -713,24 +743,26 @@ static int au1000_rx(struct net_device *dev)
713 netif_rx(skb); /* pass the packet to upper layers */ 743 netif_rx(skb); /* pass the packet to upper layers */
714 } else { 744 } else {
715 if (au1000_debug > 4) { 745 if (au1000_debug > 4) {
746 pr_err("rx_error(s):");
716 if (status & RX_MISSED_FRAME) 747 if (status & RX_MISSED_FRAME)
717 printk("rx miss\n"); 748 pr_cont(" miss");
718 if (status & RX_WDOG_TIMER) 749 if (status & RX_WDOG_TIMER)
719 printk("rx wdog\n"); 750 pr_cont(" wdog");
720 if (status & RX_RUNT) 751 if (status & RX_RUNT)
721 printk("rx runt\n"); 752 pr_cont(" runt");
722 if (status & RX_OVERLEN) 753 if (status & RX_OVERLEN)
723 printk("rx overlen\n"); 754 pr_cont(" overlen");
724 if (status & RX_COLL) 755 if (status & RX_COLL)
725 printk("rx coll\n"); 756 pr_cont(" coll");
726 if (status & RX_MII_ERROR) 757 if (status & RX_MII_ERROR)
727 printk("rx mii error\n"); 758 pr_cont(" mii error");
728 if (status & RX_CRC_ERROR) 759 if (status & RX_CRC_ERROR)
729 printk("rx crc error\n"); 760 pr_cont(" crc error");
730 if (status & RX_LEN_ERROR) 761 if (status & RX_LEN_ERROR)
731 printk("rx len error\n"); 762 pr_cont(" len error");
732 if (status & RX_U_CNTRL_FRAME) 763 if (status & RX_U_CNTRL_FRAME)
733 printk("rx u control frame\n"); 764 pr_cont(" u control frame");
765 pr_cont("\n");
734 } 766 }
735 } 767 }
736 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); 768 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
@@ -753,7 +785,8 @@ static void au1000_update_tx_stats(struct net_device *dev, u32 status)
753 if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { 785 if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
754 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { 786 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
755 /* any other tx errors are only valid 787 /* any other tx errors are only valid
756 * in half duplex mode */ 788 * in half duplex mode
789 */
757 ps->tx_errors++; 790 ps->tx_errors++;
758 ps->tx_aborted_errors++; 791 ps->tx_aborted_errors++;
759 } 792 }
@@ -774,7 +807,7 @@ static void au1000_update_tx_stats(struct net_device *dev, u32 status)
774static void au1000_tx_ack(struct net_device *dev) 807static void au1000_tx_ack(struct net_device *dev)
775{ 808{
776 struct au1000_private *aup = netdev_priv(dev); 809 struct au1000_private *aup = netdev_priv(dev);
777 volatile tx_dma_t *ptxd; 810 struct tx_dma *ptxd;
778 811
779 ptxd = aup->tx_dma_ring[aup->tx_tail]; 812 ptxd = aup->tx_dma_ring[aup->tx_tail];
780 813
@@ -854,7 +887,7 @@ static int au1000_close(struct net_device *dev)
854 887
855 spin_lock_irqsave(&aup->lock, flags); 888 spin_lock_irqsave(&aup->lock, flags);
856 889
857 au1000_reset_mac_unlocked (dev); 890 au1000_reset_mac_unlocked(dev);
858 891
859 /* stop the device */ 892 /* stop the device */
860 netif_stop_queue(dev); 893 netif_stop_queue(dev);
@@ -873,9 +906,9 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
873{ 906{
874 struct au1000_private *aup = netdev_priv(dev); 907 struct au1000_private *aup = netdev_priv(dev);
875 struct net_device_stats *ps = &dev->stats; 908 struct net_device_stats *ps = &dev->stats;
876 volatile tx_dma_t *ptxd; 909 struct tx_dma *ptxd;
877 u32 buff_stat; 910 u32 buff_stat;
878 db_dest_t *pDB; 911 struct db_dest *pDB;
879 int i; 912 int i;
880 913
881 netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n", 914 netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
@@ -902,9 +935,9 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
902 pDB = aup->tx_db_inuse[aup->tx_head]; 935 pDB = aup->tx_db_inuse[aup->tx_head];
903 skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len); 936 skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
904 if (skb->len < ETH_ZLEN) { 937 if (skb->len < ETH_ZLEN) {
905 for (i = skb->len; i < ETH_ZLEN; i++) { 938 for (i = skb->len; i < ETH_ZLEN; i++)
906 ((char *)pDB->vaddr)[i] = 0; 939 ((char *)pDB->vaddr)[i] = 0;
907 } 940
908 ptxd->len = ETH_ZLEN; 941 ptxd->len = ETH_ZLEN;
909 } else 942 } else
910 ptxd->len = skb->len; 943 ptxd->len = skb->len;
@@ -935,15 +968,16 @@ static void au1000_tx_timeout(struct net_device *dev)
935static void au1000_multicast_list(struct net_device *dev) 968static void au1000_multicast_list(struct net_device *dev)
936{ 969{
937 struct au1000_private *aup = netdev_priv(dev); 970 struct au1000_private *aup = netdev_priv(dev);
971 u32 reg;
938 972
939 netif_dbg(aup, drv, dev, "au1000_multicast_list: flags=%x\n", dev->flags); 973 netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
940 974 reg = readl(&aup->mac->control);
941 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 975 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
942 aup->mac->control |= MAC_PROMISCUOUS; 976 reg |= MAC_PROMISCUOUS;
943 } else if ((dev->flags & IFF_ALLMULTI) || 977 } else if ((dev->flags & IFF_ALLMULTI) ||
944 netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) { 978 netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
945 aup->mac->control |= MAC_PASS_ALL_MULTI; 979 reg |= MAC_PASS_ALL_MULTI;
946 aup->mac->control &= ~MAC_PROMISCUOUS; 980 reg &= ~MAC_PROMISCUOUS;
947 netdev_info(dev, "Pass all multicast\n"); 981 netdev_info(dev, "Pass all multicast\n");
948 } else { 982 } else {
949 struct netdev_hw_addr *ha; 983 struct netdev_hw_addr *ha;
@@ -953,11 +987,12 @@ static void au1000_multicast_list(struct net_device *dev)
953 netdev_for_each_mc_addr(ha, dev) 987 netdev_for_each_mc_addr(ha, dev)
954 set_bit(ether_crc(ETH_ALEN, ha->addr)>>26, 988 set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
955 (long *)mc_filter); 989 (long *)mc_filter);
956 aup->mac->multi_hash_high = mc_filter[1]; 990 writel(mc_filter[1], &aup->mac->multi_hash_high);
957 aup->mac->multi_hash_low = mc_filter[0]; 991 writel(mc_filter[0], &aup->mac->multi_hash_low);
958 aup->mac->control &= ~MAC_PROMISCUOUS; 992 reg &= ~MAC_PROMISCUOUS;
959 aup->mac->control |= MAC_HASH_MODE; 993 reg |= MAC_HASH_MODE;
960 } 994 }
995 writel(reg, &aup->mac->control);
961} 996}
962 997
963static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 998static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
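Editor's note: the multicast path hashes each address with ether_crc() and uses the top six CRC bits as an index into a 64-bit hash filter split across the multi_hash_high/low registers. The driver calls set_bit() on the word pair; the open-coded arithmetic below is the equivalent for a little-endian 32-bit layout, with a made-up CRC value in place of ether_crc():

#include <stdio.h>
#include <stdint.h>

/* set the filter bit selected by the top 6 bits of the CRC */
static void hash_set(uint32_t mc_filter[2], uint32_t crc)
{
	unsigned int bit = crc >> 26;		/* 0..63 */

	mc_filter[bit >> 5] |= 1u << (bit & 31);
}

int main(void)
{
	uint32_t mc_filter[2] = { 0, 0 };
	uint32_t fake_crc = 0xDEADBEEF;	/* stand-in for ether_crc(ETH_ALEN, addr) */

	hash_set(mc_filter, fake_crc);
	/* mc_filter[0] -> multi_hash_low, mc_filter[1] -> multi_hash_high */
	printf("low=%#x high=%#x\n", mc_filter[0], mc_filter[1]);
	return 0;
}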
@@ -991,7 +1026,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
991 struct au1000_private *aup = NULL; 1026 struct au1000_private *aup = NULL;
992 struct au1000_eth_platform_data *pd; 1027 struct au1000_eth_platform_data *pd;
993 struct net_device *dev = NULL; 1028 struct net_device *dev = NULL;
994 db_dest_t *pDB, *pDBfree; 1029 struct db_dest *pDB, *pDBfree;
995 int irq, i, err = 0; 1030 int irq, i, err = 0;
996 struct resource *base, *macen; 1031 struct resource *base, *macen;
997 1032
@@ -1016,13 +1051,15 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1016 goto out; 1051 goto out;
1017 } 1052 }
1018 1053
1019 if (!request_mem_region(base->start, resource_size(base), pdev->name)) { 1054 if (!request_mem_region(base->start, resource_size(base),
1055 pdev->name)) {
1020 dev_err(&pdev->dev, "failed to request memory region for base registers\n"); 1056 dev_err(&pdev->dev, "failed to request memory region for base registers\n");
1021 err = -ENXIO; 1057 err = -ENXIO;
1022 goto out; 1058 goto out;
1023 } 1059 }
1024 1060
1025 if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) { 1061 if (!request_mem_region(macen->start, resource_size(macen),
1062 pdev->name)) {
1026 dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n"); 1063 dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
1027 err = -ENXIO; 1064 err = -ENXIO;
1028 goto err_request; 1065 goto err_request;
@@ -1040,10 +1077,12 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1040 aup = netdev_priv(dev); 1077 aup = netdev_priv(dev);
1041 1078
1042 spin_lock_init(&aup->lock); 1079 spin_lock_init(&aup->lock);
1043 aup->msg_enable = (au1000_debug < 4 ? AU1000_DEF_MSG_ENABLE : au1000_debug); 1080 aup->msg_enable = (au1000_debug < 4 ?
1081 AU1000_DEF_MSG_ENABLE : au1000_debug);
1044 1082
1045 /* Allocate the data buffers */ 1083 /* Allocate the data buffers
1046 /* Snooping works fine with eth on all au1xxx */ 1084 * Snooping works fine with eth on all au1xxx
1085 */
1047 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * 1086 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
1048 (NUM_TX_BUFFS + NUM_RX_BUFFS), 1087 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1049 &aup->dma_addr, 0); 1088 &aup->dma_addr, 0);
@@ -1054,15 +1093,17 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1054 } 1093 }
1055 1094
1056 /* aup->mac is the base address of the MAC's registers */ 1095 /* aup->mac is the base address of the MAC's registers */
1057 aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base)); 1096 aup->mac = (struct mac_reg *)
1097 ioremap_nocache(base->start, resource_size(base));
1058 if (!aup->mac) { 1098 if (!aup->mac) {
1059 dev_err(&pdev->dev, "failed to ioremap MAC registers\n"); 1099 dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
1060 err = -ENXIO; 1100 err = -ENXIO;
1061 goto err_remap1; 1101 goto err_remap1;
1062 } 1102 }
1063 1103
1064 /* Setup some variables for quick register address access */ 1104 /* Setup some variables for quick register address access */
1065 aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen)); 1105 aup->enable = (u32 *)ioremap_nocache(macen->start,
1106 resource_size(macen));
1066 if (!aup->enable) { 1107 if (!aup->enable) {
1067 dev_err(&pdev->dev, "failed to ioremap MAC enable register\n"); 1108 dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
1068 err = -ENXIO; 1109 err = -ENXIO;
@@ -1078,12 +1119,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1078 /* set a random MAC now in case platform_data doesn't provide one */ 1119 /* set a random MAC now in case platform_data doesn't provide one */
1079 random_ether_addr(dev->dev_addr); 1120 random_ether_addr(dev->dev_addr);
1080 1121
1081	*aup->enable = 0;					1122	writel(0, &aup->enable);	/* fixed below: aup->enable already holds the register address */
1081	*aup->enable = 0;					1122	writel(0, aup->enable);
1082 aup->mac_enabled = 0; 1123 aup->mac_enabled = 0;
1083 1124
1084 pd = pdev->dev.platform_data; 1125 pd = pdev->dev.platform_data;
1085 if (!pd) { 1126 if (!pd) {
1086 dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n"); 1127 dev_info(&pdev->dev, "no platform_data passed,"
1128 " PHY search on MAC0\n");
1087 aup->phy1_search_mac0 = 1; 1129 aup->phy1_search_mac0 = 1;
1088 } else { 1130 } else {
1089 if (is_valid_ether_addr(pd->mac)) 1131 if (is_valid_ether_addr(pd->mac))
@@ -1098,8 +1140,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1098 } 1140 }
1099 1141
1100 if (aup->phy_busid && aup->phy_busid > 0) { 1142 if (aup->phy_busid && aup->phy_busid > 0) {
1101 dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII" 1143 dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
1102 "bus not supported yet\n");
1103 err = -ENODEV; 1144 err = -ENODEV;
1104 goto err_mdiobus_alloc; 1145 goto err_mdiobus_alloc;
1105 } 1146 }
@@ -1151,17 +1192,17 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1151 1192
1152 for (i = 0; i < NUM_RX_DMA; i++) { 1193 for (i = 0; i < NUM_RX_DMA; i++) {
1153 pDB = au1000_GetFreeDB(aup); 1194 pDB = au1000_GetFreeDB(aup);
1154 if (!pDB) { 1195 if (!pDB)
1155 goto err_out; 1196 goto err_out;
1156 } 1197
1157 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; 1198 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1158 aup->rx_db_inuse[i] = pDB; 1199 aup->rx_db_inuse[i] = pDB;
1159 } 1200 }
1160 for (i = 0; i < NUM_TX_DMA; i++) { 1201 for (i = 0; i < NUM_TX_DMA; i++) {
1161 pDB = au1000_GetFreeDB(aup); 1202 pDB = au1000_GetFreeDB(aup);
1162 if (!pDB) { 1203 if (!pDB)
1163 goto err_out; 1204 goto err_out;
1164 } 1205
1165 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; 1206 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1166 aup->tx_dma_ring[i]->len = 0; 1207 aup->tx_dma_ring[i]->len = 0;
1167 aup->tx_db_inuse[i] = pDB; 1208 aup->tx_db_inuse[i] = pDB;
@@ -1188,7 +1229,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1188 netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n", 1229 netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
1189 (unsigned long)base->start, irq); 1230 (unsigned long)base->start, irq);
1190 if (version_printed++ == 0) 1231 if (version_printed++ == 0)
1191 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); 1232 pr_info("%s version %s %s\n",
1233 DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1192 1234
1193 return 0; 1235 return 0;
1194 1236
@@ -1197,7 +1239,8 @@ err_out:
1197 mdiobus_unregister(aup->mii_bus); 1239 mdiobus_unregister(aup->mii_bus);
1198 1240
1199 /* here we should have a valid dev plus aup-> register addresses 1241 /* here we should have a valid dev plus aup-> register addresses
1200 * so we can reset the mac properly.*/ 1242 * so we can reset the mac properly.
1243 */
1201 au1000_reset_mac(dev); 1244 au1000_reset_mac(dev);
1202 1245
1203 for (i = 0; i < NUM_RX_DMA; i++) { 1246 for (i = 0; i < NUM_RX_DMA; i++) {
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index d06ec008fbf1..6229c774552c 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -44,34 +44,34 @@
44 * Data Buffer Descriptor. Data buffers must be aligned on 32 byte 44 * Data Buffer Descriptor. Data buffers must be aligned on 32 byte
45 * boundary for both, receive and transmit. 45 * boundary for both, receive and transmit.
46 */ 46 */
47typedef struct db_dest { 47struct db_dest {
48 struct db_dest *pnext; 48 struct db_dest *pnext;
49 volatile u32 *vaddr; 49 u32 *vaddr;
50 dma_addr_t dma_addr; 50 dma_addr_t dma_addr;
51} db_dest_t; 51};
52 52
53/* 53/*
54 * The transmit and receive descriptors are memory 54 * The transmit and receive descriptors are memory
55 * mapped registers. 55 * mapped registers.
56 */ 56 */
57typedef struct tx_dma { 57struct tx_dma {
58 u32 status; 58 u32 status;
59 u32 buff_stat; 59 u32 buff_stat;
60 u32 len; 60 u32 len;
61 u32 pad; 61 u32 pad;
62} tx_dma_t; 62};
63 63
64typedef struct rx_dma { 64struct rx_dma {
65 u32 status; 65 u32 status;
66 u32 buff_stat; 66 u32 buff_stat;
67 u32 pad[2]; 67 u32 pad[2];
68} rx_dma_t; 68};
69 69
70 70
71/* 71/*
72 * MAC control registers, memory mapped. 72 * MAC control registers, memory mapped.
73 */ 73 */
74typedef struct mac_reg { 74struct mac_reg {
75 u32 control; 75 u32 control;
76 u32 mac_addr_high; 76 u32 mac_addr_high;
77 u32 mac_addr_low; 77 u32 mac_addr_low;
@@ -82,16 +82,16 @@ typedef struct mac_reg {
82 u32 flow_control; 82 u32 flow_control;
83 u32 vlan1_tag; 83 u32 vlan1_tag;
84 u32 vlan2_tag; 84 u32 vlan2_tag;
85} mac_reg_t; 85};
86 86
87 87
88struct au1000_private { 88struct au1000_private {
89 db_dest_t *pDBfree; 89 struct db_dest *pDBfree;
90 db_dest_t db[NUM_RX_BUFFS+NUM_TX_BUFFS]; 90 struct db_dest db[NUM_RX_BUFFS+NUM_TX_BUFFS];
91 volatile rx_dma_t *rx_dma_ring[NUM_RX_DMA]; 91 struct rx_dma *rx_dma_ring[NUM_RX_DMA];
92 volatile tx_dma_t *tx_dma_ring[NUM_TX_DMA]; 92 struct tx_dma *tx_dma_ring[NUM_TX_DMA];
93 db_dest_t *rx_db_inuse[NUM_RX_DMA]; 93 struct db_dest *rx_db_inuse[NUM_RX_DMA];
94 db_dest_t *tx_db_inuse[NUM_TX_DMA]; 94 struct db_dest *tx_db_inuse[NUM_TX_DMA];
95 u32 rx_head; 95 u32 rx_head;
96 u32 tx_head; 96 u32 tx_head;
97 u32 tx_tail; 97 u32 tx_tail;
@@ -99,7 +99,9 @@ struct au1000_private {
99 99
100 int mac_id; 100 int mac_id;
101 101
102 int mac_enabled; /* whether MAC is currently enabled and running (req. for mdio) */ 102 int mac_enabled; /* whether MAC is currently enabled and running
103 * (req. for mdio)
104 */
103 105
104 int old_link; /* used by au1000_adjust_link */ 106 int old_link; /* used by au1000_adjust_link */
105 int old_speed; 107 int old_speed;
@@ -117,9 +119,11 @@ struct au1000_private {
117 int phy_busid; 119 int phy_busid;
118 int phy_irq; 120 int phy_irq;
119 121
120 /* These variables are just for quick access to certain regs addresses. */ 122 /* These variables are just for quick access
121 volatile mac_reg_t *mac; /* mac registers */ 123 * to certain regs addresses.
122 volatile u32 *enable; /* address of MAC Enable Register */ 124 */
125 struct mac_reg *mac; /* mac registers */
126 u32 *enable; /* address of MAC Enable Register */
123 127
124 u32 vaddr; /* virtual address of rx/tx buffers */ 128 u32 vaddr; /* virtual address of rx/tx buffers */
125 dma_addr_t dma_addr; /* dma address of rx/tx buffers */ 129 dma_addr_t dma_addr; /* dma address of rx/tx buffers */
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 1e620e287ae0..8e7c8a8e61c7 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -818,7 +818,7 @@ static int b44_rx(struct b44 *bp, int budget)
818 copy_skb->data, len); 818 copy_skb->data, len);
819 skb = copy_skb; 819 skb = copy_skb;
820 } 820 }
821 skb->ip_summed = CHECKSUM_NONE; 821 skb_checksum_none_assert(skb);
822 skb->protocol = eth_type_trans(skb, bp->dev); 822 skb->protocol = eth_type_trans(skb, bp->dev);
823 netif_receive_skb(skb); 823 netif_receive_skb(skb);
824 received++; 824 received++;
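Editor's note: the skb->ip_summed = CHECKSUM_NONE assignment on a freshly received skb is replaced by skb_checksum_none_assert(); as I recall, that helper only asserts (under debug builds) that the field is still at its CHECKSUM_NONE default rather than storing it again. A mock of the idea, with assumed names:

#include <assert.h>
#include <stdio.h>

#define CHECKSUM_NONE 0	/* default ip_summed for a fresh skb */

struct sk_buff { int ip_summed; };

/* debug-only check that nothing set a checksum state earlier */
static void checksum_none_assert(const struct sk_buff *skb)
{
	assert(skb->ip_summed == CHECKSUM_NONE);
}

int main(void)
{
	struct sk_buff skb = { CHECKSUM_NONE };

	checksum_none_assert(&skb);
	printf("ok\n");
	return 0;
}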
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 0d2c5da08937..ecfef240a303 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -293,22 +293,22 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
293 /* if the packet does not have start of packet _and_ 293 /* if the packet does not have start of packet _and_
294 * end of packet flag set, then just recycle it */ 294 * end of packet flag set, then just recycle it */
295 if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { 295 if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
296 priv->stats.rx_dropped++; 296 dev->stats.rx_dropped++;
297 continue; 297 continue;
298 } 298 }
299 299
300 /* recycle packet if it's marked as bad */ 300 /* recycle packet if it's marked as bad */
301 if (unlikely(len_stat & DMADESC_ERR_MASK)) { 301 if (unlikely(len_stat & DMADESC_ERR_MASK)) {
302 priv->stats.rx_errors++; 302 dev->stats.rx_errors++;
303 303
304 if (len_stat & DMADESC_OVSIZE_MASK) 304 if (len_stat & DMADESC_OVSIZE_MASK)
305 priv->stats.rx_length_errors++; 305 dev->stats.rx_length_errors++;
306 if (len_stat & DMADESC_CRC_MASK) 306 if (len_stat & DMADESC_CRC_MASK)
307 priv->stats.rx_crc_errors++; 307 dev->stats.rx_crc_errors++;
308 if (len_stat & DMADESC_UNDER_MASK) 308 if (len_stat & DMADESC_UNDER_MASK)
309 priv->stats.rx_frame_errors++; 309 dev->stats.rx_frame_errors++;
310 if (len_stat & DMADESC_OV_MASK) 310 if (len_stat & DMADESC_OV_MASK)
311 priv->stats.rx_fifo_errors++; 311 dev->stats.rx_fifo_errors++;
312 continue; 312 continue;
313 } 313 }
314 314
@@ -324,7 +324,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
324 nskb = netdev_alloc_skb_ip_align(dev, len); 324 nskb = netdev_alloc_skb_ip_align(dev, len);
325 if (!nskb) { 325 if (!nskb) {
326 /* forget packet, just rearm desc */ 326 /* forget packet, just rearm desc */
327 priv->stats.rx_dropped++; 327 dev->stats.rx_dropped++;
328 continue; 328 continue;
329 } 329 }
330 330
@@ -342,8 +342,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
342 342
343 skb_put(skb, len); 343 skb_put(skb, len);
344 skb->protocol = eth_type_trans(skb, dev); 344 skb->protocol = eth_type_trans(skb, dev);
345 priv->stats.rx_packets++; 345 dev->stats.rx_packets++;
346 priv->stats.rx_bytes += len; 346 dev->stats.rx_bytes += len;
347 netif_receive_skb(skb); 347 netif_receive_skb(skb);
348 348
349 } while (--budget > 0); 349 } while (--budget > 0);
@@ -403,7 +403,7 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
403 spin_unlock(&priv->tx_lock); 403 spin_unlock(&priv->tx_lock);
404 404
405 if (desc->len_stat & DMADESC_UNDER_MASK) 405 if (desc->len_stat & DMADESC_UNDER_MASK)
406 priv->stats.tx_errors++; 406 dev->stats.tx_errors++;
407 407
408 dev_kfree_skb(skb); 408 dev_kfree_skb(skb);
409 released++; 409 released++;
@@ -563,8 +563,8 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
563 if (!priv->tx_desc_count) 563 if (!priv->tx_desc_count)
564 netif_stop_queue(dev); 564 netif_stop_queue(dev);
565 565
566 priv->stats.tx_bytes += skb->len; 566 dev->stats.tx_bytes += skb->len;
567 priv->stats.tx_packets++; 567 dev->stats.tx_packets++;
568 ret = NETDEV_TX_OK; 568 ret = NETDEV_TX_OK;
569 569
570out_unlock: 570out_unlock:
@@ -798,7 +798,7 @@ static int bcm_enet_open(struct net_device *dev)
798 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, 798 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
799 priv->mac_id ? "1" : "0", priv->phy_id); 799 priv->mac_id ? "1" : "0", priv->phy_id);
800 800
801 phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0, 801 phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
802 PHY_INTERFACE_MODE_MII); 802 PHY_INTERFACE_MODE_MII);
803 803
804 if (IS_ERR(phydev)) { 804 if (IS_ERR(phydev)) {
@@ -1141,17 +1141,6 @@ static int bcm_enet_stop(struct net_device *dev)
1141} 1141}
1142 1142
1143/* 1143/*
1144 * core request to return device rx/tx stats
1145 */
1146static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev)
1147{
1148 struct bcm_enet_priv *priv;
1149
1150 priv = netdev_priv(dev);
1151 return &priv->stats;
1152}
1153
1154/*
1155 * ethtool callbacks 1144 * ethtool callbacks
1156 */ 1145 */
1157struct bcm_enet_stats { 1146struct bcm_enet_stats {
@@ -1163,16 +1152,18 @@ struct bcm_enet_stats {
1163 1152
1164#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \ 1153#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \
1165 offsetof(struct bcm_enet_priv, m) 1154 offsetof(struct bcm_enet_priv, m)
1155#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m), \
1156 offsetof(struct net_device_stats, m)
1166 1157
1167static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = { 1158static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
1168 { "rx_packets", GEN_STAT(stats.rx_packets), -1 }, 1159 { "rx_packets", DEV_STAT(rx_packets), -1 },
1169 { "tx_packets", GEN_STAT(stats.tx_packets), -1 }, 1160 { "tx_packets", DEV_STAT(tx_packets), -1 },
1170 { "rx_bytes", GEN_STAT(stats.rx_bytes), -1 }, 1161 { "rx_bytes", DEV_STAT(rx_bytes), -1 },
1171 { "tx_bytes", GEN_STAT(stats.tx_bytes), -1 }, 1162 { "tx_bytes", DEV_STAT(tx_bytes), -1 },
1172 { "rx_errors", GEN_STAT(stats.rx_errors), -1 }, 1163 { "rx_errors", DEV_STAT(rx_errors), -1 },
1173 { "tx_errors", GEN_STAT(stats.tx_errors), -1 }, 1164 { "tx_errors", DEV_STAT(tx_errors), -1 },
1174 { "rx_dropped", GEN_STAT(stats.rx_dropped), -1 }, 1165 { "rx_dropped", DEV_STAT(rx_dropped), -1 },
1175 { "tx_dropped", GEN_STAT(stats.tx_dropped), -1 }, 1166 { "tx_dropped", DEV_STAT(tx_dropped), -1 },
1176 1167
1177 { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS}, 1168 { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
1178 { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS }, 1169 { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
@@ -1328,7 +1319,11 @@ static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
1328 char *p; 1319 char *p;
1329 1320
1330 s = &bcm_enet_gstrings_stats[i]; 1321 s = &bcm_enet_gstrings_stats[i];
1331 p = (char *)priv + s->stat_offset; 1322 if (s->mib_reg == -1)
1323 p = (char *)&netdev->stats;
1324 else
1325 p = (char *)priv;
1326 p += s->stat_offset;
1332 data[i] = (s->sizeof_stat == sizeof(u64)) ? 1327 data[i] = (s->sizeof_stat == sizeof(u64)) ?
1333 *(u64 *)p : *(u32 *)p; 1328 *(u64 *)p : *(u32 *)p;
1334 } 1329 }
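Editor's note: the new DEV_STAT()/GEN_STAT() macros record, for each ethtool string, the size and offsetof() of the counter in either struct net_device_stats or the driver private struct; the dump loop then picks the right base pointer and reads sizeof_stat bytes at that offset. A compilable sketch of the table-driven readout (struct and field names here are invented for illustration):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct dev_stats  { uint64_t rx_packets, tx_packets; };
struct priv_stats { uint32_t mib_rx_crc; };

#define DEV_STAT(m)  sizeof(((struct dev_stats *)0)->m),  \
		     offsetof(struct dev_stats, m), 1
#define PRIV_STAT(m) sizeof(((struct priv_stats *)0)->m), \
		     offsetof(struct priv_stats, m), 0

static const struct {
	const char *name;
	size_t size;
	size_t off;
	int from_dev;	/* selects which struct holds the counter */
} table[] = {
	{ "rx_packets", DEV_STAT(rx_packets) },
	{ "tx_packets", DEV_STAT(tx_packets) },
	{ "rx_crc",     PRIV_STAT(mib_rx_crc) },
};

int main(void)
{
	struct dev_stats  ds = { 5, 7 };
	struct priv_stats ps = { 2 };
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const char *base = table[i].from_dev ? (const char *)&ds
						     : (const char *)&ps;
		const char *p = base + table[i].off;
		uint64_t v = (table[i].size == sizeof(uint64_t))
			   ? *(const uint64_t *)p : *(const uint32_t *)p;

		printf("%s = %llu\n", table[i].name, (unsigned long long)v);
	}
	return 0;
}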
@@ -1605,7 +1600,6 @@ static const struct net_device_ops bcm_enet_ops = {
1605 .ndo_open = bcm_enet_open, 1600 .ndo_open = bcm_enet_open,
1606 .ndo_stop = bcm_enet_stop, 1601 .ndo_stop = bcm_enet_stop,
1607 .ndo_start_xmit = bcm_enet_start_xmit, 1602 .ndo_start_xmit = bcm_enet_start_xmit,
1608 .ndo_get_stats = bcm_enet_get_stats,
1609 .ndo_set_mac_address = bcm_enet_set_mac_address, 1603 .ndo_set_mac_address = bcm_enet_set_mac_address,
1610 .ndo_set_multicast_list = bcm_enet_set_multicast_list, 1604 .ndo_set_multicast_list = bcm_enet_set_multicast_list,
1611 .ndo_do_ioctl = bcm_enet_ioctl, 1605 .ndo_do_ioctl = bcm_enet_ioctl,
diff --git a/drivers/net/bcm63xx_enet.h b/drivers/net/bcm63xx_enet.h
index bd3684d42d74..0e3048b788c2 100644
--- a/drivers/net/bcm63xx_enet.h
+++ b/drivers/net/bcm63xx_enet.h
@@ -274,7 +274,6 @@ struct bcm_enet_priv {
274 int pause_tx; 274 int pause_tx;
275 275
276 /* stats */ 276 /* stats */
277 struct net_device_stats stats;
278 struct bcm_enet_mib_counters mib; 277 struct bcm_enet_mib_counters mib;
279 278
280 /* after mib interrupt, mib registers update is done in this 279 /* after mib interrupt, mib registers update is done in this
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 53306bf3f401..4faf6961dcec 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -414,6 +414,20 @@ static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
414 adapter->is_virtfn = (data != 0xAA); 414 adapter->is_virtfn = (data != 0xAA);
415} 415}
416 416
417static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
418{
419 u32 addr;
420
421 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
422
423 mac[5] = (u8)(addr & 0xFF);
424 mac[4] = (u8)((addr >> 8) & 0xFF);
425 mac[3] = (u8)((addr >> 16) & 0xFF);
426 mac[2] = 0xC9;
427 mac[1] = 0x00;
428 mac[0] = 0x00;
429}
430
417extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 431extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
418 u16 num_popped); 432 u16 num_popped);
419extern void be_link_status_update(struct be_adapter *adapter, bool link_up); 433extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
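Editor's note: be_vf_eth_addr_generate() derives a deterministic seed MAC for the VFs by hashing the PF's address and splicing the low 24 hash bits under the 00:00:C9 OUI; be_vf_eth_addr_config() (added further down) then hands out seed, seed+1, ... by bumping mac[5]. A sketch of the scheme with a toy mixer standing in for the kernel's jhash():

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* toy mixer standing in for the kernel's jhash() */
static uint32_t fake_jhash(const uint8_t *key, size_t len)
{
	uint32_t h = 0;
	size_t i;

	for (i = 0; i < len; i++)
		h = h * 31 + key[i];
	return h;
}

static void vf_mac_generate(const uint8_t pf_mac[6], uint8_t mac[6])
{
	uint32_t addr = fake_jhash(pf_mac, 6);

	mac[5] = (uint8_t)(addr & 0xFF);	/* low 24 hash bits */
	mac[4] = (uint8_t)((addr >> 8) & 0xFF);
	mac[3] = (uint8_t)((addr >> 16) & 0xFF);
	mac[2] = 0xC9;				/* fixed OUI 00:00:C9 */
	mac[1] = 0x00;
	mac[0] = 0x00;
}

int main(void)
{
	const uint8_t pf[6] = { 0x00, 0x00, 0xC9, 0x12, 0x34, 0x56 };
	uint8_t mac[6];
	int vf;

	vf_mac_generate(pf, mac);
	for (vf = 0; vf < 3; vf++) {
		printf("VF%d %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		mac[5] += 1;	/* like the driver, no carry into mac[4] */
	}
	return 0;
}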
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 34abcc9403d6..0db28b411e87 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -98,9 +98,9 @@ static void be_async_link_state_process(struct be_adapter *adapter,
98 98
99static inline bool is_link_state_evt(u32 trailer) 99static inline bool is_link_state_evt(u32 trailer)
100{ 100{
101 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 101 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
102 ASYNC_TRAILER_EVENT_CODE_MASK) == 102 ASYNC_TRAILER_EVENT_CODE_MASK) ==
103 ASYNC_EVENT_CODE_LINK_STATE); 103 ASYNC_EVENT_CODE_LINK_STATE;
104} 104}
105 105
106static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) 106static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 13f0abbc5205..d92063420c25 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -91,6 +91,9 @@ static const struct be_ethtool_stat et_stats[] = {
91 {PORTSTAT_INFO(rx_non_rss_packets)}, 91 {PORTSTAT_INFO(rx_non_rss_packets)},
92 {PORTSTAT_INFO(rx_ipv4_packets)}, 92 {PORTSTAT_INFO(rx_ipv4_packets)},
93 {PORTSTAT_INFO(rx_ipv6_packets)}, 93 {PORTSTAT_INFO(rx_ipv6_packets)},
94 {PORTSTAT_INFO(rx_switched_unicast_packets)},
95 {PORTSTAT_INFO(rx_switched_multicast_packets)},
96 {PORTSTAT_INFO(rx_switched_broadcast_packets)},
94 {PORTSTAT_INFO(tx_unicastframes)}, 97 {PORTSTAT_INFO(tx_unicastframes)},
95 {PORTSTAT_INFO(tx_multicastframes)}, 98 {PORTSTAT_INFO(tx_multicastframes)},
96 {PORTSTAT_INFO(tx_broadcastframes)}, 99 {PORTSTAT_INFO(tx_broadcastframes)},
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 6eda7a022256..43a3a574e2e0 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -365,11 +365,6 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
365 rx_eq->cur_eqd = eqd; 365 rx_eq->cur_eqd = eqd;
366} 366}
367 367
368static struct net_device_stats *be_get_stats(struct net_device *dev)
369{
370 return &dev->stats;
371}
372
373static u32 be_calc_rate(u64 bytes, unsigned long ticks) 368static u32 be_calc_rate(u64 bytes, unsigned long ticks)
374{ 369{
375 u64 rate = bytes; 370 u64 rate = bytes;
@@ -1026,7 +1021,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1026 skb_fill_rx_data(adapter, skb, rxcp, num_rcvd); 1021 skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);
1027 1022
1028 if (do_pkt_csum(rxcp, adapter->rx_csum)) 1023 if (do_pkt_csum(rxcp, adapter->rx_csum))
1029 skb->ip_summed = CHECKSUM_NONE; 1024 skb_checksum_none_assert(skb);
1030 else 1025 else
1031 skb->ip_summed = CHECKSUM_UNNECESSARY; 1026 skb->ip_summed = CHECKSUM_UNNECESSARY;
1032 1027
@@ -2084,6 +2079,47 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2084 return status; 2079 return status;
2085} 2080}
2086 2081
2082/*
2083 * Generate a seed MAC address from the PF MAC Address using jhash.
2084 * MAC Address for VFs are assigned incrementally starting from the seed.
2085 * These addresses are programmed in the ASIC by the PF and the VF driver
2086 * queries for the MAC address during its probe.
2087 */
2088static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2089{
2090	u32 vf = 0;
2091	int status = 0;
2092 u8 mac[ETH_ALEN];
2093
2094 be_vf_eth_addr_generate(adapter, mac);
2095
2096 for (vf = 0; vf < num_vfs; vf++) {
2097 status = be_cmd_pmac_add(adapter, mac,
2098 adapter->vf_cfg[vf].vf_if_handle,
2099 &adapter->vf_cfg[vf].vf_pmac_id);
2100 if (status)
2101 dev_err(&adapter->pdev->dev,
2102 "Mac address add failed for VF %d\n", vf);
2103 else
2104 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2105
2106 mac[5] += 1;
2107 }
2108 return status;
2109}
2110
2111static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2112{
2113 u32 vf;
2114
2115 for (vf = 0; vf < num_vfs; vf++) {
2116 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2117 be_cmd_pmac_del(adapter,
2118 adapter->vf_cfg[vf].vf_if_handle,
2119 adapter->vf_cfg[vf].vf_pmac_id);
2120 }
2121}
2122
2087static int be_setup(struct be_adapter *adapter) 2123static int be_setup(struct be_adapter *adapter)
2088{ 2124{
2089 struct net_device *netdev = adapter->netdev; 2125 struct net_device *netdev = adapter->netdev;
@@ -2143,10 +2179,20 @@ static int be_setup(struct be_adapter *adapter)
2143 if (status != 0) 2179 if (status != 0)
2144 goto rx_qs_destroy; 2180 goto rx_qs_destroy;
2145 2181
2182 if (be_physfn(adapter)) {
2183 status = be_vf_eth_addr_config(adapter);
2184 if (status)
2185 goto mcc_q_destroy;
2186 }
2187
2146 adapter->link_speed = -1; 2188 adapter->link_speed = -1;
2147 2189
2148 return 0; 2190 return 0;
2149 2191
2192mcc_q_destroy:
2193 if (be_physfn(adapter))
2194 be_vf_eth_addr_rem(adapter);
2195 be_mcc_queues_destroy(adapter);
2150rx_qs_destroy: 2196rx_qs_destroy:
2151 be_rx_queues_destroy(adapter); 2197 be_rx_queues_destroy(adapter);
2152tx_qs_destroy: 2198tx_qs_destroy:
@@ -2163,6 +2209,9 @@ do_none:
2163 2209
2164static int be_clear(struct be_adapter *adapter) 2210static int be_clear(struct be_adapter *adapter)
2165{ 2211{
2212 if (be_physfn(adapter))
2213 be_vf_eth_addr_rem(adapter);
2214
2166 be_mcc_queues_destroy(adapter); 2215 be_mcc_queues_destroy(adapter);
2167 be_rx_queues_destroy(adapter); 2216 be_rx_queues_destroy(adapter);
2168 be_tx_queues_destroy(adapter); 2217 be_tx_queues_destroy(adapter);
@@ -2390,7 +2439,6 @@ static struct net_device_ops be_netdev_ops = {
2390 .ndo_open = be_open, 2439 .ndo_open = be_open,
2391 .ndo_stop = be_close, 2440 .ndo_stop = be_close,
2392 .ndo_start_xmit = be_xmit, 2441 .ndo_start_xmit = be_xmit,
2393 .ndo_get_stats = be_get_stats,
2394 .ndo_set_rx_mode = be_set_multicast_list, 2442 .ndo_set_rx_mode = be_set_multicast_list,
2395 .ndo_set_mac_address = be_mac_addr_set, 2443 .ndo_set_mac_address = be_mac_addr_set,
2396 .ndo_change_mtu = be_change_mtu, 2444 .ndo_change_mtu = be_change_mtu,
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 012613fde3f4..7a0e4156fade 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -803,15 +803,14 @@ static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompa
803static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) 803static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
804{ 804{
805 struct bfin_mac_local *lp = netdev_priv(netdev); 805 struct bfin_mac_local *lp = netdev_priv(netdev);
806 union skb_shared_tx *shtx = skb_tx(skb);
807 806
808 if (shtx->hardware) { 807 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
809 int timeout_cnt = MAX_TIMEOUT_CNT; 808 int timeout_cnt = MAX_TIMEOUT_CNT;
810 809
811 /* When doing time stamping, keep the connection to the socket 810 /* When doing time stamping, keep the connection to the socket
812 * a while longer 811 * a while longer
813 */ 812 */
814 shtx->in_progress = 1; 813 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
815 814
816 /* 815 /*
817 * The timestamping is done at the EMAC module's MII/RMII interface 816 * The timestamping is done at the EMAC module's MII/RMII interface
@@ -991,7 +990,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
991 struct bfin_mac_local *lp = netdev_priv(dev); 990 struct bfin_mac_local *lp = netdev_priv(dev);
992 u16 *data; 991 u16 *data;
993 u32 data_align = (unsigned long)(skb->data) & 0x3; 992 u32 data_align = (unsigned long)(skb->data) & 0x3;
994 union skb_shared_tx *shtx = skb_tx(skb);
995 993
996 current_tx_ptr->skb = skb; 994 current_tx_ptr->skb = skb;
997 995
@@ -1005,7 +1003,7 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1005 * of this field are the length of the packet payload in bytes and the higher 1003 * of this field are the length of the packet payload in bytes and the higher
1006 * 4 bits are the timestamping enable field. 1004 * 4 bits are the timestamping enable field.
1007 */ 1005 */
1008 if (shtx->hardware) 1006 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1009 *data |= 0x1000; 1007 *data |= 0x1000;
1010 1008
1011 current_tx_ptr->desc_a.start_addr = (u32)data; 1009 current_tx_ptr->desc_a.start_addr = (u32)data;
@@ -1015,7 +1013,7 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1015 } else { 1013 } else {
1016 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len); 1014 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
1017 /* enable timestamping for the sent packet */ 1015 /* enable timestamping for the sent packet */
1018 if (shtx->hardware) 1016 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
1019 *((u16 *)(current_tx_ptr->packet)) |= 0x1000; 1017 *((u16 *)(current_tx_ptr->packet)) |= 0x1000;
1020 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data, 1018 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
1021 skb->len); 1019 skb->len);
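Editor's note: the bfin_mac changes track the net core's replacement of union skb_shared_tx with plain bits in skb_shinfo(skb)->tx_flags: the old 'hardware' and 'in_progress' booleans become the SKBTX_HW_TSTAMP and SKBTX_IN_PROGRESS flags. The test-and-mark pattern, sketched with illustrative flag values and a mock shared-info struct:

#include <stdio.h>
#include <stdint.h>

#define SKBTX_HW_TSTAMP   (1u << 0)	/* illustrative values */
#define SKBTX_IN_PROGRESS (1u << 2)

struct shared_info { uint8_t tx_flags; };	/* stands in for skb_shinfo() */

static void tx_hwtstamp(struct shared_info *sh)
{
	if (sh->tx_flags & SKBTX_HW_TSTAMP) {
		/* keep the skb referenced until the stamp is read back */
		sh->tx_flags |= SKBTX_IN_PROGRESS;
		printf("timestamping armed\n");
	}
}

int main(void)
{
	struct shared_info sh = { SKBTX_HW_TSTAMP };

	tx_hwtstamp(&sh);
	printf("tx_flags=%#x\n", sh.tx_flags);
	return 0;
}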
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 959add2410bf..a1b8c8b8010b 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1233,15 +1233,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
1233 } 1233 }
1234 spin_unlock_irqrestore(&bp->lock, flags); 1234 spin_unlock_irqrestore(&bp->lock, flags);
1235} 1235}
1236static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1237{
1238 struct bmac_data *bp = netdev_priv(dev);
1239 strcpy(info->driver, "bmac");
1240 strcpy(info->bus_info, dev_name(&bp->mdev->ofdev.dev));
1241}
1242 1236
1243static const struct ethtool_ops bmac_ethtool_ops = { 1237static const struct ethtool_ops bmac_ethtool_ops = {
1244 .get_drvinfo = bmac_get_drvinfo,
1245 .get_link = ethtool_op_get_link, 1238 .get_link = ethtool_op_get_link,
1246}; 1239};
1247 1240
@@ -1588,7 +1581,7 @@ bmac_proc_info(char *buffer, char **start, off_t offset, int length)
1588 int i; 1581 int i;
1589 1582
1590 if (bmac_devs == NULL) 1583 if (bmac_devs == NULL)
1591 return (-ENOSYS); 1584 return -ENOSYS;
1592 1585
1593 len += sprintf(buffer, "BMAC counters & registers\n"); 1586 len += sprintf(buffer, "BMAC counters & registers\n");
1594 1587
diff --git a/drivers/net/bna/Makefile b/drivers/net/bna/Makefile
new file mode 100644
index 000000000000..a5d604de7fea
--- /dev/null
+++ b/drivers/net/bna/Makefile
@@ -0,0 +1,11 @@
1#
2# Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3# All rights reserved.
4#
5
6obj-$(CONFIG_BNA) += bna.o
7
8bna-objs := bnad.o bnad_ethtool.o bna_ctrl.o bna_txrx.o
9bna-objs += bfa_ioc.o bfa_ioc_ct.o bfa_cee.o cna_fwimg.o
10
11EXTRA_CFLAGS := -Idrivers/net/bna
diff --git a/drivers/net/bna/bfa_cee.c b/drivers/net/bna/bfa_cee.c
new file mode 100644
index 000000000000..f7b789a3b217
--- /dev/null
+++ b/drivers/net/bna/bfa_cee.c
@@ -0,0 +1,291 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "bfa_defs_cna.h"
20#include "cna.h"
21#include "bfa_cee.h"
22#include "bfi_cna.h"
23#include "bfa_ioc.h"
24
25#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
26#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
27
28static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg);
29static void bfa_cee_format_cee_cfg(void *buffer);
30
31static void
32bfa_cee_format_cee_cfg(void *buffer)
33{
34 struct bfa_cee_attr *cee_cfg = buffer;
35 bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
36}
37
38static void
39bfa_cee_stats_swap(struct bfa_cee_stats *stats)
40{
41 u32 *buffer = (u32 *)stats;
42 int i;
43
44 for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32));
45 i++) {
46 buffer[i] = ntohl(buffer[i]);
47 }
48}
49
50static void
51bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
52{
53 lldp_cfg->time_to_live =
54 ntohs(lldp_cfg->time_to_live);
55 lldp_cfg->enabled_system_cap =
56 ntohs(lldp_cfg->enabled_system_cap);
57}
58
59/**
60 * bfa_cee_attr_meminfo()
61 *
62 * @brief Returns the size of the DMA memory needed by CEE attributes
63 *
64 * @param[in] void
65 *
66 * @return Size of DMA region
67 */
68static u32
69bfa_cee_attr_meminfo(void)
70{
71 return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
72}
73/**
74 * bfa_cee_stats_meminfo()
75 *
76 * @brief Returns the size of the DMA memory needed by CEE stats
77 *
78 * @param[in] void
79 *
80 * @return Size of DMA region
81 */
82static u32
83bfa_cee_stats_meminfo(void)
84{
85 return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ);
86}
87
88/**
89 * bfa_cee_get_attr_isr()
90 *
91 * @brief CEE ISR for get-attributes responses from f/w
92 *
93 * @param[in] cee - Pointer to the CEE module
94 * status - Return status from the f/w
95 *
96 * @return void
97 */
98static void
99bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
100{
101 cee->get_attr_status = status;
102 if (status == BFA_STATUS_OK) {
103 memcpy(cee->attr, cee->attr_dma.kva,
104 sizeof(struct bfa_cee_attr));
105 bfa_cee_format_cee_cfg(cee->attr);
106 }
107 cee->get_attr_pending = false;
108 if (cee->cbfn.get_attr_cbfn)
109 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
110}
111
112/**
113 * bfa_cee_get_stats_isr()
114 *
115 * @brief CEE ISR for get-stats responses from f/w
116 *
117 * @param[in] cee - Pointer to the CEE module
118 * status - Return status from the f/w
119 *
120 * @return void
121 */
122static void
123bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
124{
125 cee->get_stats_status = status;
126 if (status == BFA_STATUS_OK) {
127 memcpy(cee->stats, cee->stats_dma.kva,
128 sizeof(struct bfa_cee_stats));
129 bfa_cee_stats_swap(cee->stats);
130 }
131 cee->get_stats_pending = false;
132 if (cee->cbfn.get_stats_cbfn)
133 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
134}
135
136/**
137 * bfa_cee_reset_stats_isr()
138 *
139 * @brief CEE ISR for reset-stats responses from f/w
140 *
141 * @param[in] cee - Pointer to the CEE module
142 * status - Return status from the f/w
143 *
144 * @return void
145 */
146static void
147bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
148{
149 cee->reset_stats_status = status;
150 cee->reset_stats_pending = false;
151 if (cee->cbfn.reset_stats_cbfn)
152 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
153}
154/**
155 * bfa_nw_cee_meminfo()
156 *
157 * @brief Returns the size of the DMA memory needed by CEE module
158 *
159 * @param[in] void
160 *
161 * @return Size of DMA region
162 */
163u32
164bfa_nw_cee_meminfo(void)
165{
166 return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
167}
168
169/**
170 * bfa_nw_cee_mem_claim()
171 *
172 * @brief Initialized CEE DMA Memory
173 *
174 * @param[in] cee CEE module pointer
175 * dma_kva Kernel Virtual Address of CEE DMA Memory
176 * dma_pa Physical Address of CEE DMA Memory
177 *
178 * @return void
179 */
180void
181bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
182{
183 cee->attr_dma.kva = dma_kva;
184 cee->attr_dma.pa = dma_pa;
185 cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
186 cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
187 cee->attr = (struct bfa_cee_attr *) dma_kva;
188 cee->stats = (struct bfa_cee_stats *)
189 (dma_kva + bfa_cee_attr_meminfo());
190}
191
192/**
193 * bfa_cee_isr()
194 *
195 * @brief Handles Mail-box interrupts for CEE module.
196 *
197 * @param[in] Pointer to the CEE module data structure.
198 *
199 * @return void
200 */
201
202static void
203bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
204{
205 union bfi_cee_i2h_msg_u *msg;
206 struct bfi_cee_get_rsp *get_rsp;
207 struct bfa_cee *cee = (struct bfa_cee *) cbarg;
208 msg = (union bfi_cee_i2h_msg_u *) m;
209 get_rsp = (struct bfi_cee_get_rsp *) m;
210 switch (msg->mh.msg_id) {
211 case BFI_CEE_I2H_GET_CFG_RSP:
212 bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
213 break;
214 case BFI_CEE_I2H_GET_STATS_RSP:
215 bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
216 break;
217 case BFI_CEE_I2H_RESET_STATS_RSP:
218 bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
219 break;
220 default:
221 BUG_ON(1);
222 }
223}
224
225/**
226 * bfa_cee_hbfail()
227 *
228 * @brief CEE module heart-beat failure handler.
229 *
230 * @param[in] Pointer to the CEE module data structure.
231 *
232 * @return void
233 */
234
235static void
236bfa_cee_hbfail(void *arg)
237{
238 struct bfa_cee *cee;
239 cee = (struct bfa_cee *) arg;
240
241 if (cee->get_attr_pending == true) {
242 cee->get_attr_status = BFA_STATUS_FAILED;
243 cee->get_attr_pending = false;
244 if (cee->cbfn.get_attr_cbfn) {
245 cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
246 BFA_STATUS_FAILED);
247 }
248 }
249 if (cee->get_stats_pending == true) {
250 cee->get_stats_status = BFA_STATUS_FAILED;
251 cee->get_stats_pending = false;
252 if (cee->cbfn.get_stats_cbfn) {
253 cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
254 BFA_STATUS_FAILED);
255 }
256 }
257 if (cee->reset_stats_pending == true) {
258 cee->reset_stats_status = BFA_STATUS_FAILED;
259 cee->reset_stats_pending = false;
260 if (cee->cbfn.reset_stats_cbfn) {
261 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
262 BFA_STATUS_FAILED);
263 }
264 }
265}
266
267/**
268 * bfa_nw_cee_attach()
269 *
270 * @brief CEE module-attach API
271 *
272 * @param[in] cee - Pointer to the CEE module data structure
273 * ioc - Pointer to the ioc module data structure
274 * dev - Pointer to the device driver module data structure
275 * The device driver specific mbox ISR functions have
276 * this pointer as one of the parameters.
277 *
278 * @return void
279 */
280void
281bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
282 void *dev)
283{
284	BUG_ON(cee == NULL);
285 cee->dev = dev;
286 cee->ioc = ioc;
287
288 bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
289 bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
290 bfa_nw_ioc_hbfail_register(cee->ioc, &cee->hbfail);
291}
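Editor's note: bfa_nw_cee_mem_claim() carves one caller-provided DMA block into an attributes area followed by a stats area, each sized by the *_meminfo() helpers and rounded up to BFA_DMA_ALIGN_SZ so the second region stays aligned. A sketch of the same carve-up; the struct sizes and the alignment constant are stand-ins:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define DMA_ALIGN_SZ 256	/* stand-in for BFA_DMA_ALIGN_SZ */

struct attr  { char payload[300]; };
struct stats { char payload[100]; };

static size_t roundup_sz(size_t sz, size_t align)
{
	return (sz + align - 1) / align * align;
}

static size_t attr_meminfo(void)
{
	return roundup_sz(sizeof(struct attr), DMA_ALIGN_SZ);
}

static size_t stats_meminfo(void)
{
	return roundup_sz(sizeof(struct stats), DMA_ALIGN_SZ);
}

int main(void)
{
	size_t total = attr_meminfo() + stats_meminfo();
	uint8_t *dma_kva = malloc(total);	/* one block, two regions */
	struct stats *stats;

	if (!dma_kva)
		return 1;
	stats = (struct stats *)(dma_kva + attr_meminfo());
	printf("total=%zu attr@0 stats@%zu\n", total,
	       (size_t)((uint8_t *)stats - dma_kva));
	free(dma_kva);
	return 0;
}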
diff --git a/drivers/net/bna/bfa_cee.h b/drivers/net/bna/bfa_cee.h
new file mode 100644
index 000000000000..20543d15b64f
--- /dev/null
+++ b/drivers/net/bna/bfa_cee.h
@@ -0,0 +1,64 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_CEE_H__
20#define __BFA_CEE_H__
21
22#include "bfa_defs_cna.h"
23#include "bfa_ioc.h"
24
25typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
26typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
27typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);
28typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, enum bfa_status status);
29
30struct bfa_cee_cbfn {
31 bfa_cee_get_attr_cbfn_t get_attr_cbfn;
32 void *get_attr_cbarg;
33 bfa_cee_get_stats_cbfn_t get_stats_cbfn;
34 void *get_stats_cbarg;
35 bfa_cee_reset_stats_cbfn_t reset_stats_cbfn;
36 void *reset_stats_cbarg;
37};
38
39struct bfa_cee {
40 void *dev;
41 bool get_attr_pending;
42 bool get_stats_pending;
43 bool reset_stats_pending;
44 enum bfa_status get_attr_status;
45 enum bfa_status get_stats_status;
46 enum bfa_status reset_stats_status;
47 struct bfa_cee_cbfn cbfn;
48 struct bfa_ioc_hbfail_notify hbfail;
49 struct bfa_cee_attr *attr;
50 struct bfa_cee_stats *stats;
51 struct bfa_dma attr_dma;
52 struct bfa_dma stats_dma;
53 struct bfa_ioc *ioc;
54 struct bfa_mbox_cmd get_cfg_mb;
55 struct bfa_mbox_cmd get_stats_mb;
56 struct bfa_mbox_cmd reset_stats_mb;
57};
58
59u32 bfa_nw_cee_meminfo(void);
60void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
61 u64 dma_pa);
62void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
63
64#endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/bna/bfa_defs.h
new file mode 100644
index 000000000000..29c1b8de2c2d
--- /dev/null
+++ b/drivers/net/bna/bfa_defs.h
@@ -0,0 +1,243 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_DEFS_H__
20#define __BFA_DEFS_H__
21
22#include "cna.h"
23#include "bfa_defs_status.h"
24#include "bfa_defs_mfg_comm.h"
25
26#define BFA_STRING_32 32
27#define BFA_VERSION_LEN 64
28
29/**
30 * ---------------------- adapter definitions ------------
31 */
32
33/**
34 * BFA adapter level attributes.
35 */
36enum {
37 BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
38 /*
39 *!< adapter serial num length
40 */
41 BFA_ADAPTER_MODEL_NAME_LEN = 16, /*!< model name length */
42 BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */
43 BFA_ADAPTER_MFG_NAME_LEN = 8, /*!< manufacturer name length */
44 BFA_ADAPTER_SYM_NAME_LEN = 64, /*!< adapter symbolic name length */
45 BFA_ADAPTER_OS_TYPE_LEN = 64, /*!< adapter os type length */
46};
47
48struct bfa_adapter_attr {
49 char manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
50 char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
51 u32 card_type;
52 char model[BFA_ADAPTER_MODEL_NAME_LEN];
53 char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
54 u64 pwwn;
55 char node_symname[FC_SYMNAME_MAX];
56 char hw_ver[BFA_VERSION_LEN];
57 char fw_ver[BFA_VERSION_LEN];
58 char optrom_ver[BFA_VERSION_LEN];
59 char os_type[BFA_ADAPTER_OS_TYPE_LEN];
60 struct bfa_mfg_vpd vpd;
61 struct mac mac;
62
63 u8 nports;
64 u8 max_speed;
65 u8 prototype;
66 char asic_rev;
67
68 u8 pcie_gen;
69 u8 pcie_lanes_orig;
70 u8 pcie_lanes;
71 u8 cna_capable;
72
73 u8 is_mezz;
74 u8 trunk_capable;
75};
76
77/**
78 * ---------------------- IOC definitions ------------
79 */
80
81enum {
82 BFA_IOC_DRIVER_LEN = 16,
83 BFA_IOC_CHIP_REV_LEN = 8,
84};
85
86/**
87 * Driver and firmware versions.
88 */
89struct bfa_ioc_driver_attr {
90 char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
91 char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
92 char fw_ver[BFA_VERSION_LEN]; /*!< firmware version */
93 char bios_ver[BFA_VERSION_LEN]; /*!< bios version */
94 char efi_ver[BFA_VERSION_LEN]; /*!< EFI version */
95 char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
96};
97
98/**
99 * IOC PCI device attributes
100 */
101struct bfa_ioc_pci_attr {
102 u16 vendor_id; /*!< PCI vendor ID */
103 u16 device_id; /*!< PCI device ID */
104 u16 ssid; /*!< subsystem ID */
105 u16 ssvid; /*!< subsystem vendor ID */
106 u32 pcifn; /*!< PCI device function */
107 u32 rsvd; /* padding */
108 char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
109};
110
111/**
112 * IOC states
113 */
114enum bfa_ioc_state {
115 BFA_IOC_RESET = 1, /*!< IOC is in reset state */
116 BFA_IOC_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
117 BFA_IOC_HWINIT = 3, /*!< IOC h/w is being initialized */
118 BFA_IOC_GETATTR = 4, /*!< IOC is being configured */
119 BFA_IOC_OPERATIONAL = 5, /*!< IOC is operational */
120 BFA_IOC_INITFAIL = 6, /*!< IOC hardware failure */
121 BFA_IOC_HBFAIL = 7, /*!< IOC heart-beat failure */
122 BFA_IOC_DISABLING = 8, /*!< IOC is being disabled */
123 BFA_IOC_DISABLED = 9, /*!< IOC is disabled */
124 BFA_IOC_FWMISMATCH = 10, /*!< IOC f/w different from driver's */
125};
126
127/**
128 * IOC firmware stats
129 */
130struct bfa_fw_ioc_stats {
131 u32 enable_reqs;
132 u32 disable_reqs;
133 u32 get_attr_reqs;
134 u32 dbg_sync;
135 u32 dbg_dump;
136 u32 unknown_reqs;
137};
138
139/**
140 * IOC driver stats
141 */
142struct bfa_ioc_drv_stats {
143 u32 ioc_isrs;
144 u32 ioc_enables;
145 u32 ioc_disables;
146 u32 ioc_hbfails;
147 u32 ioc_boots;
148 u32 stats_tmos;
149 u32 hb_count;
150 u32 disable_reqs;
151 u32 enable_reqs;
152 u32 disable_replies;
153 u32 enable_replies;
154};
155
156/**
157 * IOC statistics
158 */
159struct bfa_ioc_stats {
160 struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
161 struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
162};
163
164enum bfa_ioc_type {
165 BFA_IOC_TYPE_FC = 1,
166 BFA_IOC_TYPE_FCoE = 2,
167 BFA_IOC_TYPE_LL = 3,
168};
169
170/**
171 * IOC attributes returned in queries
172 */
173struct bfa_ioc_attr {
174 enum bfa_ioc_type ioc_type;
175 enum bfa_ioc_state state; /*!< IOC state */
176 struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */
177 struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */
178 struct bfa_ioc_pci_attr pci_attr;
179 u8 port_id; /*!< port number */
180 u8 rsvd[7]; /*!< 64bit align */
181};
182
183/**
184 * ---------------------- mfg definitions ------------
185 */
186
187/**
188 * Checksum size
189 */
190#define BFA_MFG_CHKSUM_SIZE 16
191
192#define BFA_MFG_PARTNUM_SIZE 14
193#define BFA_MFG_SUPPLIER_ID_SIZE 10
194#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20
195#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20
196#define BFA_MFG_SUPPLIER_REVISION_SIZE 4
197
198#pragma pack(1)
199
200/**
201 * @brief BFA adapter manufacturing block definition.
202 *
203 * All numerical fields are in big-endian format.
204 */
205struct bfa_mfg_block {
206 u8 version; /*!< manufacturing block version */
207 u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
208 u16 mfgsize; /*!< mfg block size */
209 u16 u16_chksum; /*!< old u16 checksum */
210 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
211 char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
212 u8 mfg_day; /*!< manufacturing day */
213 u8 mfg_month; /*!< manufacturing month */
214 u16 mfg_year; /*!< manufacturing year */
215 u64 mfg_wwn; /*!< wwn base for this adapter */
216 u8 num_wwn; /*!< number of wwns assigned */
217 u8 mfg_speeds; /*!< speeds allowed for this adapter */
218 u8 rsv[2];
219 char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
220 char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
221 char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
223 char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
225 mac_t mfg_mac; /*!< mac address */
226 u8 num_mac; /*!< number of mac addresses */
227 u8 rsv2;
228 u32 mfg_type; /*!< card type */
229 u8 rsv3[108];
230 u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
231};
232
233#pragma pack()
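/*
 * Read sketch (blk below is a hypothetical struct bfa_mfg_block *):
 * since the block is stored big-endian, multi-byte fields are converted
 * on read, e.g.:
 *
 *	u16 year = ntohs(blk->mfg_year);
 *	u32 card = ntohl(blk->mfg_type);
 */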
234
235/**
236 * ---------------------- pci definitions ------------
237 */
238
239#define bfa_asic_id_ct(devid) \
240 ((devid) == PCI_DEVICE_ID_BROCADE_CT || \
241 (devid) == PCI_DEVICE_ID_BROCADE_CT_FC)
242
243#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/bna/bfa_defs_cna.h b/drivers/net/bna/bfa_defs_cna.h
new file mode 100644
index 000000000000..7e0a9187bdd5
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_cna.h
@@ -0,0 +1,223 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_DEFS_CNA_H__
20#define __BFA_DEFS_CNA_H__
21
22#include "bfa_defs.h"
23
24/**
25 * @brief
26 * FC physical port statistics.
27 */
28struct bfa_port_fc_stats {
29 u64 secs_reset; /*!< Seconds since stats reset */
30 u64 tx_frames; /*!< Tx frames */
31 u64 tx_words; /*!< Tx words */
32 u64 tx_lip; /*!< Tx LIP */
33 u64 tx_nos; /*!< Tx NOS */
34 u64 tx_ols; /*!< Tx OLS */
35 u64 tx_lr; /*!< Tx LR */
36 u64 tx_lrr; /*!< Tx LRR */
37 u64 rx_frames; /*!< Rx frames */
38 u64 rx_words; /*!< Rx words */
39 u64 lip_count; /*!< Rx LIP */
40 u64 nos_count; /*!< Rx NOS */
41 u64 ols_count; /*!< Rx OLS */
42 u64 lr_count; /*!< Rx LR */
43 u64 lrr_count; /*!< Rx LRR */
44 u64 invalid_crcs; /*!< Rx CRC err frames */
45 u64 invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */
46 u64 undersized_frm; /*!< Rx undersized frames */
47 u64 oversized_frm; /*!< Rx oversized frames */
48 u64 bad_eof_frm; /*!< Rx frames with bad EOF */
49 u64 error_frames; /*!< Errored frames */
50 u64 dropped_frames; /*!< Dropped frames */
51 u64 link_failures; /*!< Link Failure (LF) count */
52 u64 loss_of_syncs; /*!< Loss of sync count */
53 u64 loss_of_signals; /*!< Loss of signal count */
54 u64 primseq_errs; /*!< Primitive sequence protocol err. */
55 u64 bad_os_count; /*!< Invalid ordered sets */
56 u64 err_enc_out; /*!< Encoding err nonframe_8b10b */
57 u64 err_enc; /*!< Encoding err frame_8b10b */
58};
59
60/**
61 * @brief
62 * Eth Physical Port statistics.
63 */
64struct bfa_port_eth_stats {
65 u64 secs_reset; /*!< Seconds since stats reset */
66 u64 frame_64; /*!< Frames 64 bytes */
67 u64 frame_65_127; /*!< Frames 65-127 bytes */
68 u64 frame_128_255; /*!< Frames 128-255 bytes */
69 u64 frame_256_511; /*!< Frames 256-511 bytes */
70 u64 frame_512_1023; /*!< Frames 512-1023 bytes */
71 u64 frame_1024_1518; /*!< Frames 1024-1518 bytes */
72 u64 frame_1519_1522; /*!< Frames 1519-1522 bytes */
73 u64 tx_bytes; /*!< Tx bytes */
74 u64 tx_packets; /*!< Tx packets */
75 u64 tx_mcast_packets; /*!< Tx multicast packets */
76 u64 tx_bcast_packets; /*!< Tx broadcast packets */
77 u64 tx_control_frame; /*!< Tx control frame */
78 u64 tx_drop; /*!< Tx drops */
79 u64 tx_jabber; /*!< Tx jabber */
80 u64 tx_fcs_error; /*!< Tx FCS errors */
81 u64 tx_fragments; /*!< Tx fragments */
82 u64 rx_bytes; /*!< Rx bytes */
83 u64 rx_packets; /*!< Rx packets */
84 u64 rx_mcast_packets; /*!< Rx multicast packets */
85 u64 rx_bcast_packets; /*!< Rx broadcast packets */
86 u64 rx_control_frames; /*!< Rx control frames */
87 u64 rx_unknown_opcode; /*!< Rx unknown opcode */
88 u64 rx_drop; /*!< Rx drops */
89 u64 rx_jabber; /*!< Rx jabber */
90 u64 rx_fcs_error; /*!< Rx FCS errors */
91 u64 rx_alignment_error; /*!< Rx alignment errors */
92 u64 rx_frame_length_error; /*!< Rx frame len errors */
93 u64 rx_code_error; /*!< Rx code errors */
94 u64 rx_fragments; /*!< Rx fragments */
95 u64 rx_pause; /*!< Rx pause */
96 u64 rx_zero_pause; /*!< Rx zero pause */
97 u64 tx_pause; /*!< Tx pause */
98 u64 tx_zero_pause; /*!< Tx zero pause */
99 u64 rx_fcoe_pause; /*!< Rx FCoE pause */
100 u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */
101 u64 tx_fcoe_pause; /*!< Tx FCoE pause */
102 u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */
103};
104
105/**
106 * @brief
107 * Port statistics.
108 */
109union bfa_port_stats_u {
110 struct bfa_port_fc_stats fc;
111 struct bfa_port_eth_stats eth;
112};
113
114#pragma pack(1)
115
116#define BFA_CEE_LLDP_MAX_STRING_LEN (128)
117#define BFA_CEE_DCBX_MAX_PRIORITY (8)
118#define BFA_CEE_DCBX_MAX_PGID (8)
119
120#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001
121#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002
122#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004
123#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008
124#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010
125#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020
126#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040
127#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080
128#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100
129#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200
130#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400
131
132/* LLDP string type */
133struct bfa_cee_lldp_str {
134 u8 sub_type;
135 u8 len;
136 u8 rsvd[2];
137 u8 value[BFA_CEE_LLDP_MAX_STRING_LEN];
138};
139
140/* LLDP parameters */
141struct bfa_cee_lldp_cfg {
142 struct bfa_cee_lldp_str chassis_id;
143 struct bfa_cee_lldp_str port_id;
144 struct bfa_cee_lldp_str port_desc;
145 struct bfa_cee_lldp_str sys_name;
146 struct bfa_cee_lldp_str sys_desc;
147 struct bfa_cee_lldp_str mgmt_addr;
148 u16 time_to_live;
149 u16 enabled_system_cap;
150};
151
152enum bfa_cee_dcbx_version {
153 DCBX_PROTOCOL_PRECEE = 1,
154 DCBX_PROTOCOL_CEE = 2,
155};
156
157enum bfa_cee_lls {
158 /* LLS is down because the TLV is not sent by the peer */
159 CEE_LLS_DOWN_NO_TLV = 0,
160 /* LLS is down as advertised by the peer */
161 CEE_LLS_DOWN = 1,
162 CEE_LLS_UP = 2,
163};
164
165/* CEE/DCBX parameters */
166struct bfa_cee_dcbx_cfg {
167 u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY];
168 u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID];
169 u8 pfc_primap; /* bitmap of priorities with PFC enabled */
170 u8 fcoe_primap; /* bitmap of priorities used for FCoE traffic */
171 u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */
172 u8 dcbx_version; /* operating version: CEE or preCEE */
173 u8 lls_fcoe; /* FCoE Logical Link Status */
174 u8 lls_lan; /* LAN Logical Link Status */
175 u8 rsvd[2];
176};
177
178/* CEE status */
179/* Tri-state for the benefit of the port list command */
180enum bfa_cee_status {
181 CEE_UP = 0,
182 CEE_PHY_UP = 1,
183 CEE_LOOPBACK = 2,
184 CEE_PHY_DOWN = 3,
185};
186
187/* CEE Query */
188struct bfa_cee_attr {
189 u8 cee_status;
190 u8 error_reason;
191 struct bfa_cee_lldp_cfg lldp_remote;
192 struct bfa_cee_dcbx_cfg dcbx_remote;
193 mac_t src_mac;
194 u8 link_speed;
195 u8 nw_priority;
196 u8 filler[2];
197};
198
199/* LLDP/DCBX/CEE Statistics */
200struct bfa_cee_stats {
201 u32 lldp_tx_frames; /*!< LLDP Tx Frames */
202 u32 lldp_rx_frames; /*!< LLDP Rx Frames */
203 u32 lldp_rx_frames_invalid; /*!< LLDP Rx Frames invalid */
204 u32 lldp_rx_frames_new; /*!< LLDP Rx Frames new */
205 u32 lldp_tlvs_unrecognized; /*!< LLDP Rx unrecognized TLVs */
206 u32 lldp_rx_shutdown_tlvs; /*!< LLDP Rx shutdown TLVs */
207 u32 lldp_info_aged_out; /*!< LLDP remote info aged out */
208 u32 dcbx_phylink_ups; /*!< DCBX phy link ups */
209 u32 dcbx_phylink_downs; /*!< DCBX phy link downs */
210 u32 dcbx_rx_tlvs; /*!< DCBX Rx TLVs */
211 u32 dcbx_rx_tlvs_invalid; /*!< DCBX Rx TLVs invalid */
212 u32 dcbx_control_tlv_error; /*!< DCBX control TLV errors */
213 u32 dcbx_feature_tlv_error; /*!< DCBX feature TLV errors */
214 u32 dcbx_cee_cfg_new; /*!< DCBX new CEE cfg rcvd */
215 u32 cee_status_down; /*!< CEE status down */
216 u32 cee_status_up; /*!< CEE status up */
217 u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */
218 u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */
219};
220
221#pragma pack()
222
223#endif /* __BFA_DEFS_CNA_H__ */
diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h
new file mode 100644
index 000000000000..987978fcb3fe
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_mfg_comm.h
@@ -0,0 +1,244 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFA_DEFS_MFG_COMM_H__
19#define __BFA_DEFS_MFG_COMM_H__
20
21#include "cna.h"
22
23/**
24 * Manufacturing block version
25 */
26#define BFA_MFG_VERSION 2
27#define BFA_MFG_VERSION_UNINIT 0xFF
28
29/**
30 * Manufacturing block encrypted version
31 */
32#define BFA_MFG_ENC_VER 2
33
34/**
35 * Manufacturing block version 1 length
36 */
37#define BFA_MFG_VER1_LEN 128
38
39/**
40 * Manufacturing block header length
41 */
42#define BFA_MFG_HDR_LEN 4
43
44#define BFA_MFG_SERIALNUM_SIZE 11
45#define STRSZ(_n) (((_n) + 4) & ~3)
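/*
 * STRSZ() rounds (_n + 1) up to the next multiple of 4, i.e. it reserves
 * a NUL terminator while keeping on-flash strings 4-byte aligned.  A few
 * worked values (a sketch; the BUILD_BUG_ON is only illustrative):
 *
 *	STRSZ(11) == 12		(BFA_MFG_SERIALNUM_SIZE)
 *	STRSZ(14) == 16		(BFA_MFG_PARTNUM_SIZE)
 *	BUILD_BUG_ON(STRSZ(BFA_MFG_SERIALNUM_SIZE) != 12);
 */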
46
47/**
48 * Manufacturing card type
49 */
50enum {
51 BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
52 BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
53 BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */
54 BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */
55 BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */
56 BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */
57 BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */
58 BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */
59 BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */
60 BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */
61 BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */
62 BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */
63 BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */
64};
65
66#pragma pack(1)
67
68/**
69 * Check if 1-port card
70 */
71#define bfa_mfg_is_1port(type) (( \
72 (type) == BFA_MFG_TYPE_FC8P1 || \
73 (type) == BFA_MFG_TYPE_FC4P1 || \
74 (type) == BFA_MFG_TYPE_CNA10P1))
75
76/**
77 * Check if Mezz card
78 */
79#define bfa_mfg_is_mezz(type) (( \
80 (type) == BFA_MFG_TYPE_JAYHAWK || \
81 (type) == BFA_MFG_TYPE_WANCHESE || \
82 (type) == BFA_MFG_TYPE_ASTRA || \
83 (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
84 (type) == BFA_MFG_TYPE_LIGHTNING))
85
86/**
87 * Check if card type valid
88 */
89#define bfa_mfg_is_card_type_valid(type) (( \
90 (type) == BFA_MFG_TYPE_FC8P2 || \
91 (type) == BFA_MFG_TYPE_FC8P1 || \
92 (type) == BFA_MFG_TYPE_FC4P2 || \
93 (type) == BFA_MFG_TYPE_FC4P1 || \
94 (type) == BFA_MFG_TYPE_CNA10P2 || \
95 (type) == BFA_MFG_TYPE_CNA10P1 || \
96 bfa_mfg_is_mezz(type)))
97
98/**
99 * Check if the card has the old wwn/mac handling
100 */
101#define bfa_mfg_is_old_wwn_mac_model(type) (( \
102 (type) == BFA_MFG_TYPE_FC8P2 || \
103 (type) == BFA_MFG_TYPE_FC8P1 || \
104 (type) == BFA_MFG_TYPE_FC4P2 || \
105 (type) == BFA_MFG_TYPE_FC4P1 || \
106 (type) == BFA_MFG_TYPE_CNA10P2 || \
107 (type) == BFA_MFG_TYPE_CNA10P1 || \
108 (type) == BFA_MFG_TYPE_JAYHAWK || \
109 (type) == BFA_MFG_TYPE_WANCHESE))
110
111#define bfa_mfg_increment_wwn_mac(m, i) \
112do { \
113 u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2]; \
114 t += (i); \
115 (m)[0] = (t >> 16) & 0xFF; \
116 (m)[1] = (t >> 8) & 0xFF; \
117 (m)[2] = t & 0xFF; \
118} while (0)
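/*
 * Usage sketch (hypothetical locals; assumes mac_t wraps a 6-byte array
 * as in cna.h): the macro treats the last three MAC bytes as a 24-bit
 * big-endian counter, so deriving the MAC for port 1 from the base MAC
 * could look like:
 *
 *	u8 port_mac[6];
 *	memcpy(port_mac, blk->mfg_mac.mac, sizeof(port_mac));
 *	bfa_mfg_increment_wwn_mac(&port_mac[3], 1);
 */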
119
120#define bfa_mfg_adapter_prop_init_flash(card_type, prop) \
121do { \
122 switch ((card_type)) { \
123 case BFA_MFG_TYPE_FC8P2: \
124 case BFA_MFG_TYPE_JAYHAWK: \
125 case BFA_MFG_TYPE_ASTRA: \
126 (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
127 BFI_ADAPTER_SETP(SPEED, 8); \
128 break; \
129 case BFA_MFG_TYPE_FC8P1: \
130 (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
131 BFI_ADAPTER_SETP(SPEED, 8); \
132 break; \
133 case BFA_MFG_TYPE_FC4P2: \
134 (prop) = BFI_ADAPTER_SETP(NPORTS, 2) | \
135 BFI_ADAPTER_SETP(SPEED, 4); \
136 break; \
137 case BFA_MFG_TYPE_FC4P1: \
138 (prop) = BFI_ADAPTER_SETP(NPORTS, 1) | \
139 BFI_ADAPTER_SETP(SPEED, 4); \
140 break; \
141 case BFA_MFG_TYPE_CNA10P2: \
142 case BFA_MFG_TYPE_WANCHESE: \
143 case BFA_MFG_TYPE_LIGHTNING_P0: \
144 case BFA_MFG_TYPE_LIGHTNING: \
145 (prop) = BFI_ADAPTER_SETP(NPORTS, 2); \
146 (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
147 break; \
148 case BFA_MFG_TYPE_CNA10P1: \
149 (prop) = BFI_ADAPTER_SETP(NPORTS, 1); \
150 (prop) |= BFI_ADAPTER_SETP(SPEED, 10); \
151 break; \
152 default: \
153 (prop) = BFI_ADAPTER_UNSUPP; \
154 } \
155} while (0)
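/*
 * Usage sketch (assuming BFI_ADAPTER_SETP() packs a field into the
 * adapter property word, per bfi.h):
 *
 *	u32 prop;
 *	bfa_mfg_adapter_prop_init_flash(BFA_MFG_TYPE_CNA10P2, prop);
 *	prop now encodes 2 ports at 10 Gbps, i.e.
 *	BFI_ADAPTER_SETP(NPORTS, 2) | BFI_ADAPTER_SETP(SPEED, 10)
 */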
156
157enum {
158 CB_GPIO_TTV = (1), /*!< TTV debug capable cards */
159 CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */
160 CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */
161 CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */
162 CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */
163 CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */
164 CB_GPIO_PROTO = (1 << 7) /*!< 8G 2port FC prototypes */
165};
166
167#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \
168do { \
169 if ((gpio) & CB_GPIO_PROTO) { \
170 (prop) |= BFI_ADAPTER_PROTO; \
171 (gpio) &= ~CB_GPIO_PROTO; \
172 } \
173 switch ((gpio)) { \
174 case CB_GPIO_TTV: \
175 (prop) |= BFI_ADAPTER_TTV; \
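 /* fall through */ \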
176 case CB_GPIO_DFLY: \
177 case CB_GPIO_FC8P2: \
178 (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
179 (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
180 (card_type) = BFA_MFG_TYPE_FC8P2; \
181 break; \
182 case CB_GPIO_FC8P1: \
183 (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
184 (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \
185 (card_type) = BFA_MFG_TYPE_FC8P1; \
186 break; \
187 case CB_GPIO_FC4P2: \
188 (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \
189 (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
190 (card_type) = BFA_MFG_TYPE_FC4P2; \
191 break; \
192 case CB_GPIO_FC4P1: \
193 (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \
194 (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \
195 (card_type) = BFA_MFG_TYPE_FC4P1; \
196 break; \
197 default: \
198 (prop) |= BFI_ADAPTER_UNSUPP; \
199 (card_type) = BFA_MFG_TYPE_INVALID; \
200 } \
201} while (0)
202
203/**
204 * VPD data length
205 */
206#define BFA_MFG_VPD_LEN 512
207#define BFA_MFG_VPD_LEN_INVALID 0
208
209#define BFA_MFG_VPD_PCI_HDR_OFF 137
210#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
211#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
212
213/**
214 * VPD vendor tag
215 */
216enum {
217 BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
218 BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
219 BFA_MFG_VPD_HP = 2, /*!< vendor HP */
220 BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
221 BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
222 BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
223 BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
224 BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
225};
226
227/**
228 * @brief BFA adapter flash vpd data definition.
229 *
230 * All numerical fields are in big-endian format.
231 */
232struct bfa_mfg_vpd {
233 u8 version; /*!< vpd data version */
234 u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
235 u8 chksum; /*!< u8 checksum */
236 u8 vendor; /*!< vendor */
237 u8 len; /*!< vpd data length excluding header */
238 u8 rsv;
239 u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
240};
241
242#pragma pack()
243
244#endif /* __BFA_DEFS_MFG_COMM_H__ */
diff --git a/drivers/net/bna/bfa_defs_status.h b/drivers/net/bna/bfa_defs_status.h
new file mode 100644
index 000000000000..af951126375c
--- /dev/null
+++ b/drivers/net/bna/bfa_defs_status.h
@@ -0,0 +1,216 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFA_DEFS_STATUS_H__
19#define __BFA_DEFS_STATUS_H__
20
21/**
22 * API status return values
23 *
24 * NOTE: The error msgs are auto-generated from the comments. Only single-line
25 * comments are supported
26 */
27enum bfa_status {
28 BFA_STATUS_OK = 0,
29 BFA_STATUS_FAILED = 1,
30 BFA_STATUS_EINVAL = 2,
31 BFA_STATUS_ENOMEM = 3,
32 BFA_STATUS_ENOSYS = 4,
33 BFA_STATUS_ETIMER = 5,
34 BFA_STATUS_EPROTOCOL = 6,
35 BFA_STATUS_ENOFCPORTS = 7,
36 BFA_STATUS_NOFLASH = 8,
37 BFA_STATUS_BADFLASH = 9,
38 BFA_STATUS_SFP_UNSUPP = 10,
39 BFA_STATUS_UNKNOWN_VFID = 11,
40 BFA_STATUS_DATACORRUPTED = 12,
41 BFA_STATUS_DEVBUSY = 13,
42 BFA_STATUS_ABORTED = 14,
43 BFA_STATUS_NODEV = 15,
44 BFA_STATUS_HDMA_FAILED = 16,
45 BFA_STATUS_FLASH_BAD_LEN = 17,
46 BFA_STATUS_UNKNOWN_LWWN = 18,
47 BFA_STATUS_UNKNOWN_RWWN = 19,
48 BFA_STATUS_FCPT_LS_RJT = 20,
49 BFA_STATUS_VPORT_EXISTS = 21,
50 BFA_STATUS_VPORT_MAX = 22,
51 BFA_STATUS_UNSUPP_SPEED = 23,
52 BFA_STATUS_INVLD_DFSZ = 24,
53 BFA_STATUS_CNFG_FAILED = 25,
54 BFA_STATUS_CMD_NOTSUPP = 26,
55 BFA_STATUS_NO_ADAPTER = 27,
56 BFA_STATUS_LINKDOWN = 28,
57 BFA_STATUS_FABRIC_RJT = 29,
58 BFA_STATUS_UNKNOWN_VWWN = 30,
59 BFA_STATUS_NSLOGIN_FAILED = 31,
60 BFA_STATUS_NO_RPORTS = 32,
61 BFA_STATUS_NSQUERY_FAILED = 33,
62 BFA_STATUS_PORT_OFFLINE = 34,
63 BFA_STATUS_RPORT_OFFLINE = 35,
64 BFA_STATUS_TGTOPEN_FAILED = 36,
65 BFA_STATUS_BAD_LUNS = 37,
66 BFA_STATUS_IO_FAILURE = 38,
67 BFA_STATUS_NO_FABRIC = 39,
68 BFA_STATUS_EBADF = 40,
69 BFA_STATUS_EINTR = 41,
70 BFA_STATUS_EIO = 42,
71 BFA_STATUS_ENOTTY = 43,
72 BFA_STATUS_ENXIO = 44,
73 BFA_STATUS_EFOPEN = 45,
74 BFA_STATUS_VPORT_WWN_BP = 46,
75 BFA_STATUS_PORT_NOT_DISABLED = 47,
76 BFA_STATUS_BADFRMHDR = 48,
77 BFA_STATUS_BADFRMSZ = 49,
78 BFA_STATUS_MISSINGFRM = 50,
79 BFA_STATUS_LINKTIMEOUT = 51,
80 BFA_STATUS_NO_FCPIM_NEXUS = 52,
81 BFA_STATUS_CHECKSUM_FAIL = 53,
82 BFA_STATUS_GZME_FAILED = 54,
83 BFA_STATUS_SCSISTART_REQD = 55,
84 BFA_STATUS_IOC_FAILURE = 56,
85 BFA_STATUS_INVALID_WWN = 57,
86 BFA_STATUS_MISMATCH = 58,
87 BFA_STATUS_IOC_ENABLED = 59,
88 BFA_STATUS_ADAPTER_ENABLED = 60,
89 BFA_STATUS_IOC_NON_OP = 61,
90 BFA_STATUS_ADDR_MAP_FAILURE = 62,
91 BFA_STATUS_SAME_NAME = 63,
92 BFA_STATUS_PENDING = 64,
93 BFA_STATUS_8G_SPD = 65,
94 BFA_STATUS_4G_SPD = 66,
95 BFA_STATUS_AD_IS_ENABLE = 67,
96 BFA_STATUS_EINVAL_TOV = 68,
97 BFA_STATUS_EINVAL_QDEPTH = 69,
98 BFA_STATUS_VERSION_FAIL = 70,
99 BFA_STATUS_DIAG_BUSY = 71,
100 BFA_STATUS_BEACON_ON = 72,
101 BFA_STATUS_BEACON_OFF = 73,
102 BFA_STATUS_LBEACON_ON = 74,
103 BFA_STATUS_LBEACON_OFF = 75,
104 BFA_STATUS_PORT_NOT_INITED = 76,
105 BFA_STATUS_RPSC_ENABLED = 77,
106 BFA_STATUS_ENOFSAVE = 78,
107 BFA_STATUS_BAD_FILE = 79,
108 BFA_STATUS_RLIM_EN = 80,
109 BFA_STATUS_RLIM_DIS = 81,
110 BFA_STATUS_IOC_DISABLED = 82,
111 BFA_STATUS_ADAPTER_DISABLED = 83,
112 BFA_STATUS_BIOS_DISABLED = 84,
113 BFA_STATUS_AUTH_ENABLED = 85,
114 BFA_STATUS_AUTH_DISABLED = 86,
115 BFA_STATUS_ERROR_TRL_ENABLED = 87,
116 BFA_STATUS_ERROR_QOS_ENABLED = 88,
117 BFA_STATUS_NO_SFP_DEV = 89,
118 BFA_STATUS_MEMTEST_FAILED = 90,
119 BFA_STATUS_INVALID_DEVID = 91,
120 BFA_STATUS_QOS_ENABLED = 92,
121 BFA_STATUS_QOS_DISABLED = 93,
122 BFA_STATUS_INCORRECT_DRV_CONFIG = 94,
123 BFA_STATUS_REG_FAIL = 95,
124 BFA_STATUS_IM_INV_CODE = 96,
125 BFA_STATUS_IM_INV_VLAN = 97,
126 BFA_STATUS_IM_INV_ADAPT_NAME = 98,
127 BFA_STATUS_IM_LOW_RESOURCES = 99,
128 BFA_STATUS_IM_VLANID_IS_PVID = 100,
129 BFA_STATUS_IM_VLANID_EXISTS = 101,
130 BFA_STATUS_IM_FW_UPDATE_FAIL = 102,
131 BFA_STATUS_PORTLOG_ENABLED = 103,
132 BFA_STATUS_PORTLOG_DISABLED = 104,
133 BFA_STATUS_FILE_NOT_FOUND = 105,
134 BFA_STATUS_QOS_FC_ONLY = 106,
135 BFA_STATUS_RLIM_FC_ONLY = 107,
136 BFA_STATUS_CT_SPD = 108,
137 BFA_STATUS_LEDTEST_OP = 109,
138 BFA_STATUS_CEE_NOT_DN = 110,
139 BFA_STATUS_10G_SPD = 111,
140 BFA_STATUS_IM_INV_TEAM_NAME = 112,
141 BFA_STATUS_IM_DUP_TEAM_NAME = 113,
142 BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114,
143 BFA_STATUS_IM_ADAPT_HAS_VLANS = 115,
144 BFA_STATUS_IM_PVID_MISMATCH = 116,
145 BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117,
146 BFA_STATUS_IM_MTU_MISMATCH = 118,
147 BFA_STATUS_IM_RSS_MISMATCH = 119,
148 BFA_STATUS_IM_HDS_MISMATCH = 120,
149 BFA_STATUS_IM_OFFLOAD_MISMATCH = 121,
150 BFA_STATUS_IM_PORT_PARAMS = 122,
151 BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123,
152 BFA_STATUS_IM_CANNOT_REM_PRI = 124,
153 BFA_STATUS_IM_MAX_PORTS_REACHED = 125,
154 BFA_STATUS_IM_LAST_PORT_DELETE = 126,
155 BFA_STATUS_IM_NO_DRIVER = 127,
156 BFA_STATUS_IM_MAX_VLANS_REACHED = 128,
157 BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129,
158 BFA_STATUS_NO_MINPORT_DRIVER = 130,
159 BFA_STATUS_CARD_TYPE_MISMATCH = 131,
160 BFA_STATUS_BAD_ASICBLK = 132,
161 BFA_STATUS_NO_DRIVER = 133,
162 BFA_STATUS_INVALID_MAC = 134,
163 BFA_STATUS_IM_NO_VLAN = 135,
164 BFA_STATUS_IM_ETH_LB_FAILED = 136,
165 BFA_STATUS_IM_PVID_REMOVE = 137,
166 BFA_STATUS_IM_PVID_EDIT = 138,
167 BFA_STATUS_CNA_NO_BOOT = 139,
168 BFA_STATUS_IM_PVID_NON_ZERO = 140,
169 BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141,
170 BFA_STATUS_IM_GET_INETCFG_FAILED = 142,
171 BFA_STATUS_IM_NOT_BOUND = 143,
172 BFA_STATUS_INSUFFICIENT_PERMS = 144,
173 BFA_STATUS_IM_INV_VLAN_NAME = 145,
174 BFA_STATUS_CMD_NOTSUPP_CNA = 146,
175 BFA_STATUS_IM_PASSTHRU_EDIT = 147,
176 BFA_STATUS_IM_BIND_FAILED = 148,
177 BFA_STATUS_IM_UNBIND_FAILED = 149,
178 BFA_STATUS_IM_PORT_IN_TEAM = 150,
179 BFA_STATUS_IM_VLAN_NOT_FOUND = 151,
180 BFA_STATUS_IM_TEAM_NOT_FOUND = 152,
181 BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153,
182 BFA_STATUS_PBC = 154,
183 BFA_STATUS_DEVID_MISSING = 155,
184 BFA_STATUS_BAD_FWCFG = 156,
185 BFA_STATUS_CREATE_FILE = 157,
186 BFA_STATUS_INVALID_VENDOR = 158,
187 BFA_STATUS_SFP_NOT_READY = 159,
188 BFA_STATUS_FLASH_UNINIT = 160,
189 BFA_STATUS_FLASH_EMPTY = 161,
190 BFA_STATUS_FLASH_CKFAIL = 162,
191 BFA_STATUS_TRUNK_UNSUPP = 163,
192 BFA_STATUS_TRUNK_ENABLED = 164,
193 BFA_STATUS_TRUNK_DISABLED = 165,
194 BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166,
195 BFA_STATUS_BOOT_CODE_UPDATED = 167,
196 BFA_STATUS_BOOT_VERSION = 168,
197 BFA_STATUS_CARDTYPE_MISSING = 169,
198 BFA_STATUS_INVALID_CARDTYPE = 170,
199 BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171,
200 BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172,
201 BFA_STATUS_ETHBOOT_ENABLED = 173,
202 BFA_STATUS_ETHBOOT_DISABLED = 174,
203 BFA_STATUS_IOPROFILE_OFF = 175,
204 BFA_STATUS_NO_PORT_INSTANCE = 176,
205 BFA_STATUS_BOOT_CODE_TIMEDOUT = 177,
206 BFA_STATUS_NO_VPORT_LOCK = 178,
207 BFA_STATUS_VPORT_NO_CNFG = 179,
208 BFA_STATUS_MAX_VAL
209};
210
211enum bfa_eproto_status {
212 BFA_EPROTO_BAD_ACCEPT = 0,
213 BFA_EPROTO_UNKNOWN_RSP = 1
214};
215
216#endif /* __BFA_DEFS_STATUS_H__ */
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
new file mode 100644
index 000000000000..73493de98de5
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc.c
@@ -0,0 +1,1738 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "bfa_ioc.h"
20#include "cna.h"
21#include "bfi.h"
22#include "bfi_ctreg.h"
23#include "bfa_defs.h"
24
25/**
26 * IOC local definitions
27 */
28
29#define bfa_ioc_timer_start(__ioc) \
30 mod_timer(&(__ioc)->ioc_timer, jiffies + \
31 msecs_to_jiffies(BFA_IOC_TOV))
32#define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer)
33
34#define bfa_ioc_recovery_timer_start(__ioc) \
35 mod_timer(&(__ioc)->ioc_timer, jiffies + \
36 msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
37
38#define bfa_sem_timer_start(__ioc) \
39 mod_timer(&(__ioc)->sem_timer, jiffies + \
40 msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
41#define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer)
42
43#define bfa_hb_timer_start(__ioc) \
44 mod_timer(&(__ioc)->hb_timer, jiffies + \
45 msecs_to_jiffies(BFA_IOC_HB_TOV))
46#define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer)
47
48/**
49 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
50 */
51
52#define bfa_ioc_firmware_lock(__ioc) \
53 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
54#define bfa_ioc_firmware_unlock(__ioc) \
55 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
56#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
57#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
58#define bfa_ioc_notify_hbfail(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
60
61#define bfa_ioc_is_optrom(__ioc) \
62 (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
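/*
 * A linked-in fw image smaller than BFA_IOC_FWIMG_MINSZ is taken to mean
 * no usable image is compiled into the driver, i.e. the adapter boots
 * its firmware from option-ROM flash.
 */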
63
64#define bfa_ioc_mbox_cmd_pending(__ioc) \
65 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
66 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
67
68bool bfa_nw_auto_recover = true;
69
70/*
71 * forward declarations
72 */
73static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
74static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
75static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
76static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
77static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
78static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
79static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
80static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
81static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
82static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
83static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
84static void bfa_ioc_recover(struct bfa_ioc *ioc);
85static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
86static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
87static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
88static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
89 u32 boot_param);
90static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
91static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
92static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
93 char *serial_num);
94static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
95 char *fw_ver);
96static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
97 char *chip_rev);
98static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
99 char *optrom_ver);
100static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
101 char *manufacturer);
102static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
103static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
104static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
105
106/**
107 * IOC state machine events
108 */
109enum ioc_event {
110 IOC_E_ENABLE = 1, /*!< IOC enable request */
111 IOC_E_DISABLE = 2, /*!< IOC disable request */
112 IOC_E_TIMEOUT = 3, /*!< f/w response timeout */
113 IOC_E_FWREADY = 4, /*!< f/w initialization done */
114 IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */
115 IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */
116 IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */
117 IOC_E_HBFAIL = 8, /*!< heartbeat failure */
118 IOC_E_HWERROR = 9, /*!< hardware error interrupt */
119 IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
120 IOC_E_DETACH = 11, /*!< driver detach cleanup */
121};
122
123bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
124bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
125bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
126bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
127bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
128bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
129bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
130bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
131bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
132bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
133bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
134bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
135
136static struct bfa_sm_table ioc_sm_table[] = {
137 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
138 {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
139 {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
140 {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
141 {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
142 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
143 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
144 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
145 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
146 {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
147 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
148 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
149};
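/*
 * A quick sketch of the state-machine plumbing assumed here (per the
 * bfa_sm.h helpers): bfa_fsm_state_decl() declares a handler and an
 * _entry() hook per state, bfa_fsm_set_state() stores the handler and
 * runs the hook, and bfa_fsm_send_event() dispatches to the stored
 * handler:
 *
 *	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);  runs bfa_ioc_sm_reset_entry()
 *	bfa_fsm_send_event(ioc, IOC_E_ENABLE);     calls bfa_ioc_sm_reset(ioc, ...)
 */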
150
151/**
152 * Reset entry actions -- initialize state machine
153 */
154static void
155bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
156{
157 ioc->retry_count = 0;
158 ioc->auto_recover = bfa_nw_auto_recover;
159}
160
161/**
162 * Beginning state. IOC is in reset state.
163 */
164static void
165bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
166{
167 switch (event) {
168 case IOC_E_ENABLE:
169 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
170 break;
171
172 case IOC_E_DISABLE:
173 bfa_ioc_disable_comp(ioc);
174 break;
175
176 case IOC_E_DETACH:
177 break;
178
179 default:
180 bfa_sm_fault(ioc, event);
181 }
182}
183
184/**
185 * Semaphore should be acquired for version check.
186 */
187static void
188bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
189{
190 bfa_ioc_hw_sem_get(ioc);
191}
192
193/**
194 * Awaiting h/w semaphore to continue with version check.
195 */
196static void
197bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
198{
199 switch (event) {
200 case IOC_E_SEMLOCKED:
201 if (bfa_ioc_firmware_lock(ioc)) {
202 ioc->retry_count = 0;
203 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
204 } else {
205 bfa_nw_ioc_hw_sem_release(ioc);
206 bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
207 }
208 break;
209
210 case IOC_E_DISABLE:
211 bfa_ioc_disable_comp(ioc);
212 /* fall through */
213
214 case IOC_E_DETACH:
215 bfa_ioc_hw_sem_get_cancel(ioc);
216 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
217 break;
218
219 case IOC_E_FWREADY:
220 break;
221
222 default:
223 bfa_sm_fault(ioc, event);
224 }
225}
226
227/**
228 * Notify enable completion callback and generate mismatch AEN.
229 */
230static void
231bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
232{
233 /**
234 * Provide enable completion callback and AEN notification only once.
235 */
236 if (ioc->retry_count == 0)
237 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
238 ioc->retry_count++;
239 bfa_ioc_timer_start(ioc);
240}
241
242/**
243 * Awaiting firmware version match.
244 */
245static void
246bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
247{
248 switch (event) {
249 case IOC_E_TIMEOUT:
250 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
251 break;
252
253 case IOC_E_DISABLE:
254 bfa_ioc_disable_comp(ioc);
255 /* fall through */
256
257 case IOC_E_DETACH:
258 bfa_ioc_timer_stop(ioc);
259 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
260 break;
261
262 case IOC_E_FWREADY:
263 break;
264
265 default:
266 bfa_sm_fault(ioc, event);
267 }
268}
269
270/**
271 * Request for semaphore.
272 */
273static void
274bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
275{
276 bfa_ioc_hw_sem_get(ioc);
277}
278
279/**
280 * Awaiting semaphore for h/w initialization.
281 */
282static void
283bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
284{
285 switch (event) {
286 case IOC_E_SEMLOCKED:
287 ioc->retry_count = 0;
288 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
289 break;
290
291 case IOC_E_DISABLE:
292 bfa_ioc_hw_sem_get_cancel(ioc);
293 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
294 break;
295
296 default:
297 bfa_sm_fault(ioc, event);
298 }
299}
300
301static void
302bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
303{
304 bfa_ioc_timer_start(ioc);
305 bfa_ioc_reset(ioc, false);
306}
307
308/**
309 * @brief
310 * Hardware is being initialized. Interrupts are enabled.
311 * Holding hardware semaphore lock.
312 */
313static void
314bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
315{
316 switch (event) {
317 case IOC_E_FWREADY:
318 bfa_ioc_timer_stop(ioc);
319 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
320 break;
321
322 case IOC_E_HWERROR:
323 bfa_ioc_timer_stop(ioc);
324 /* fall through */
325
326 case IOC_E_TIMEOUT:
327 ioc->retry_count++;
328 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
329 bfa_ioc_timer_start(ioc);
330 bfa_ioc_reset(ioc, true);
331 break;
332 }
333
334 bfa_nw_ioc_hw_sem_release(ioc);
335 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
336 break;
337
338 case IOC_E_DISABLE:
339 bfa_nw_ioc_hw_sem_release(ioc);
340 bfa_ioc_timer_stop(ioc);
341 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
342 break;
343
344 default:
345 bfa_sm_fault(ioc, event);
346 }
347}
348
349static void
350bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
351{
352 bfa_ioc_timer_start(ioc);
353 bfa_ioc_send_enable(ioc);
354}
355
356/**
357 * Host IOC function is being enabled, awaiting response from firmware.
358 * Semaphore is acquired.
359 */
360static void
361bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
362{
363 switch (event) {
364 case IOC_E_FWRSP_ENABLE:
365 bfa_ioc_timer_stop(ioc);
366 bfa_nw_ioc_hw_sem_release(ioc);
367 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
368 break;
369
370 case IOC_E_HWERROR:
371 bfa_ioc_timer_stop(ioc);
372 /* fall through */
373
374 case IOC_E_TIMEOUT:
375 ioc->retry_count++;
376 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
377 writel(BFI_IOC_UNINIT,
378 ioc->ioc_regs.ioc_fwstate);
379 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
380 break;
381 }
382
383 bfa_nw_ioc_hw_sem_release(ioc);
384 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
385 break;
386
387 case IOC_E_DISABLE:
388 bfa_ioc_timer_stop(ioc);
389 bfa_nw_ioc_hw_sem_release(ioc);
390 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
391 break;
392
393 case IOC_E_FWREADY:
394 bfa_ioc_send_enable(ioc);
395 break;
396
397 default:
398 bfa_sm_fault(ioc, event);
399 }
400}
401
402static void
403bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
404{
405 bfa_ioc_timer_start(ioc);
406 bfa_ioc_send_getattr(ioc);
407}
408
409/**
410 * @brief
411 * IOC configuration in progress. Timer is active.
412 */
413static void
414bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
415{
416 switch (event) {
417 case IOC_E_FWRSP_GETATTR:
418 bfa_ioc_timer_stop(ioc);
419 bfa_ioc_check_attr_wwns(ioc);
420 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
421 break;
422
423 case IOC_E_HWERROR:
424 bfa_ioc_timer_stop(ioc);
425 /* fall through */
426
427 case IOC_E_TIMEOUT:
428 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
429 break;
430
431 case IOC_E_DISABLE:
432 bfa_ioc_timer_stop(ioc);
433 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
434 break;
435
436 default:
437 bfa_sm_fault(ioc, event);
438 }
439}
440
441static void
442bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
443{
444 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
445 bfa_ioc_hb_monitor(ioc);
446}
447
448static void
449bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
450{
451 switch (event) {
452 case IOC_E_ENABLE:
453 break;
454
455 case IOC_E_DISABLE:
456 bfa_ioc_hb_stop(ioc);
457 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
458 break;
459
460 case IOC_E_HWERROR:
461 case IOC_E_FWREADY:
462 /**
463 * Hard error or IOC recovery by other function.
464 * Treat it the same as a heartbeat failure.
465 */
466 bfa_ioc_hb_stop(ioc);
467 /* !!! fall through !!! */
468
469 case IOC_E_HBFAIL:
470 bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
471 break;
472
473 default:
474 bfa_sm_fault(ioc, event);
475 }
476}
477
478static void
479bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
480{
481 bfa_ioc_timer_start(ioc);
482 bfa_ioc_send_disable(ioc);
483}
484
485/**
486 * IOC is being disabled
487 */
488static void
489bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
490{
491 switch (event) {
492 case IOC_E_FWRSP_DISABLE:
493 bfa_ioc_timer_stop(ioc);
494 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
495 break;
496
497 case IOC_E_HWERROR:
498 bfa_ioc_timer_stop(ioc);
499 /*
500 * !!! fall through !!!
501 */
502
503 case IOC_E_TIMEOUT:
504 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
505 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
506 break;
507
508 default:
509 bfa_sm_fault(ioc, event);
510 }
511}
512
513/**
514 * IOC disable completion entry.
515 */
516static void
517bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
518{
519 bfa_ioc_disable_comp(ioc);
520}
521
522static void
523bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
524{
525 switch (event) {
526 case IOC_E_ENABLE:
527 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
528 break;
529
530 case IOC_E_DISABLE:
531 ioc->cbfn->disable_cbfn(ioc->bfa);
532 break;
533
534 case IOC_E_FWREADY:
535 break;
536
537 case IOC_E_DETACH:
538 bfa_ioc_firmware_unlock(ioc);
539 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
540 break;
541
542 default:
543 bfa_sm_fault(ioc, event);
544 }
545}
546
547static void
548bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
549{
550 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
551 bfa_ioc_timer_start(ioc);
552}
553
554/**
555 * @brief
556 * Hardware initialization failed.
557 */
558static void
559bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
560{
561 switch (event) {
562 case IOC_E_DISABLE:
563 bfa_ioc_timer_stop(ioc);
564 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
565 break;
566
567 case IOC_E_DETACH:
568 bfa_ioc_timer_stop(ioc);
569 bfa_ioc_firmware_unlock(ioc);
570 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
571 break;
572
573 case IOC_E_TIMEOUT:
574 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
575 break;
576
577 default:
578 bfa_sm_fault(ioc, event);
579 }
580}
581
582static void
583bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
584{
585 struct list_head *qe;
586 struct bfa_ioc_hbfail_notify *notify;
587
588 /**
589 * Mark IOC as failed in hardware and stop firmware.
590 */
591 bfa_ioc_lpu_stop(ioc);
592 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
593
594 /**
595 * Notify other functions on HB failure.
596 */
597 bfa_ioc_notify_hbfail(ioc);
598
599 /**
600 * Notify driver and common modules registered for notification.
601 */
602 ioc->cbfn->hbfail_cbfn(ioc->bfa);
603 list_for_each(qe, &ioc->hb_notify_q) {
604 notify = (struct bfa_ioc_hbfail_notify *) qe;
605 notify->cbfn(notify->cbarg);
606 }
607
608 /**
609 * Flush any queued up mailbox requests.
610 */
611 bfa_ioc_mbox_hbfail(ioc);
612
613 /**
614 * Trigger auto-recovery after a delay.
615 */
616 if (ioc->auto_recover)
617 mod_timer(&ioc->ioc_timer, jiffies +
618 msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
619}
620
621/**
622 * @brief
623 * IOC heartbeat failure.
624 */
625static void
626bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
627{
628 switch (event) {
630 case IOC_E_ENABLE:
631 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
632 break;
633
634 case IOC_E_DISABLE:
635 if (ioc->auto_recover)
636 bfa_ioc_timer_stop(ioc);
637 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
638 break;
639
640 case IOC_E_TIMEOUT:
641 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
642 break;
643
644 case IOC_E_FWREADY:
645 /**
646 * Recovery is already initiated by the other function.
647 */
648 break;
649
650 case IOC_E_HWERROR:
651 /*
652 * HB failure notification, ignore.
653 */
654 break;
655 default:
656 bfa_sm_fault(ioc, event);
657 }
658}
659
660/**
661 * BFA IOC private functions
662 */
663
664static void
665bfa_ioc_disable_comp(struct bfa_ioc *ioc)
666{
667 struct list_head *qe;
668 struct bfa_ioc_hbfail_notify *notify;
669
670 ioc->cbfn->disable_cbfn(ioc->bfa);
671
672 /**
673 * Notify common modules registered for notification.
674 */
675 list_for_each(qe, &ioc->hb_notify_q) {
676 notify = (struct bfa_ioc_hbfail_notify *) qe;
677 notify->cbfn(notify->cbarg);
678 }
679}
680
681void
682bfa_nw_ioc_sem_timeout(void *ioc_arg)
683{
684 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
685
686 bfa_ioc_hw_sem_get(ioc);
687}
688
689bool
690bfa_nw_ioc_sem_get(void __iomem *sem_reg)
691{
692 u32 r32;
693 int cnt = 0;
694#define BFA_SEM_SPINCNT 3000
695
696 r32 = readl(sem_reg);
697
698 while (r32 && (cnt < BFA_SEM_SPINCNT)) {
699 cnt++;
700 udelay(2);
701 r32 = readl(sem_reg);
702 }
703
704 if (r32 == 0)
705 return true;
706
707 BUG_ON(cnt >= BFA_SEM_SPINCNT);
708 return false;
709}
710
711void
712bfa_nw_ioc_sem_release(void __iomem *sem_reg)
713{
714 writel(1, sem_reg);
715}
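/*
 * Callers bracket shared-chip accesses with this get/release pair, as
 * bfa_ioc_pll_init() does further down:
 *
 *	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
 *	... touch registers shared across PCI functions ...
 *	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 */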
716
717static void
718bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
719{
720 u32 r32;
721
722 /**
723 * First read to the semaphore register will return 0, subsequent reads
724 * will return 1. Semaphore is released by writing 1 to the register
725 */
726 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
727 if (r32 == 0) {
728 bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
729 return;
730 }
731
732 mod_timer(&ioc->sem_timer, jiffies +
733 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
734}
735
736void
737bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
738{
739 writel(1, ioc->ioc_regs.ioc_sem_reg);
740}
741
742static void
743bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
744{
745 del_timer(&ioc->sem_timer);
746}
747
748/**
749 * @brief
750 * Initialize LPU local memory (aka secondary memory / SRAM)
751 */
752static void
753bfa_ioc_lmem_init(struct bfa_ioc *ioc)
754{
755 u32 pss_ctl;
756 int i;
757#define PSS_LMEM_INIT_TIME 10000
758
759 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
760 pss_ctl &= ~__PSS_LMEM_RESET;
761 pss_ctl |= __PSS_LMEM_INIT_EN;
762
763 /*
764 * i2c workaround 12.5khz clock
765 */
766 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
767 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
768
769 /**
770 * wait for memory initialization to be complete
771 */
772 i = 0;
773 do {
774 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
775 i++;
776 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
777
778 /**
779 * If memory initialization is not successful, IOC timeout will catch
780 * such failures.
781 */
782 BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
783
784 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
785 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
786}
787
788static void
789bfa_ioc_lpu_start(struct bfa_ioc *ioc)
790{
791 u32 pss_ctl;
792
793 /**
794 * Take processor out of reset.
795 */
796 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
797 pss_ctl &= ~__PSS_LPU0_RESET;
798
799 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
800}
801
802static void
803bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
804{
805 u32 pss_ctl;
806
807 /**
808 * Put processors in reset.
809 */
810 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
811 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
812
813 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
814}
815
816/**
817 * Get driver and firmware versions.
818 */
819void
820bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
821{
822 u32 pgnum, pgoff;
823 u32 loff = 0;
824 int i;
825 u32 *fwsig = (u32 *) fwhdr;
826
827 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
828 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
829 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
830
831 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
832 i++) {
833 fwsig[i] =
834 swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
835 loff += sizeof(u32);
836 }
837}
838
839/**
840 * Returns true if the firmware versions match.
841 */
842bool
843bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
844{
845 struct bfi_ioc_image_hdr *drv_fwhdr;
846 int i;
847
848 drv_fwhdr = (struct bfi_ioc_image_hdr *)
849 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
850
851 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
852 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
853 return false;
854 }
855
856 return true;
857}
858
859/**
860 * Return true if current running version is valid. Firmware signature and
861 * execution context (driver/bios) must match.
862 */
863static bool
864bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
865{
866 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
867
868 /**
869 * If bios/efi boot (flash based) -- return true
870 */
871 if (bfa_ioc_is_optrom(ioc))
872 return true;
873
874 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
875 drv_fwhdr = (struct bfi_ioc_image_hdr *)
876 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
877
878 if (fwhdr.signature != drv_fwhdr->signature)
879 return false;
880
881 if (fwhdr.exec != drv_fwhdr->exec)
882 return false;
883
884 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
885}
886
887/**
888 * Conditionally flush any pending message from firmware at start.
889 */
890static void
891bfa_ioc_msgflush(struct bfa_ioc *ioc)
892{
893 u32 r32;
894
895 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
896 if (r32)
897 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
898}
899
900/**
901 * @img ioc_init_logic.jpg
902 */
903static void
904bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
905{
906 enum bfi_ioc_state ioc_fwstate;
907 bool fwvalid;
908
909 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
910
911 if (force)
912 ioc_fwstate = BFI_IOC_UNINIT;
913
914 /**
915 * check if firmware is valid
916 */
917 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
918 false : bfa_ioc_fwver_valid(ioc);
919
920 if (!fwvalid) {
921 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
922 return;
923 }
924
925 /**
926 * If hardware initialization is in progress (initialized by the other IOC),
927 * just wait for an initialization completion interrupt.
928 */
929 if (ioc_fwstate == BFI_IOC_INITING) {
930 ioc->cbfn->reset_cbfn(ioc->bfa);
931 return;
932 }
933
934 /**
935 * If IOC function is disabled and firmware version is same,
936 * just re-enable IOC.
937 *
938 * If option rom, IOC must not be in operational state. With
939 * convergence, IOC will be in operational state when 2nd driver
940 * is loaded.
941 */
942 if (ioc_fwstate == BFI_IOC_DISABLED ||
943 (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
944 /**
945 * When using MSI-X any pending firmware ready event should
946 * be flushed. Otherwise MSI-X interrupts are not delivered.
947 */
948 bfa_ioc_msgflush(ioc);
949 ioc->cbfn->reset_cbfn(ioc->bfa);
950 bfa_fsm_send_event(ioc, IOC_E_FWREADY);
951 return;
952 }
953
954 /**
955 * Initialize the h/w for any other states.
956 */
957 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
958}
959
960void
961bfa_nw_ioc_timeout(void *ioc_arg)
962{
963 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
964
965 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
966}
967
968static void
969bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
970{
971 u32 *msgp = (u32 *) ioc_msg;
972 u32 i;
973
974 BUG_ON(len > BFI_IOC_MSGLEN_MAX);
975
976 /*
977 * first write msg to mailbox registers
978 */
979 for (i = 0; i < len / sizeof(u32); i++)
980 writel(cpu_to_le32(msgp[i]),
981 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
982
983 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
984 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
985
986 /*
987 * write 1 to mailbox CMD to trigger LPU event
988 */
989 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
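 /* read back to flush the posted write */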
990 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
991}
992
993static void
994bfa_ioc_send_enable(struct bfa_ioc *ioc)
995{
996 struct bfi_ioc_ctrl_req enable_req;
997 struct timeval tv;
998
999 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1000 bfa_ioc_portid(ioc));
1001 enable_req.ioc_class = ioc->ioc_mc;
1002 do_gettimeofday(&tv);
1003 enable_req.tv_sec = ntohl(tv.tv_sec);
1004 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1005}
1006
1007static void
1008bfa_ioc_send_disable(struct bfa_ioc *ioc)
1009{
1010 struct bfi_ioc_ctrl_req disable_req;
1011
1012 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1013 bfa_ioc_portid(ioc));
1014 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1015}
1016
1017static void
1018bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1019{
1020 struct bfi_ioc_getattr_req attr_req;
1021
1022 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1023 bfa_ioc_portid(ioc));
1024 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1025 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1026}
1027
1028void
1029bfa_nw_ioc_hb_check(void *cbarg)
1030{
1031 struct bfa_ioc *ioc = cbarg;
1032 u32 hb_count;
1033
1034 hb_count = readl(ioc->ioc_regs.heartbeat);
1035 if (ioc->hb_count == hb_count) {
1036 pr_crit("Firmware heartbeat failure at %d\n", hb_count);
1037 bfa_ioc_recover(ioc);
1038 return;
1039 }
1040 ioc->hb_count = hb_count;
1042
1043 bfa_ioc_mbox_poll(ioc);
1044 mod_timer(&ioc->hb_timer, jiffies +
1045 msecs_to_jiffies(BFA_IOC_HB_TOV));
1046}
1047
1048static void
1049bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1050{
1051 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1052 mod_timer(&ioc->hb_timer, jiffies +
1053 msecs_to_jiffies(BFA_IOC_HB_TOV));
1054}
1055
1056static void
1057bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1058{
1059 del_timer(&ioc->hb_timer);
1060}
1061
1062/**
1063 * @brief
1064 * Initiate a full firmware download.
1065 */
1066static void
1067bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1068 u32 boot_param)
1069{
1070 u32 *fwimg;
1071 u32 pgnum, pgoff;
1072 u32 loff = 0;
1073 u32 chunkno = 0;
1074 u32 i;
1075
1076 /**
1077 * Initialize LMEM first before code download
1078 */
1079 bfa_ioc_lmem_init(ioc);
1080
1081 /**
1082 * Flash based firmware boot
1083 */
1084 if (bfa_ioc_is_optrom(ioc))
1085 boot_type = BFI_BOOT_TYPE_FLASH;
1086 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1087
1088 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1089 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1090
1091 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1092
1093 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1094 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1095 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1096 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1097 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1098 }
1099
1100 /**
1101 * write smem
1102 */
1103 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1104 ((ioc->ioc_regs.smem_page_start) + (loff)));
1105
1106 loff += sizeof(u32);
1107
1108 /**
1109 * handle page offset wrap around
1110 */
1111 loff = PSS_SMEM_PGOFF(loff);
1112 if (loff == 0) {
1113 pgnum++;
1114 writel(pgnum,
1115 ioc->ioc_regs.host_page_num_fn);
1116 }
1117 }
1118
1119 writel(bfa_ioc_smem_pgnum(ioc, 0),
1120 ioc->ioc_regs.host_page_num_fn);
1121
1122 /*
1123 * Set boot type and boot param at the end.
1124 */
1125 writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1126 + (BFI_BOOT_TYPE_OFF)));
1127 writel(boot_param, ((ioc->ioc_regs.smem_page_start)
1128 + (BFI_BOOT_PARAM_OFF)));
1129}
1130
1131static void
1132bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1133{
1134 bfa_ioc_hwinit(ioc, force);
1135}
1136
1137/**
1138 * @brief
1139 * Update BFA configuration from firmware configuration.
1140 */
1141static void
1142bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1143{
1144 struct bfi_ioc_attr *attr = ioc->attr;
1145
1146 attr->adapter_prop = ntohl(attr->adapter_prop);
1147 attr->card_type = ntohl(attr->card_type);
1148 attr->maxfrsize = ntohs(attr->maxfrsize);
1149
1150 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1151}
1152
1153/**
1154 * Attach time initialization of mbox logic.
1155 */
1156static void
1157bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1158{
1159 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1160 int mc;
1161
1162 INIT_LIST_HEAD(&mod->cmd_q);
1163 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1164 mod->mbhdlr[mc].cbfn = NULL;
1165 mod->mbhdlr[mc].cbarg = ioc->bfa;
1166 }
1167}
1168
1169/**
1170 * Mbox poll timer -- restarts any pending mailbox requests.
1171 */
1172static void
1173bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1174{
1175 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1176 struct bfa_mbox_cmd *cmd;
1177 u32 stat;
1178
1179 /**
1180 * If no command pending, do nothing
1181 */
1182 if (list_empty(&mod->cmd_q))
1183 return;
1184
1185 /**
1186 * If previous command is not yet fetched by firmware, do nothing
1187 */
1188 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1189 if (stat)
1190 return;
1191
1192 /**
1193 * Enqueue command to firmware.
1194 */
1195 bfa_q_deq(&mod->cmd_q, &cmd);
1196 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1197}
1198
1199/**
1200 * Cleanup any pending requests.
1201 */
1202static void
1203bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
1204{
1205 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1206 struct bfa_mbox_cmd *cmd;
1207
1208 while (!list_empty(&mod->cmd_q))
1209 bfa_q_deq(&mod->cmd_q, &cmd);
1210}
1211
1212/**
1213 * IOC public
1214 */
1215static enum bfa_status
1216bfa_ioc_pll_init(struct bfa_ioc *ioc)
1217{
1218 /*
1219 * Hold semaphore so that nobody can access the chip during init.
1220 */
1221 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1222
1223 bfa_ioc_pll_init_asic(ioc);
1224
1225 ioc->pllinit = true;
1226 /*
1227 * release semaphore.
1228 */
1229 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
1230
1231 return BFA_STATUS_OK;
1232}
1233
1234/**
1235 * Boot firmware. Also used by the diag module to do a firmware boot
1236 * with memory test as the entry vector.
1237 */
1238static void
1239bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
1240{
1241 void __iomem *rb;
1242
1243 bfa_ioc_stats(ioc, ioc_boots);
1244
1245 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1246 return;
1247
1248 /**
1249 * Initialize IOC state of all functions on a chip reset.
1250 */
1251 rb = ioc->pcidev.pci_bar_kva;
1252 if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
1253 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1254 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1255 } else {
1256 writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
1257 writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
1258 }
1259
1260 bfa_ioc_msgflush(ioc);
1261 bfa_ioc_download_fw(ioc, boot_type, boot_param);
1262
1263 /**
1264 * Enable interrupts just before starting LPU
1265 */
1266 ioc->cbfn->reset_cbfn(ioc->bfa);
1267 bfa_ioc_lpu_start(ioc);
1268}
1269
1270/**
1271 * Enable/disable IOC failure auto recovery.
1272 */
1273void
1274bfa_nw_ioc_auto_recover(bool auto_recover)
1275{
1276 bfa_nw_auto_recover = auto_recover;
1277}
1278
1279bool
1280bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
1281{
1282 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
1283}
1284
1285static void
1286bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1287{
1288 u32 *msgp = mbmsg;
1289 u32 r32;
1290 int i;
1291
1292 /**
1293 * read the MBOX msg
1294 */
1295 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1296 i++) {
1297 r32 = readl(ioc->ioc_regs.lpu_mbox +
1298 i * sizeof(u32));
1299 msgp[i] = htonl(r32);
1300 }
1301
1302 /**
1303 * turn off mailbox interrupt by clearing mailbox status
1304 */
1305 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1306 readl(ioc->ioc_regs.lpu_mbox_cmd);
1307}
1308
1309static void
1310bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1311{
1312 union bfi_ioc_i2h_msg_u *msg;
1313
1314 msg = (union bfi_ioc_i2h_msg_u *) m;
1315
1316 bfa_ioc_stats(ioc, ioc_isrs);
1317
1318 switch (msg->mh.msg_id) {
1319 case BFI_IOC_I2H_HBEAT:
1320 break;
1321
1322 case BFI_IOC_I2H_READY_EVENT:
1323 bfa_fsm_send_event(ioc, IOC_E_FWREADY);
1324 break;
1325
1326 case BFI_IOC_I2H_ENABLE_REPLY:
1327 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
1328 break;
1329
1330 case BFI_IOC_I2H_DISABLE_REPLY:
1331 bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
1332 break;
1333
1334 case BFI_IOC_I2H_GETATTR_REPLY:
1335 bfa_ioc_getattr_reply(ioc);
1336 break;
1337
1338 default:
1339 BUG_ON(1);
1340 }
1341}
1342
1343/**
1344 * IOC attach time initialization and setup.
1345 *
1346 * @param[in] ioc memory for IOC
1347 * @param[in]	bfa	driver instance structure
 * @param[in]	cbfn	callback function pointers (struct bfa_ioc_cbfn)
1348 */
1349void
1350bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1351{
1352 ioc->bfa = bfa;
1353 ioc->cbfn = cbfn;
1354 ioc->fcmode = false;
1355 ioc->pllinit = false;
1356 ioc->dbg_fwsave_once = true;
1357
1358 bfa_ioc_mbox_attach(ioc);
1359 INIT_LIST_HEAD(&ioc->hb_notify_q);
1360
1361 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
1362}
1363
1364/**
1365 * Driver detach time IOC cleanup.
1366 */
1367void
1368bfa_nw_ioc_detach(struct bfa_ioc *ioc)
1369{
1370 bfa_fsm_send_event(ioc, IOC_E_DETACH);
1371}
1372
1373/**
1374 * Setup IOC PCI properties.
1375 *
1376 * @param[in] pcidev PCI device information for this IOC
1377 */
1378void
1379bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
1380 enum bfi_mclass mc)
1381{
1382 ioc->ioc_mc = mc;
1383 ioc->pcidev = *pcidev;
1384 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
1385 ioc->cna = ioc->ctdev && !ioc->fcmode;
1386
1387 bfa_nw_ioc_set_ct_hwif(ioc);
1388
1389 bfa_ioc_map_port(ioc);
1390 bfa_ioc_reg_init(ioc);
1391}
1392
1393/**
1394 * Initialize IOC dma memory
1395 *
1396 * @param[in] dm_kva kernel virtual address of IOC dma memory
1397 * @param[in] dm_pa physical address of IOC dma memory
1398 */
1399void
1400bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
1401{
1402 /**
1403 * dma memory for firmware attribute
1404 */
1405 ioc->attr_dma.kva = dm_kva;
1406 ioc->attr_dma.pa = dm_pa;
1407 ioc->attr = (struct bfi_ioc_attr *) dm_kva;
1408}
1409
1410/**
1411 * Return size of dma memory required.
1412 */
1413u32
1414bfa_nw_ioc_meminfo(void)
1415{
1416 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
1417}
1418
1419void
1420bfa_nw_ioc_enable(struct bfa_ioc *ioc)
1421{
1422 bfa_ioc_stats(ioc, ioc_enables);
1423 ioc->dbg_fwsave_once = true;
1424
1425 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
1426}
1427
1428void
1429bfa_nw_ioc_disable(struct bfa_ioc *ioc)
1430{
1431 bfa_ioc_stats(ioc, ioc_disables);
1432 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
1433}
1434
1435static u32
1436bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
1437{
1438 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1439}
1440
1441static u32
1442bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
1443{
1444 return PSS_SMEM_PGOFF(fmaddr);
1445}
1446
1447/**
1448 * Register mailbox message handler function, to be called by common modules
1449 */
1450void
1451bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
1452 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
1453{
1454 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1455
1456 mod->mbhdlr[mc].cbfn = cbfn;
1457 mod->mbhdlr[mc].cbarg = cbarg;
1458}
1459
1460/**
1461 * Queue a mailbox command request to firmware. If the mailbox is busy,
1462 * the command is held on an internal queue; the caller must serialize.
1463 *
1464 * @param[in]	ioc	IOC instance
1465 * @param[in]	cmd	Mailbox command
1466 */
1467void
1468bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
1469{
1470 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1471 u32 stat;
1472
1473 /**
1474 * If a previous command is pending, queue new command
1475 */
1476 if (!list_empty(&mod->cmd_q)) {
1477 list_add_tail(&cmd->qe, &mod->cmd_q);
1478 return;
1479 }
1480
1481 /**
1482 * If mailbox is busy, queue command for poll timer
1483 */
1484 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1485 if (stat) {
1486 list_add_tail(&cmd->qe, &mod->cmd_q);
1487 return;
1488 }
1489
1490 /**
1491 * mailbox is free -- queue command to firmware
1492 */
1493 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1494}
1495
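/*
 * Illustrative usage sketch (the zero LPU id is a placeholder): build
 * the request in a struct bfa_mbox_cmd, stamp the header with
 * bfi_h2i_set(), then queue it. Delivery is immediate if the mailbox
 * is free, otherwise deferred to the mbox poll logic above.
 *
 *	struct bfa_mbox_cmd cmd;
 *	struct bfi_ioc_ctrl_req *req = (struct bfi_ioc_ctrl_req *)cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, 0);
 *	bfa_nw_ioc_mbox_queue(ioc, &cmd);
 */
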
1496/**
1497 * Handle mailbox interrupts
1498 */
1499void
1500bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
1501{
1502 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1503 struct bfi_mbmsg m;
1504 int mc;
1505
1506 bfa_ioc_msgget(ioc, &m);
1507
1508 /**
1509 * Treat IOC message class as special.
1510 */
1511 mc = m.mh.msg_class;
1512 if (mc == BFI_MC_IOC) {
1513 bfa_ioc_isr(ioc, &m);
1514 return;
1515 }
1516
1517 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
1518 return;
1519
1520 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
1521}
1522
1523void
1524bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
1525{
1526 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
1527}
1528
1529/**
1530 * Add to IOC heartbeat failure notification queue. To be used by common
1531 * modules such as cee, port, diag.
1532 */
1533void
1534bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
1535 struct bfa_ioc_hbfail_notify *notify)
1536{
1537 list_add_tail(&notify->qe, &ioc->hb_notify_q);
1538}
1539
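/*
 * Usage sketch (my_hbfail_cb/my_cbarg are illustrative placeholders):
 *
 *	struct bfa_ioc_hbfail_notify notify;
 *
 *	bfa_ioc_hbfail_init(&notify, my_hbfail_cb, my_cbarg);
 *	bfa_nw_ioc_hbfail_register(ioc, &notify);
 */
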
1540#define BFA_MFG_NAME "Brocade"
1541static void
1542bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
1543 struct bfa_adapter_attr *ad_attr)
1544{
1545 struct bfi_ioc_attr *ioc_attr;
1546
1547 ioc_attr = ioc->attr;
1548
1549 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
1550 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
1551 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
1552 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
1553 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
1554 sizeof(struct bfa_mfg_vpd));
1555
1556 ad_attr->nports = bfa_ioc_get_nports(ioc);
1557 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
1558
1559 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
1560 /* For now, model descr uses same model string */
1561 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
1562
1563 ad_attr->card_type = ioc_attr->card_type;
1564 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
1565
1566 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
1567 ad_attr->prototype = 1;
1568 else
1569 ad_attr->prototype = 0;
1570
1571 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
1572 ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
1573
1574 ad_attr->pcie_gen = ioc_attr->pcie_gen;
1575 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
1576 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
1577 ad_attr->asic_rev = ioc_attr->asic_rev;
1578
1579 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
1580
1581 ad_attr->cna_capable = ioc->cna;
1582 ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
1583}
1584
1585static enum bfa_ioc_type
1586bfa_ioc_get_type(struct bfa_ioc *ioc)
1587{
1588 if (!ioc->ctdev || ioc->fcmode)
1589 return BFA_IOC_TYPE_FC;
1590 else if (ioc->ioc_mc == BFI_MC_IOCFC)
1591 return BFA_IOC_TYPE_FCoE;
1592 else if (ioc->ioc_mc == BFI_MC_LL)
1593 return BFA_IOC_TYPE_LL;
1594 else {
1595		BUG_ON(1);	/* unknown message class: should not get here */
1596 return BFA_IOC_TYPE_LL;
1597 }
1598}
1599
1600static void
1601bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
1602{
1603 memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
1604 memcpy(serial_num,
1605 (void *)ioc->attr->brcd_serialnum,
1606 BFA_ADAPTER_SERIAL_NUM_LEN);
1607}
1608
1609static void
1610bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
1611{
1612 memset(fw_ver, 0, BFA_VERSION_LEN);
1613 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
1614}
1615
1616static void
1617bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
1618{
1619 BUG_ON(!(chip_rev));
1620
1621 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
1622
1623 chip_rev[0] = 'R';
1624 chip_rev[1] = 'e';
1625 chip_rev[2] = 'v';
1626 chip_rev[3] = '-';
1627 chip_rev[4] = ioc->attr->asic_rev;
1628 chip_rev[5] = '\0';
1629}
1630
1631static void
1632bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
1633{
1634 memset(optrom_ver, 0, BFA_VERSION_LEN);
1635 memcpy(optrom_ver, ioc->attr->optrom_version,
1636 BFA_VERSION_LEN);
1637}
1638
1639static void
1640bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
1641{
1642 memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
1643 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
1644}
1645
1646static void
1647bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
1648{
1649 struct bfi_ioc_attr *ioc_attr;
1650
1651 BUG_ON(!(model));
1652 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
1653
1654 ioc_attr = ioc->attr;
1655
1656 /**
1657 * model name
1658 */
1659 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
1660 BFA_MFG_NAME, ioc_attr->card_type);
1661}
1662
1663static enum bfa_ioc_state
1664bfa_ioc_get_state(struct bfa_ioc *ioc)
1665{
1666 return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
1667}
1668
1669void
1670bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
1671{
1672 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
1673
1674 ioc_attr->state = bfa_ioc_get_state(ioc);
1675 ioc_attr->port_id = ioc->port_id;
1676
1677 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
1678
1679 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
1680
1681 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
1682 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
1683 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
1684}
1685
1686/**
1687 * WWN public
1688 */
1689static u64
1690bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
1691{
1692 return ioc->attr->pwwn;
1693}
1694
1695mac_t
1696bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
1697{
1698 /*
1699 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
1700 */
1701 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
1702 return bfa_ioc_get_mfg_mac(ioc);
1703 else
1704 return ioc->attr->mac;
1705}
1706
1707static mac_t
1708bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
1709{
1710 mac_t m;
1711
1712 m = ioc->attr->mfg_mac;
1713 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
1714 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
1715 else
1716 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
1717 bfa_ioc_pcifn(ioc));
1718
1719 return m;
1720}
1721
1722/**
1723 * Firmware failure detected. Start recovery actions.
1724 */
1725static void
1726bfa_ioc_recover(struct bfa_ioc *ioc)
1727{
1728 bfa_ioc_stats(ioc, ioc_hbfails);
1729 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
1730}
1731
1732static void
1733bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
1734{
1735 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
1736 return;
1737
1738}
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
new file mode 100644
index 000000000000..7f0719e17efc
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc.h
@@ -0,0 +1,301 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFA_IOC_H__
20#define __BFA_IOC_H__
21
22#include "bfa_sm.h"
23#include "bfi.h"
24#include "cna.h"
25
26#define BFA_IOC_TOV 3000 /* msecs */
27#define BFA_IOC_HWSEM_TOV 500 /* msecs */
28#define BFA_IOC_HB_TOV 500 /* msecs */
29#define BFA_IOC_HWINIT_MAX 2
30#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
31
32/**
33 * Generic Scatter Gather Element used by driver
34 */
35struct bfa_sge {
36 u32 sg_len;
37 void *sg_addr;
38};
39
40/**
41 * PCI device information required by IOC
42 */
43struct bfa_pcidev {
44 int pci_slot;
45 u8 pci_func;
46 u16 device_id;
47 void __iomem *pci_bar_kva;
48};
49
50/**
51 * Structure used to remember the DMA-able memory block's KVA and Physical
52 * Address
53 */
54struct bfa_dma {
55 void *kva; /* ! Kernel virtual address */
56 u64 pa; /* ! Physical address */
57};
58
59#define BFA_DMA_ALIGN_SZ 256
60
61/**
62 * smem size for Crossbow and Catapult
63 */
64#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
65#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
66
67/**
68 * @brief BFA dma address assignment macro
69 */
70#define bfa_dma_addr_set(dma_addr, pa) \
71 __bfa_dma_addr_set(&dma_addr, (u64)pa)
72
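/*
 * e.g. pa = 0x0000001234567890ULL yields addr_lo = 0x34567890 and
 * addr_hi = 0x00000012.
 */
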
73static inline void
74__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
75{
76 dma_addr->a32.addr_lo = (u32) pa;
77 dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa));
78}
79
80/**
81 * @brief BFA dma address assignment macro. (big endian format)
82 */
83#define bfa_dma_be_addr_set(dma_addr, pa) \
84 __bfa_dma_be_addr_set(&dma_addr, (u64)pa)
85static inline void
86__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
87{
88 dma_addr->a32.addr_lo = (u32) htonl(pa);
89 dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
90}
91
92struct bfa_ioc_regs {
93 void __iomem *hfn_mbox_cmd;
94 void __iomem *hfn_mbox;
95 void __iomem *lpu_mbox_cmd;
96 void __iomem *lpu_mbox;
97 void __iomem *pss_ctl_reg;
98 void __iomem *pss_err_status_reg;
99 void __iomem *app_pll_fast_ctl_reg;
100 void __iomem *app_pll_slow_ctl_reg;
101 void __iomem *ioc_sem_reg;
102 void __iomem *ioc_usage_sem_reg;
103 void __iomem *ioc_init_sem_reg;
104 void __iomem *ioc_usage_reg;
105 void __iomem *host_page_num_fn;
106 void __iomem *heartbeat;
107 void __iomem *ioc_fwstate;
108 void __iomem *ll_halt;
109 void __iomem *err_set;
110 void __iomem *shirq_isr_next;
111 void __iomem *shirq_msk_next;
112 void __iomem *smem_page_start;
113 u32 smem_pg0;
114};
115
116/**
117 * IOC Mailbox structures
118 */
119struct bfa_mbox_cmd {
120 struct list_head qe;
121 u32 msg[BFI_IOC_MSGSZ];
122};
123
124/**
125 * IOC mailbox module
126 */
127typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
128struct bfa_ioc_mbox_mod {
129 struct list_head cmd_q; /*!< pending mbox queue */
130 int nmclass; /*!< number of handlers */
131 struct {
132 bfa_ioc_mbox_mcfunc_t cbfn; /*!< message handlers */
133 void *cbarg;
134 } mbhdlr[BFI_MC_MAX];
135};
136
137/**
138 * IOC callback function interfaces
139 */
140typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
141typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
142typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
143typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
144struct bfa_ioc_cbfn {
145 bfa_ioc_enable_cbfn_t enable_cbfn;
146 bfa_ioc_disable_cbfn_t disable_cbfn;
147 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
148 bfa_ioc_reset_cbfn_t reset_cbfn;
149};
150
151/**
152 * Heartbeat failure notification queue element.
153 */
154struct bfa_ioc_hbfail_notify {
155 struct list_head qe;
156 bfa_ioc_hbfail_cbfn_t cbfn;
157 void *cbarg;
158};
159
160/**
161 * Initialize a heartbeat failure notification structure
162 */
163#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
164 (__notify)->cbfn = (__cbfn); \
165 (__notify)->cbarg = (__cbarg); \
166} while (0)
167
168struct bfa_ioc {
169 bfa_fsm_t fsm;
170 struct bfa *bfa;
171 struct bfa_pcidev pcidev;
172 struct bfa_timer_mod *timer_mod;
173 struct timer_list ioc_timer;
174 struct timer_list sem_timer;
175 struct timer_list hb_timer;
176 u32 hb_count;
177 u32 retry_count;
178 struct list_head hb_notify_q;
179 void *dbg_fwsave;
180 int dbg_fwsave_len;
181 bool dbg_fwsave_once;
182 enum bfi_mclass ioc_mc;
183 struct bfa_ioc_regs ioc_regs;
184 struct bfa_ioc_drv_stats stats;
185 bool auto_recover;
186 bool fcmode;
187 bool ctdev;
188 bool cna;
189 bool pllinit;
190 bool stats_busy; /*!< outstanding stats */
191 u8 port_id;
192
193 struct bfa_dma attr_dma;
194 struct bfi_ioc_attr *attr;
195 struct bfa_ioc_cbfn *cbfn;
196 struct bfa_ioc_mbox_mod mbox_mod;
197 struct bfa_ioc_hwif *ioc_hwif;
198};
199
200struct bfa_ioc_hwif {
201 enum bfa_status (*ioc_pll_init) (void __iomem *rb, bool fcmode);
202 bool (*ioc_firmware_lock) (struct bfa_ioc *ioc);
203 void (*ioc_firmware_unlock) (struct bfa_ioc *ioc);
204 void (*ioc_reg_init) (struct bfa_ioc *ioc);
205 void (*ioc_map_port) (struct bfa_ioc *ioc);
206 void (*ioc_isr_mode_set) (struct bfa_ioc *ioc,
207 bool msix);
208 void (*ioc_notify_hbfail) (struct bfa_ioc *ioc);
209 void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
210};
211
212#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
213#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
214#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva)
215#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
216#define bfa_ioc_fetch_stats(__ioc, __stats) \
217 (((__stats)->drv_stats) = (__ioc)->stats)
218#define bfa_ioc_clr_stats(__ioc) \
219 memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
220#define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize)
221#define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit)
222#define bfa_ioc_speed_sup(__ioc) \
223 BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
224#define bfa_ioc_get_nports(__ioc) \
225 BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
226
227#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
228#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
229#define BFA_IOC_FWIMG_TYPE(__ioc) \
230 (((__ioc)->ctdev) ? \
231 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
232 BFI_IMAGE_CB_FC)
233#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
234 (((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
235#define BFA_IOC_FLASH_CHUNK_NO(off) ((off) / BFI_FLASH_CHUNK_SZ_WORDS)
236#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) ((off) % BFI_FLASH_CHUNK_SZ_WORDS)
237#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) ((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
238
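/*
 * Example: BFI_FLASH_CHUNK_SZ is 256 bytes, i.e. 64 words, so word
 * offset 100 falls in chunk 1 at word 36, and chunk 1 starts at word
 * address 64.
 */
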
239/**
240 * IOC mailbox interface
241 */
242void bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
243void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
244void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
245 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
246
247/**
248 * IOC interfaces
249 */
250
251#define bfa_ioc_pll_init_asic(__ioc) \
252 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
253 (__ioc)->fcmode))
254
255#define bfa_ioc_isr_mode_set(__ioc, __msix) \
256 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
257#define bfa_ioc_ownership_reset(__ioc) \
258 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
259
260void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
261
262void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
263 struct bfa_ioc_cbfn *cbfn);
264void bfa_nw_ioc_auto_recover(bool auto_recover);
265void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
266void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
267 enum bfi_mclass mc);
268u32 bfa_nw_ioc_meminfo(void);
269void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
270void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
271void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
272
273void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
274bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
275
276void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
277void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
278 struct bfa_ioc_hbfail_notify *notify);
279bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
280void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
281void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
282void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
283 struct bfi_ioc_image_hdr *fwhdr);
284bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
285 struct bfi_ioc_image_hdr *fwhdr);
286mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
287
288/*
289 * Timeout APIs
290 */
291void bfa_nw_ioc_timeout(void *ioc);
292void bfa_nw_ioc_hb_check(void *ioc);
293void bfa_nw_ioc_sem_timeout(void *ioc);
294
295/*
296 * F/W Image Size & Chunk
297 */
298u32 *bfa_cb_image_get_chunk(int type, u32 off);
299u32 bfa_cb_image_get_size(int type);
300
301#endif /* __BFA_IOC_H__ */
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
new file mode 100644
index 000000000000..462857cbab9b
--- /dev/null
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -0,0 +1,392 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "bfa_ioc.h"
20#include "cna.h"
21#include "bfi.h"
22#include "bfi_ctreg.h"
23#include "bfa_defs.h"
24
25/*
26 * forward declarations
27 */
28static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
29static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
30static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
31static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
32static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
33static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
34static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
35static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
36
37struct bfa_ioc_hwif nw_hwif_ct;
38
39/**
40 * Called from bfa_ioc_attach() to map asic specific calls.
41 */
42void
43bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
44{
45 nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
46 nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
47 nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
48 nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
49 nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
50 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
51 nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
52 nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
53
54 ioc->ioc_hwif = &nw_hwif_ct;
55}
56
57/**
58 * Return true if firmware of current driver matches the running firmware.
59 */
60static bool
61bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
62{
63 enum bfi_ioc_state ioc_fwstate;
64 u32 usecnt;
65 struct bfi_ioc_image_hdr fwhdr;
66
67 /**
68 * Firmware match check is relevant only for CNA.
69 */
70 if (!ioc->cna)
71 return true;
72
73 /**
74 * If bios boot (flash based) -- do not increment usage count
75 */
76 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
77 BFA_IOC_FWIMG_MINSZ)
78 return true;
79
80 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
81 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
82
83 /**
84 * If usage count is 0, always return TRUE.
85 */
86 if (usecnt == 0) {
87 writel(1, ioc->ioc_regs.ioc_usage_reg);
88 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
89 return true;
90 }
91
92 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
93
94 /**
95 * Use count cannot be non-zero and chip in uninitialized state.
96 */
97	BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);
98
99 /**
100 * Check if another driver with a different firmware is active
101 */
102 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
103 if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
104 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
105 return false;
106 }
107
108 /**
109 * Same firmware version. Increment the reference count.
110 */
111 usecnt++;
112 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
113 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
114 return true;
115}
116
117static void
118bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
119{
120 u32 usecnt;
121
122 /**
123 * Firmware lock is relevant only for CNA.
124 */
125 if (!ioc->cna)
126 return;
127
128 /**
129 * If bios boot (flash based) -- do not decrement usage count
130 */
131 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
132 BFA_IOC_FWIMG_MINSZ)
133 return;
134
135 /**
136 * decrement usage count
137 */
138 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
139 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
140	BUG_ON(usecnt == 0);
141
142 usecnt--;
143 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
144
145 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
146}
147
148/**
149 * Notify other functions on HB failure.
150 */
151static void
152bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
153{
154 if (ioc->cna) {
155 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
156 /* Wait for halt to take effect */
157 readl(ioc->ioc_regs.ll_halt);
158 } else {
159 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
160 readl(ioc->ioc_regs.err_set);
161 }
162}
163
164/**
165 * Host to LPU mailbox message addresses
166 */
167static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
168 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
169 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
170 { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
171 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
172};
173
174/**
175 * Host <-> LPU mailbox command/status registers - port 0
176 */
177static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
178 { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
179 { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
180 { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
181 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
182};
183
184/**
185 * Host <-> LPU mailbox command/status registers - port 1
186 */
187static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
188 { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
189 { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
190 { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
191 { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
192};
193
194static void
195bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
196{
197 void __iomem *rb;
198 int pcifn = bfa_ioc_pcifn(ioc);
199
200 rb = bfa_ioc_bar0(ioc);
201
202 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
203 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
204 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
205
206 if (ioc->port_id == 0) {
207 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
208 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
209 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
210 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
211 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
212 } else {
213 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
214 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
215 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
216 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
217 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
218 }
219
220 /*
221 * PSS control registers
222 */
223 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
224 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
225 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
226 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
227
228 /*
229 * IOC semaphore registers and serialization
230 */
231 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
232 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
233 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
234 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
235
236 /**
237 * sram memory access
238 */
239 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
240 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
241
242 /*
243 * err set reg : for notification of hb failure in fcmode
244 */
245 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
246}
247
248/**
249 * Initialize IOC to port mapping.
250 */
251
252#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
253static void
254bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
255{
256 void __iomem *rb = ioc->pcidev.pci_bar_kva;
257 u32 r32;
258
259 /**
260 * For catapult, base port id on personality register and IOC type
261 */
262 r32 = readl(rb + FNC_PERS_REG);
263 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
264 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
265
266}
267
268/**
269 * Set interrupt mode for a function: INTX or MSIX
270 */
271static void
272bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
273{
274 void __iomem *rb = ioc->pcidev.pci_bar_kva;
275 u32 r32, mode;
276
277 r32 = readl(rb + FNC_PERS_REG);
278
279 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
280 __F0_INTX_STATUS;
281
282 /**
283 * If already in desired mode, do not change anything
284 */
285 if (!msix && mode)
286 return;
287
288 if (msix)
289 mode = __F0_INTX_STATUS_MSIX;
290 else
291 mode = __F0_INTX_STATUS_INTA;
292
293 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
294 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
295
296 writel(r32, rb + FNC_PERS_REG);
297}
298
299/**
300 * Cleanup hw semaphore and usecnt registers
301 */
302static void
303bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
304{
305 if (ioc->cna) {
306 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
307 writel(0, ioc->ioc_regs.ioc_usage_reg);
308 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
309 }
310
311 /*
312 * Read the hw sem reg to make sure that it is locked
313 * before we clear it. If it is not locked, writing 1
314 * will lock it instead of clearing it.
315 */
316 readl(ioc->ioc_regs.ioc_sem_reg);
317 bfa_nw_ioc_hw_sem_release(ioc);
318}
319
320static enum bfa_status
321bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
322{
323 u32 pll_sclk, pll_fclk, r32;
324
325 pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
326 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
327 __APP_PLL_312_JITLMT0_1(3U) |
328 __APP_PLL_312_CNTLMT0_1(1U);
329 pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
330 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
331 __APP_PLL_425_JITLMT0_1(3U) |
332 __APP_PLL_425_CNTLMT0_1(1U);
333 if (fcmode) {
334 writel(0, (rb + OP_MODE));
335 writel(__APP_EMS_CMLCKSEL |
336 __APP_EMS_REFCKBUFEN2 |
337 __APP_EMS_CHANNEL_SEL,
338 (rb + ETH_MAC_SER_REG));
339 } else {
340 writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
341 writel(__APP_EMS_REFCKBUFEN1,
342 (rb + ETH_MAC_SER_REG));
343 }
344 writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
345 writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
346 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
347 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
348 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
349 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
350 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
351 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
352 writel(pll_sclk |
353 __APP_PLL_312_LOGIC_SOFT_RESET,
354 rb + APP_PLL_312_CTL_REG);
355 writel(pll_fclk |
356 __APP_PLL_425_LOGIC_SOFT_RESET,
357 rb + APP_PLL_425_CTL_REG);
358 writel(pll_sclk |
359 __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
360 rb + APP_PLL_312_CTL_REG);
361 writel(pll_fclk |
362 __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
363 rb + APP_PLL_425_CTL_REG);
364 readl(rb + HOSTFN0_INT_MSK);
365 udelay(2000);
366 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
367 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
368 writel(pll_sclk |
369 __APP_PLL_312_ENABLE,
370 rb + APP_PLL_312_CTL_REG);
371 writel(pll_fclk |
372 __APP_PLL_425_ENABLE,
373 rb + APP_PLL_425_CTL_REG);
374 if (!fcmode) {
375 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
376 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
377 }
378 r32 = readl((rb + PSS_CTL_REG));
379 r32 &= ~__PSS_LMEM_RESET;
380 writel(r32, (rb + PSS_CTL_REG));
381 udelay(1000);
382 if (!fcmode) {
383 writel(0, (rb + PMM_1T_RESET_REG_P0));
384 writel(0, (rb + PMM_1T_RESET_REG_P1));
385 }
386
387 writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
388 udelay(1000);
389 r32 = readl((rb + MBIST_STAT_REG));
390 writel(0, (rb + MBIST_CTL_REG));
391 return BFA_STATUS_OK;
392}
diff --git a/drivers/net/bna/bfa_sm.h b/drivers/net/bna/bfa_sm.h
new file mode 100644
index 000000000000..1d3d975d6f68
--- /dev/null
+++ b/drivers/net/bna/bfa_sm.h
@@ -0,0 +1,88 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19/**
20 * @file bfa_sm.h State machine defines
21 */
22
23#ifndef __BFA_SM_H__
24#define __BFA_SM_H__
25
26#include "cna.h"
27
28typedef void (*bfa_sm_t)(void *sm, int event);
29
30/**
31 * oc - object class, e.g. bfa_ioc
32 * st - state, e.g. reset
33 * otype - object type, e.g. struct bfa_ioc
34 * etype - event type, e.g. enum ioc_event
35 */
36#define bfa_sm_state_decl(oc, st, otype, etype) \
37 static void oc ## _sm_ ## st(otype * fsm, etype event)
38
39#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
40#define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event)))
41#define bfa_sm_get_state(_sm) ((_sm)->sm)
42#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
43
44/**
45 * For converting from state machine function to state encoding.
46 */
47struct bfa_sm_table {
48 bfa_sm_t sm; /*!< state machine function */
49 int state; /*!< state machine encoding */
50 char *name; /*!< state name for display */
51};
52#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
53
54/**
55 * State machine with entry actions.
56 */
57typedef void (*bfa_fsm_t)(void *fsm, int event);
58
59/**
60 * oc - object class, e.g. bfa_ioc
61 * st - state, e.g. reset
62 * otype - object type, e.g. struct bfa_ioc
63 * etype - event type, e.g. enum ioc_event
64 */
65#define bfa_fsm_state_decl(oc, st, otype, etype) \
66 static void oc ## _sm_ ## st(otype * fsm, etype event); \
67 static void oc ## _sm_ ## st ## _entry(otype * fsm)
68
69#define bfa_fsm_set_state(_fsm, _state) do { \
70 (_fsm)->fsm = (bfa_fsm_t)(_state); \
71 _state ## _entry(_fsm); \
72} while (0)
73
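/*
 * Example from this driver: bfa_fsm_state_decl(bfa_ioc, reset,
 * struct bfa_ioc, enum ioc_event) declares bfa_ioc_sm_reset() and
 * bfa_ioc_sm_reset_entry(); bfa_fsm_set_state(ioc, bfa_ioc_sm_reset)
 * then switches the FSM and runs bfa_ioc_sm_reset_entry(ioc).
 */
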
74#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
75#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
76#define bfa_fsm_cmp_state(_fsm, _state) \
77 ((_fsm)->fsm == (bfa_fsm_t)(_state))
78
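/*
 * The state table must be terminated with a NULL sm entry; if no
 * match is found, the sentinel's state value is returned.
 */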
79static inline int
80bfa_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm)
81{
82 int i = 0;
83
84 while (smt[i].sm && smt[i].sm != sm)
85 i++;
86 return smt[i].state;
87}
88#endif
diff --git a/drivers/net/bna/bfa_wc.h b/drivers/net/bna/bfa_wc.h
new file mode 100644
index 000000000000..d0e4caee67b0
--- /dev/null
+++ b/drivers/net/bna/bfa_wc.h
@@ -0,0 +1,69 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19/**
20 * @file bfa_wc.h Generic wait counter.
21 */
22
23#ifndef __BFA_WC_H__
24#define __BFA_WC_H__
25
26typedef void (*bfa_wc_resume_t) (void *cbarg);
27
28struct bfa_wc {
29 bfa_wc_resume_t wc_resume;
30 void *wc_cbarg;
31 int wc_count;
32};
33
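/*
 * Usage sketch: bfa_wc_init() starts the count at one (the caller's
 * own reference); each outstanding operation does bfa_wc_up() and,
 * on completion, bfa_wc_down(). bfa_wc_wait() drops the initial
 * reference, so wc_resume() runs once everything has completed.
 */
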
34static inline void
35bfa_wc_up(struct bfa_wc *wc)
36{
37 wc->wc_count++;
38}
39
40static inline void
41bfa_wc_down(struct bfa_wc *wc)
42{
43 wc->wc_count--;
44 if (wc->wc_count == 0)
45 wc->wc_resume(wc->wc_cbarg);
46}
47
48/**
49 * Initialize a waiting counter.
50 */
51static inline void
52bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
53{
54 wc->wc_resume = wc_resume;
55 wc->wc_cbarg = wc_cbarg;
56 wc->wc_count = 0;
57 bfa_wc_up(wc);
58}
59
60/**
61 * Wait for counter to reach zero; drops the initial reference from bfa_wc_init()
62 */
63static inline void
64bfa_wc_wait(struct bfa_wc *wc)
65{
66 bfa_wc_down(wc);
67}
68
69#endif
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
new file mode 100644
index 000000000000..a97396811050
--- /dev/null
+++ b/drivers/net/bna/bfi.h
@@ -0,0 +1,392 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __BFI_H__
20#define __BFI_H__
21
22#include "bfa_defs.h"
23
24#pragma pack(1)
25
26/**
27 * BFI FW image type
28 */
29#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
30#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
31enum {
32 BFI_IMAGE_CB_FC,
33 BFI_IMAGE_CT_FC,
34 BFI_IMAGE_CT_CNA,
35 BFI_IMAGE_MAX,
36};
37
38/**
39 * Msg header common to all msgs
40 */
41struct bfi_mhdr {
42 u8 msg_class; /*!< @ref enum bfi_mclass */
43 u8 msg_id; /*!< msg opcode with in the class */
44 union {
45 struct {
46 u8 rsvd;
47 u8 lpu_id; /*!< msg destination */
48 } h2i;
49 u16 i2htok; /*!< token in msgs to host */
50 } mtag;
51};
52
53#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
54 (_mh).msg_class = (_mc); \
55 (_mh).msg_id = (_op); \
56 (_mh).mtag.h2i.lpu_id = (_lpuid); \
57} while (0)
58
59#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
60 (_mh).msg_class = (_mc); \
61 (_mh).msg_id = (_op); \
62 (_mh).mtag.i2htok = (_i2htok); \
63} while (0)
64
65/*
66 * Message opcodes: 0-127 to firmware, 128-255 to host
67 */
68#define BFI_I2H_OPCODE_BASE 128
69#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
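/* e.g. BFI_IOC_I2H_ENABLE_REPLY is BFA_I2HM(1), i.e. opcode 129 */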
70
71/**
72 ****************************************************************************
73 *
74 * Scatter Gather Element and Page definition
75 *
76 ****************************************************************************
77 */
78
79#define BFI_SGE_INLINE 1
80#define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1)
81
82/**
83 * SG Flags
84 */
85enum {
86 BFI_SGE_DATA = 0, /*!< data address, not last */
87 BFI_SGE_DATA_CPL = 1, /*!< data addr, last in current page */
88 BFI_SGE_DATA_LAST = 3, /*!< data address, last */
89 BFI_SGE_LINK = 2, /*!< link address */
90 BFI_SGE_PGDLEN = 2, /*!< cumulative data length for page */
91};
92
93/**
94 * DMA addresses
95 */
96union bfi_addr_u {
97 struct {
98 u32 addr_lo;
99 u32 addr_hi;
100 } a32;
101};
102
103/**
104 * Scatter Gather Element
105 */
106struct bfi_sge {
107#ifdef __BIGENDIAN
108 u32 flags:2,
109 rsvd:2,
110 sg_len:28;
111#else
112 u32 sg_len:28,
113 rsvd:2,
114 flags:2;
115#endif
116 union bfi_addr_u sga;
117};
118
119/**
120 * Scatter Gather Page
121 */
122#define BFI_SGPG_DATA_SGES 7
123#define BFI_SGPG_SGES_MAX (BFI_SGPG_DATA_SGES + 1)
124#define BFI_SGPG_RSVD_WD_LEN 8
125struct bfi_sgpg {
126 struct bfi_sge sges[BFI_SGPG_SGES_MAX];
127 u32 rsvd[BFI_SGPG_RSVD_WD_LEN];
128};
129
130/*
131 * Large Message structure - 128 Bytes size Msgs
132 */
133#define BFI_LMSG_SZ 128
134#define BFI_LMSG_PL_WSZ \
135 ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
136
137struct bfi_msg {
138 struct bfi_mhdr mhdr;
139 u32 pl[BFI_LMSG_PL_WSZ];
140};
141
142/**
143 * Mailbox message structure
144 */
145#define BFI_MBMSG_SZ 7
146struct bfi_mbmsg {
147 struct bfi_mhdr mh;
148 u32 pl[BFI_MBMSG_SZ];
149};
150
151/**
152 * Message Classes
153 */
154enum bfi_mclass {
155 BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
156 BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
157 BFI_MC_FLASH = 3, /*!< Flash message class */
158 BFI_MC_CEE = 4, /*!< CEE */
159 BFI_MC_FCPORT = 5, /*!< FC port */
160 BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */
161 BFI_MC_LL = 7, /*!< Link Layer */
162 BFI_MC_UF = 8, /*!< Unsolicited frame receive */
163 BFI_MC_FCXP = 9, /*!< FC Transport */
164 BFI_MC_LPS = 10, /*!< lport fc login services */
165 BFI_MC_RPORT = 11, /*!< Remote port */
166 BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */
167 BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */
168 BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */
169 BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */
170 BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */
171 BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */
172 BFI_MC_TSKIM = 18, /*!< Initiator Task management */
173 BFI_MC_SBOOT = 19, /*!< SAN boot services */
174 BFI_MC_IPFC = 20, /*!< IP over FC Msgs */
175 BFI_MC_PORT = 21, /*!< Physical port */
176 BFI_MC_SFP = 22, /*!< SFP module */
177 BFI_MC_MSGQ = 23, /*!< MSGQ */
178 BFI_MC_ENET = 24, /*!< ENET commands/responses */
179 BFI_MC_MAX = 32
180};
181
182#define BFI_IOC_MAX_CQS 4
183#define BFI_IOC_MAX_CQS_ASIC 8
184#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
185
186#define BFI_BOOT_TYPE_OFF 8
187#define BFI_BOOT_PARAM_OFF 12
188
189#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
190#define BFI_BOOT_TYPE_FLASH 1
191#define BFI_BOOT_TYPE_MEMTEST 2
192
193#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
194#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
195
196/**
197 *----------------------------------------------------------------------
198 * IOC
199 *----------------------------------------------------------------------
200 */
201
202enum bfi_ioc_h2i_msgs {
203 BFI_IOC_H2I_ENABLE_REQ = 1,
204 BFI_IOC_H2I_DISABLE_REQ = 2,
205 BFI_IOC_H2I_GETATTR_REQ = 3,
206 BFI_IOC_H2I_DBG_SYNC = 4,
207 BFI_IOC_H2I_DBG_DUMP = 5,
208};
209
210enum bfi_ioc_i2h_msgs {
211 BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
212 BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
213 BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
214 BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
215 BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
216};
217
218/**
219 * BFI_IOC_H2I_GETATTR_REQ message
220 */
221struct bfi_ioc_getattr_req {
222 struct bfi_mhdr mh;
223 union bfi_addr_u attr_addr;
224};
225
226struct bfi_ioc_attr {
227 u64 mfg_pwwn; /*!< Mfg port wwn */
228 u64 mfg_nwwn; /*!< Mfg node wwn */
229 mac_t mfg_mac; /*!< Mfg mac */
230 u16 rsvd_a;
231 u64 pwwn;
232 u64 nwwn;
233 mac_t mac; /*!< PBC or Mfg mac */
234 u16 rsvd_b;
235 mac_t fcoe_mac;
236 u16 rsvd_c;
237 char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
238 u8 pcie_gen;
239 u8 pcie_lanes_orig;
240 u8 pcie_lanes;
241 u8 rx_bbcredit; /*!< receive buffer credits */
242 u32 adapter_prop; /*!< adapter properties */
243 u16 maxfrsize; /*!< max receive frame size */
244 char asic_rev;
245 u8 rsvd_d;
246 char fw_version[BFA_VERSION_LEN];
247 char optrom_version[BFA_VERSION_LEN];
248 struct bfa_mfg_vpd vpd;
249 u32 card_type; /*!< card type */
250};
251
252/**
253 * BFI_IOC_I2H_GETATTR_REPLY message
254 */
255struct bfi_ioc_getattr_reply {
256 struct bfi_mhdr mh; /*!< Common msg header */
257 u8 status; /*!< cfg reply status */
258 u8 rsvd[3];
259};
260
261/**
262 * Firmware memory page offsets
263 */
264#define BFI_IOC_SMEM_PG0_CB (0x40)
265#define BFI_IOC_SMEM_PG0_CT (0x180)
266
267/**
268 * Firmware statistic offset
269 */
270#define BFI_IOC_FWSTATS_OFF (0x6B40)
271#define BFI_IOC_FWSTATS_SZ (4096)
272
273/**
274 * Firmware trace offset
275 */
276#define BFI_IOC_TRC_OFF (0x4b00)
277#define BFI_IOC_TRC_ENTS 256
278
279#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
280#define BFI_IOC_MD5SUM_SZ 4
281struct bfi_ioc_image_hdr {
282 u32 signature; /*!< constant signature */
283 u32 rsvd_a;
284 u32 exec; /*!< exec vector */
285 u32 param; /*!< parameters */
286 u32 rsvd_b[4];
287 u32 md5sum[BFI_IOC_MD5SUM_SZ];
288};
289
290/**
291 * BFI_IOC_I2H_READY_EVENT message
292 */
293struct bfi_ioc_rdy_event {
294 struct bfi_mhdr mh; /*!< common msg header */
295 u8 init_status; /*!< init event status */
296 u8 rsvd[3];
297};
298
299struct bfi_ioc_hbeat {
300 struct bfi_mhdr mh; /*!< common msg header */
301 u32 hb_count; /*!< current heart beat count */
302};
303
304/**
305 * IOC hardware/firmware state
306 */
307enum bfi_ioc_state {
308 BFI_IOC_UNINIT = 0, /*!< not initialized */
309 BFI_IOC_INITING = 1, /*!< h/w is being initialized */
310 BFI_IOC_HWINIT = 2, /*!< h/w is initialized */
311 BFI_IOC_CFG = 3, /*!< IOC configuration in progress */
312 BFI_IOC_OP = 4, /*!< IOC is operational */
313 BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */
314 BFI_IOC_DISABLED = 6, /*!< IOC is disabled */
315 BFI_IOC_CFG_DISABLED = 7, /*!< IOC is being disabled;transient */
316 BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */
317 BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */
318};
319
320#define BFI_IOC_ENDIAN_SIG 0x12345678
321
322enum {
323 BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */
324 BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */
325 BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */
326 BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */
327 BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */
328 BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */
329 BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */
330	BFI_ADAPTER_PROTO = 0x100000, /*!< prototype adapters */
331 BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */
332 BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */
333};
334
335#define BFI_ADAPTER_GETP(__prop, __adap_prop) \
336 (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \
337 BFI_ADAPTER_ ## __prop ## _SH)
338#define BFI_ADAPTER_SETP(__prop, __val) \
339 ((__val) << BFI_ADAPTER_ ## __prop ## _SH)
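/*
 * e.g. with adapter_prop == 0x00000208, BFI_ADAPTER_GETP(NPORTS,
 * adapter_prop) yields 2 and BFI_ADAPTER_GETP(SPEED, adapter_prop)
 * yields 8.
 */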
340#define BFI_ADAPTER_IS_PROTO(__adap_type) \
341 ((__adap_type) & BFI_ADAPTER_PROTO)
342#define BFI_ADAPTER_IS_TTV(__adap_type) \
343 ((__adap_type) & BFI_ADAPTER_TTV)
344#define BFI_ADAPTER_IS_UNSUPP(__adap_type) \
345 ((__adap_type) & BFI_ADAPTER_UNSUPP)
346#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \
347 ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
348 BFI_ADAPTER_UNSUPP))
349
350/**
351 * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
352 */
353struct bfi_ioc_ctrl_req {
354 struct bfi_mhdr mh;
355 u8 ioc_class;
356 u8 rsvd[3];
357 u32 tv_sec;
358};
359
360/**
361 * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
362 */
363struct bfi_ioc_ctrl_reply {
364 struct bfi_mhdr mh; /*!< Common msg header */
365 u8 status; /*!< enable/disable status */
366 u8 rsvd[3];
367};
368
369#define BFI_IOC_MSGSZ 8
370/**
371 * H2I Messages
372 */
373union bfi_ioc_h2i_msg_u {
374 struct bfi_mhdr mh;
375 struct bfi_ioc_ctrl_req enable_req;
376 struct bfi_ioc_ctrl_req disable_req;
377 struct bfi_ioc_getattr_req getattr_req;
378 u32 mboxmsg[BFI_IOC_MSGSZ];
379};
380
381/**
382 * I2H Messages
383 */
384union bfi_ioc_i2h_msg_u {
385 struct bfi_mhdr mh;
386 struct bfi_ioc_rdy_event rdy_event;
387 u32 mboxmsg[BFI_IOC_MSGSZ];
388};
389
390#pragma pack()
391
392#endif /* __BFI_H__ */
diff --git a/drivers/net/bna/bfi_cna.h b/drivers/net/bna/bfi_cna.h
new file mode 100644
index 000000000000..4eecabea397b
--- /dev/null
+++ b/drivers/net/bna/bfi_cna.h
@@ -0,0 +1,199 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFI_CNA_H__
19#define __BFI_CNA_H__
20
21#include "bfi.h"
22#include "bfa_defs_cna.h"
23
24#pragma pack(1)
25
26enum bfi_port_h2i {
27 BFI_PORT_H2I_ENABLE_REQ = (1),
28 BFI_PORT_H2I_DISABLE_REQ = (2),
29 BFI_PORT_H2I_GET_STATS_REQ = (3),
30 BFI_PORT_H2I_CLEAR_STATS_REQ = (4),
31};
32
33enum bfi_port_i2h {
34 BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1),
35 BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2),
36 BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3),
37 BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
38};
39
40/**
41 * Generic REQ type
42 */
43struct bfi_port_generic_req {
44 struct bfi_mhdr mh; /*!< msg header */
45 u32 msgtag; /*!< msgtag for reply */
46 u32 rsvd;
47};
48
49/**
50 * Generic RSP type
51 */
52struct bfi_port_generic_rsp {
53 struct bfi_mhdr mh; /*!< common msg header */
54 u8 status; /*!< port enable status */
55 u8 rsvd[3];
56 u32 msgtag; /*!< msgtag for reply */
57};
58
59/**
60 * @todo
61 * BFI_PORT_H2I_ENABLE_REQ
62 */
63
64/**
65 * @todo
66 * BFI_PORT_I2H_ENABLE_RSP
67 */
68
69/**
70 * BFI_PORT_H2I_DISABLE_REQ
71 */
72
73/**
74 * BFI_PORT_I2H_DISABLE_RSP
75 */
76
77/**
78 * BFI_PORT_H2I_GET_STATS_REQ
79 */
80struct bfi_port_get_stats_req {
81 struct bfi_mhdr mh; /*!< common msg header */
82 union bfi_addr_u dma_addr;
83};
84
85/**
86 * BFI_PORT_I2H_GET_STATS_RSP
87 */
88
89/**
90 * BFI_PORT_H2I_CLEAR_STATS_REQ
91 */
92
93/**
94 * BFI_PORT_I2H_CLEAR_STATS_RSP
95 */
96
97union bfi_port_h2i_msg_u {
98 struct bfi_mhdr mh;
99 struct bfi_port_generic_req enable_req;
100 struct bfi_port_generic_req disable_req;
101 struct bfi_port_get_stats_req getstats_req;
102 struct bfi_port_generic_req clearstats_req;
103};
104
105union bfi_port_i2h_msg_u {
106 struct bfi_mhdr mh;
107 struct bfi_port_generic_rsp enable_rsp;
108 struct bfi_port_generic_rsp disable_rsp;
109 struct bfi_port_generic_rsp getstats_rsp;
110 struct bfi_port_generic_rsp clearstats_rsp;
111};
112
113/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */
114enum bfi_cee_h2i_msgs {
115 BFI_CEE_H2I_GET_CFG_REQ = 1,
116 BFI_CEE_H2I_RESET_STATS = 2,
117 BFI_CEE_H2I_GET_STATS_REQ = 3,
118};
119
120/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */
121enum bfi_cee_i2h_msgs {
122 BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1),
123 BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2),
124 BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3),
125};
126
127/* Data structures */
128
129/*
130 * @brief H2I command structure for resetting the stats.
131 * BFI_CEE_H2I_RESET_STATS
132 */
133struct bfi_lldp_reset_stats {
134 struct bfi_mhdr mh;
135};
136
137/*
138 * @brief H2I command structure for resetting the stats.
139 * BFI_CEE_H2I_RESET_STATS
140 */
141struct bfi_cee_reset_stats {
142 struct bfi_mhdr mh;
143};
144
145/*
146 * @brief get configuration command from host
147 * BFI_CEE_H2I_GET_CFG_REQ
148 */
149struct bfi_cee_get_req {
150 struct bfi_mhdr mh;
151 union bfi_addr_u dma_addr;
152};
153
154/*
155 * @brief reply message from firmware
156 * BFI_CEE_I2H_GET_CFG_RSP
157 */
158struct bfi_cee_get_rsp {
159 struct bfi_mhdr mh;
160 u8 cmd_status;
161 u8 rsvd[3];
162};
163
164/*
165 * @brief get statistics command from host
166 * BFI_CEE_H2I_GET_STATS_REQ
167 */
168struct bfi_cee_stats_req {
169 struct bfi_mhdr mh;
170 union bfi_addr_u dma_addr;
171};
172
173/*
174 * @brief reply message from firmware
175 * BFI_CEE_I2H_GET_STATS_RSP
176 */
177struct bfi_cee_stats_rsp {
178 struct bfi_mhdr mh;
179 u8 cmd_status;
180 u8 rsvd[3];
181};
182
183/* @brief mailbox command structures from host to firmware */
184union bfi_cee_h2i_msg_u {
185 struct bfi_mhdr mh;
186 struct bfi_cee_get_req get_req;
187 struct bfi_cee_stats_req stats_req;
188};
189
190/* @brief mailbox message structures from firmware to host */
191union bfi_cee_i2h_msg_u {
192 struct bfi_mhdr mh;
193 struct bfi_cee_get_rsp get_rsp;
194 struct bfi_cee_stats_rsp stats_rsp;
195};
196
197#pragma pack()
198
199#endif /* __BFI_CNA_H__ */
diff --git a/drivers/net/bna/bfi_ctreg.h b/drivers/net/bna/bfi_ctreg.h
new file mode 100644
index 000000000000..404ea351d4a1
--- /dev/null
+++ b/drivers/net/bna/bfi_ctreg.h
@@ -0,0 +1,637 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19/*
20 * bfi_ctreg.h catapult host block register definitions
21 *
22 * !!! Do not edit. Auto generated. !!!
23 */
24
25#ifndef __BFI_CTREG_H__
26#define __BFI_CTREG_H__
27
28#define HOSTFN0_LPU_MBOX0_0 0x00019200
29#define HOSTFN1_LPU_MBOX0_8 0x00019260
30#define LPU_HOSTFN0_MBOX0_0 0x00019280
31#define LPU_HOSTFN1_MBOX0_8 0x000192e0
32#define HOSTFN2_LPU_MBOX0_0 0x00019400
33#define HOSTFN3_LPU_MBOX0_8 0x00019460
34#define LPU_HOSTFN2_MBOX0_0 0x00019480
35#define LPU_HOSTFN3_MBOX0_8 0x000194e0
36#define HOSTFN0_INT_STATUS 0x00014000
37#define __HOSTFN0_HALT_OCCURRED 0x01000000
38#define __HOSTFN0_INT_STATUS_LVL_MK 0x00f00000
39#define __HOSTFN0_INT_STATUS_LVL_SH 20
40#define __HOSTFN0_INT_STATUS_LVL(_v) ((_v) << __HOSTFN0_INT_STATUS_LVL_SH)
41#define __HOSTFN0_INT_STATUS_P_MK 0x000f0000
42#define __HOSTFN0_INT_STATUS_P_SH 16
43#define __HOSTFN0_INT_STATUS_P(_v) ((_v) << __HOSTFN0_INT_STATUS_P_SH)
44#define __HOSTFN0_INT_STATUS_F 0x0000ffff
45#define HOSTFN0_INT_MSK 0x00014004
46#define HOST_PAGE_NUM_FN0 0x00014008
47#define __HOST_PAGE_NUM_FN 0x000001ff
48#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c
49#define __MSIX_ERR_INDEX_FN 0x000001ff
50#define HOSTFN1_INT_STATUS 0x00014100
51#define __HOSTFN1_HALT_OCCURRED 0x01000000
52#define __HOSTFN1_INT_STATUS_LVL_MK 0x00f00000
53#define __HOSTFN1_INT_STATUS_LVL_SH 20
54#define __HOSTFN1_INT_STATUS_LVL(_v) ((_v) << __HOSTFN1_INT_STATUS_LVL_SH)
55#define __HOSTFN1_INT_STATUS_P_MK 0x000f0000
56#define __HOSTFN1_INT_STATUS_P_SH 16
57#define __HOSTFN1_INT_STATUS_P(_v) ((_v) << __HOSTFN1_INT_STATUS_P_SH)
58#define __HOSTFN1_INT_STATUS_F 0x0000ffff
59#define HOSTFN1_INT_MSK 0x00014104
60#define HOST_PAGE_NUM_FN1 0x00014108
61#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c
62#define APP_PLL_425_CTL_REG 0x00014204
63#define __P_425_PLL_LOCK 0x80000000
64#define __APP_PLL_425_SRAM_USE_100MHZ 0x00100000
65#define __APP_PLL_425_RESET_TIMER_MK 0x000e0000
66#define __APP_PLL_425_RESET_TIMER_SH 17
67#define __APP_PLL_425_RESET_TIMER(_v) ((_v) << __APP_PLL_425_RESET_TIMER_SH)
68#define __APP_PLL_425_LOGIC_SOFT_RESET 0x00010000
69#define __APP_PLL_425_CNTLMT0_1_MK 0x0000c000
70#define __APP_PLL_425_CNTLMT0_1_SH 14
71#define __APP_PLL_425_CNTLMT0_1(_v) ((_v) << __APP_PLL_425_CNTLMT0_1_SH)
72#define __APP_PLL_425_JITLMT0_1_MK 0x00003000
73#define __APP_PLL_425_JITLMT0_1_SH 12
74#define __APP_PLL_425_JITLMT0_1(_v) ((_v) << __APP_PLL_425_JITLMT0_1_SH)
75#define __APP_PLL_425_HREF 0x00000800
76#define __APP_PLL_425_HDIV 0x00000400
77#define __APP_PLL_425_P0_1_MK 0x00000300
78#define __APP_PLL_425_P0_1_SH 8
79#define __APP_PLL_425_P0_1(_v) ((_v) << __APP_PLL_425_P0_1_SH)
80#define __APP_PLL_425_Z0_2_MK 0x000000e0
81#define __APP_PLL_425_Z0_2_SH 5
82#define __APP_PLL_425_Z0_2(_v) ((_v) << __APP_PLL_425_Z0_2_SH)
83#define __APP_PLL_425_RSEL200500 0x00000010
84#define __APP_PLL_425_ENARST 0x00000008
85#define __APP_PLL_425_BYPASS 0x00000004
86#define __APP_PLL_425_LRESETN 0x00000002
87#define __APP_PLL_425_ENABLE 0x00000001
88#define APP_PLL_312_CTL_REG 0x00014208
89#define __P_312_PLL_LOCK 0x80000000
90#define __ENABLE_MAC_AHB_1 0x00800000
91#define __ENABLE_MAC_AHB_0 0x00400000
92#define __ENABLE_MAC_1 0x00200000
93#define __ENABLE_MAC_0 0x00100000
94#define __APP_PLL_312_RESET_TIMER_MK 0x000e0000
95#define __APP_PLL_312_RESET_TIMER_SH 17
96#define __APP_PLL_312_RESET_TIMER(_v) ((_v) << __APP_PLL_312_RESET_TIMER_SH)
97#define __APP_PLL_312_LOGIC_SOFT_RESET 0x00010000
98#define __APP_PLL_312_CNTLMT0_1_MK 0x0000c000
99#define __APP_PLL_312_CNTLMT0_1_SH 14
100#define __APP_PLL_312_CNTLMT0_1(_v) ((_v) << __APP_PLL_312_CNTLMT0_1_SH)
101#define __APP_PLL_312_JITLMT0_1_MK 0x00003000
102#define __APP_PLL_312_JITLMT0_1_SH 12
103#define __APP_PLL_312_JITLMT0_1(_v) ((_v) << __APP_PLL_312_JITLMT0_1_SH)
104#define __APP_PLL_312_HREF 0x00000800
105#define __APP_PLL_312_HDIV 0x00000400
106#define __APP_PLL_312_P0_1_MK 0x00000300
107#define __APP_PLL_312_P0_1_SH 8
108#define __APP_PLL_312_P0_1(_v) ((_v) << __APP_PLL_312_P0_1_SH)
109#define __APP_PLL_312_Z0_2_MK 0x000000e0
110#define __APP_PLL_312_Z0_2_SH 5
111#define __APP_PLL_312_Z0_2(_v) ((_v) << __APP_PLL_312_Z0_2_SH)
112#define __APP_PLL_312_RSEL200500 0x00000010
113#define __APP_PLL_312_ENARST 0x00000008
114#define __APP_PLL_312_BYPASS 0x00000004
115#define __APP_PLL_312_LRESETN 0x00000002
116#define __APP_PLL_312_ENABLE 0x00000001
117#define MBIST_CTL_REG 0x00014220
118#define __EDRAM_BISTR_START 0x00000004
119#define __MBIST_RESET 0x00000002
120#define __MBIST_START 0x00000001
121#define MBIST_STAT_REG 0x00014224
122#define __EDRAM_BISTR_STATUS 0x00000008
123#define __EDRAM_BISTR_DONE 0x00000004
124#define __MEM_BIT_STATUS 0x00000002
125#define __MBIST_DONE 0x00000001
126#define HOST_SEM0_REG 0x00014230
127#define __HOST_SEMAPHORE 0x00000001
128#define HOST_SEM1_REG 0x00014234
129#define HOST_SEM2_REG 0x00014238
130#define HOST_SEM3_REG 0x0001423c
131#define HOST_SEM0_INFO_REG 0x00014240
132#define HOST_SEM1_INFO_REG 0x00014244
133#define HOST_SEM2_INFO_REG 0x00014248
134#define HOST_SEM3_INFO_REG 0x0001424c
135#define ETH_MAC_SER_REG 0x00014288
136#define __APP_EMS_CKBUFAMPIN 0x00000020
137#define __APP_EMS_REFCLKSEL 0x00000010
138#define __APP_EMS_CMLCKSEL 0x00000008
139#define __APP_EMS_REFCKBUFEN2 0x00000004
140#define __APP_EMS_REFCKBUFEN1 0x00000002
141#define __APP_EMS_CHANNEL_SEL 0x00000001
142#define HOSTFN2_INT_STATUS 0x00014300
143#define __HOSTFN2_HALT_OCCURRED 0x01000000
144#define __HOSTFN2_INT_STATUS_LVL_MK 0x00f00000
145#define __HOSTFN2_INT_STATUS_LVL_SH 20
146#define __HOSTFN2_INT_STATUS_LVL(_v) ((_v) << __HOSTFN2_INT_STATUS_LVL_SH)
147#define __HOSTFN2_INT_STATUS_P_MK 0x000f0000
148#define __HOSTFN2_INT_STATUS_P_SH 16
149#define __HOSTFN2_INT_STATUS_P(_v) ((_v) << __HOSTFN2_INT_STATUS_P_SH)
150#define __HOSTFN2_INT_STATUS_F 0x0000ffff
151#define HOSTFN2_INT_MSK 0x00014304
152#define HOST_PAGE_NUM_FN2 0x00014308
153#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c
154#define HOSTFN3_INT_STATUS 0x00014400
155#define __HOSTFN3_HALT_OCCURRED 0x01000000
156#define __HOSTFN3_INT_STATUS_LVL_MK 0x00f00000
157#define __HOSTFN3_INT_STATUS_LVL_SH 20
158#define __HOSTFN3_INT_STATUS_LVL(_v) ((_v) << __HOSTFN3_INT_STATUS_LVL_SH)
159#define __HOSTFN3_INT_STATUS_P_MK 0x000f0000
160#define __HOSTFN3_INT_STATUS_P_SH 16
161#define __HOSTFN3_INT_STATUS_P(_v) ((_v) << __HOSTFN3_INT_STATUS_P_SH)
162#define __HOSTFN3_INT_STATUS_F 0x0000ffff
163#define HOSTFN3_INT_MSK 0x00014404
164#define HOST_PAGE_NUM_FN3 0x00014408
165#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c
166#define FNC_ID_REG 0x00014600
167#define __FUNCTION_NUMBER 0x00000007
168#define FNC_PERS_REG 0x00014604
169#define __F3_FUNCTION_ACTIVE 0x80000000
170#define __F3_FUNCTION_MODE 0x40000000
171#define __F3_PORT_MAP_MK 0x30000000
172#define __F3_PORT_MAP_SH 28
173#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
174#define __F3_VM_MODE 0x08000000
175#define __F3_INTX_STATUS_MK 0x07000000
176#define __F3_INTX_STATUS_SH 24
177#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
178#define __F2_FUNCTION_ACTIVE 0x00800000
179#define __F2_FUNCTION_MODE 0x00400000
180#define __F2_PORT_MAP_MK 0x00300000
181#define __F2_PORT_MAP_SH 20
182#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
183#define __F2_VM_MODE 0x00080000
184#define __F2_INTX_STATUS_MK 0x00070000
185#define __F2_INTX_STATUS_SH 16
186#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
187#define __F1_FUNCTION_ACTIVE 0x00008000
188#define __F1_FUNCTION_MODE 0x00004000
189#define __F1_PORT_MAP_MK 0x00003000
190#define __F1_PORT_MAP_SH 12
191#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
192#define __F1_VM_MODE 0x00000800
193#define __F1_INTX_STATUS_MK 0x00000700
194#define __F1_INTX_STATUS_SH 8
195#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
196#define __F0_FUNCTION_ACTIVE 0x00000080
197#define __F0_FUNCTION_MODE 0x00000040
198#define __F0_PORT_MAP_MK 0x00000030
199#define __F0_PORT_MAP_SH 4
200#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
201#define __F0_VM_MODE 0x00000008
202#define __F0_INTX_STATUS 0x00000007
203enum {
204 __F0_INTX_STATUS_MSIX = 0x0,
205 __F0_INTX_STATUS_INTA = 0x1,
206 __F0_INTX_STATUS_INTB = 0x2,
207 __F0_INTX_STATUS_INTC = 0x3,
208 __F0_INTX_STATUS_INTD = 0x4,
209};
210#define OP_MODE 0x0001460c
211#define __APP_ETH_CLK_LOWSPEED 0x00000004
212#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
213#define __GLOBAL_FCOE_MODE 0x00000001
214#define HOST_SEM4_REG 0x00014610
215#define HOST_SEM5_REG 0x00014614
216#define HOST_SEM6_REG 0x00014618
217#define HOST_SEM7_REG 0x0001461c
218#define HOST_SEM4_INFO_REG 0x00014620
219#define HOST_SEM5_INFO_REG 0x00014624
220#define HOST_SEM6_INFO_REG 0x00014628
221#define HOST_SEM7_INFO_REG 0x0001462c
222#define HOSTFN0_LPU0_MBOX0_CMD_STAT 0x00019000
223#define __HOSTFN0_LPU0_MBOX0_INFO_MK 0xfffffffe
224#define __HOSTFN0_LPU0_MBOX0_INFO_SH 1
225#define __HOSTFN0_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU0_MBOX0_INFO_SH)
226#define __HOSTFN0_LPU0_MBOX0_CMD_STATUS 0x00000001
227#define HOSTFN0_LPU1_MBOX0_CMD_STAT 0x00019004
228#define __HOSTFN0_LPU1_MBOX0_INFO_MK 0xfffffffe
229#define __HOSTFN0_LPU1_MBOX0_INFO_SH 1
230#define __HOSTFN0_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN0_LPU1_MBOX0_INFO_SH)
231#define __HOSTFN0_LPU1_MBOX0_CMD_STATUS 0x00000001
232#define LPU0_HOSTFN0_MBOX0_CMD_STAT 0x00019008
233#define __LPU0_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
234#define __LPU0_HOSTFN0_MBOX0_INFO_SH 1
235#define __LPU0_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN0_MBOX0_INFO_SH)
236#define __LPU0_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
237#define LPU1_HOSTFN0_MBOX0_CMD_STAT 0x0001900c
238#define __LPU1_HOSTFN0_MBOX0_INFO_MK 0xfffffffe
239#define __LPU1_HOSTFN0_MBOX0_INFO_SH 1
240#define __LPU1_HOSTFN0_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN0_MBOX0_INFO_SH)
241#define __LPU1_HOSTFN0_MBOX0_CMD_STATUS 0x00000001
242#define HOSTFN1_LPU0_MBOX0_CMD_STAT 0x00019010
243#define __HOSTFN1_LPU0_MBOX0_INFO_MK 0xfffffffe
244#define __HOSTFN1_LPU0_MBOX0_INFO_SH 1
245#define __HOSTFN1_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU0_MBOX0_INFO_SH)
246#define __HOSTFN1_LPU0_MBOX0_CMD_STATUS 0x00000001
247#define HOSTFN1_LPU1_MBOX0_CMD_STAT 0x00019014
248#define __HOSTFN1_LPU1_MBOX0_INFO_MK 0xfffffffe
249#define __HOSTFN1_LPU1_MBOX0_INFO_SH 1
250#define __HOSTFN1_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN1_LPU1_MBOX0_INFO_SH)
251#define __HOSTFN1_LPU1_MBOX0_CMD_STATUS 0x00000001
252#define LPU0_HOSTFN1_MBOX0_CMD_STAT 0x00019018
253#define __LPU0_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
254#define __LPU0_HOSTFN1_MBOX0_INFO_SH 1
255#define __LPU0_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN1_MBOX0_INFO_SH)
256#define __LPU0_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
257#define LPU1_HOSTFN1_MBOX0_CMD_STAT 0x0001901c
258#define __LPU1_HOSTFN1_MBOX0_INFO_MK 0xfffffffe
259#define __LPU1_HOSTFN1_MBOX0_INFO_SH 1
260#define __LPU1_HOSTFN1_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN1_MBOX0_INFO_SH)
261#define __LPU1_HOSTFN1_MBOX0_CMD_STATUS 0x00000001
262#define HOSTFN2_LPU0_MBOX0_CMD_STAT 0x00019150
263#define __HOSTFN2_LPU0_MBOX0_INFO_MK 0xfffffffe
264#define __HOSTFN2_LPU0_MBOX0_INFO_SH 1
265#define __HOSTFN2_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU0_MBOX0_INFO_SH)
266#define __HOSTFN2_LPU0_MBOX0_CMD_STATUS 0x00000001
267#define HOSTFN2_LPU1_MBOX0_CMD_STAT 0x00019154
268#define __HOSTFN2_LPU1_MBOX0_INFO_MK 0xfffffffe
269#define __HOSTFN2_LPU1_MBOX0_INFO_SH 1
270#define __HOSTFN2_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN2_LPU1_MBOX0_INFO_SH)
271#define __HOSTFN2_LPU1_MBOX0_CMD_STATUS 0x00000001
272#define LPU0_HOSTFN2_MBOX0_CMD_STAT 0x00019158
273#define __LPU0_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
274#define __LPU0_HOSTFN2_MBOX0_INFO_SH 1
275#define __LPU0_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN2_MBOX0_INFO_SH)
276#define __LPU0_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
277#define LPU1_HOSTFN2_MBOX0_CMD_STAT 0x0001915c
278#define __LPU1_HOSTFN2_MBOX0_INFO_MK 0xfffffffe
279#define __LPU1_HOSTFN2_MBOX0_INFO_SH 1
280#define __LPU1_HOSTFN2_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN2_MBOX0_INFO_SH)
281#define __LPU1_HOSTFN2_MBOX0_CMD_STATUS 0x00000001
282#define HOSTFN3_LPU0_MBOX0_CMD_STAT 0x00019160
283#define __HOSTFN3_LPU0_MBOX0_INFO_MK 0xfffffffe
284#define __HOSTFN3_LPU0_MBOX0_INFO_SH 1
285#define __HOSTFN3_LPU0_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU0_MBOX0_INFO_SH)
286#define __HOSTFN3_LPU0_MBOX0_CMD_STATUS 0x00000001
287#define HOSTFN3_LPU1_MBOX0_CMD_STAT 0x00019164
288#define __HOSTFN3_LPU1_MBOX0_INFO_MK 0xfffffffe
289#define __HOSTFN3_LPU1_MBOX0_INFO_SH 1
290#define __HOSTFN3_LPU1_MBOX0_INFO(_v) ((_v) << __HOSTFN3_LPU1_MBOX0_INFO_SH)
291#define __HOSTFN3_LPU1_MBOX0_CMD_STATUS 0x00000001
292#define LPU0_HOSTFN3_MBOX0_CMD_STAT 0x00019168
293#define __LPU0_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
294#define __LPU0_HOSTFN3_MBOX0_INFO_SH 1
295#define __LPU0_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU0_HOSTFN3_MBOX0_INFO_SH)
296#define __LPU0_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
297#define LPU1_HOSTFN3_MBOX0_CMD_STAT 0x0001916c
298#define __LPU1_HOSTFN3_MBOX0_INFO_MK 0xfffffffe
299#define __LPU1_HOSTFN3_MBOX0_INFO_SH 1
300#define __LPU1_HOSTFN3_MBOX0_INFO(_v) ((_v) << __LPU1_HOSTFN3_MBOX0_INFO_SH)
301#define __LPU1_HOSTFN3_MBOX0_CMD_STATUS 0x00000001
302#define FW_INIT_HALT_P0 0x000191ac
303#define __FW_INIT_HALT_P 0x00000001
304#define FW_INIT_HALT_P1 0x000191bc
305#define CPE_PI_PTR_Q0 0x00038000
306#define __CPE_PI_UNUSED_MK 0xffff0000
307#define __CPE_PI_UNUSED_SH 16
308#define __CPE_PI_UNUSED(_v) ((_v) << __CPE_PI_UNUSED_SH)
309#define __CPE_PI_PTR 0x0000ffff
310#define CPE_PI_PTR_Q1 0x00038040
311#define CPE_CI_PTR_Q0 0x00038004
312#define __CPE_CI_UNUSED_MK 0xffff0000
313#define __CPE_CI_UNUSED_SH 16
314#define __CPE_CI_UNUSED(_v) ((_v) << __CPE_CI_UNUSED_SH)
315#define __CPE_CI_PTR 0x0000ffff
316#define CPE_CI_PTR_Q1 0x00038044
317#define CPE_DEPTH_Q0 0x00038008
318#define __CPE_DEPTH_UNUSED_MK 0xf8000000
319#define __CPE_DEPTH_UNUSED_SH 27
320#define __CPE_DEPTH_UNUSED(_v) ((_v) << __CPE_DEPTH_UNUSED_SH)
321#define __CPE_MSIX_VEC_INDEX_MK 0x07ff0000
322#define __CPE_MSIX_VEC_INDEX_SH 16
323#define __CPE_MSIX_VEC_INDEX(_v) ((_v) << __CPE_MSIX_VEC_INDEX_SH)
324#define __CPE_DEPTH 0x0000ffff
325#define CPE_DEPTH_Q1 0x00038048
326#define CPE_QCTRL_Q0 0x0003800c
327#define __CPE_CTRL_UNUSED30_MK 0xfc000000
328#define __CPE_CTRL_UNUSED30_SH 26
329#define __CPE_CTRL_UNUSED30(_v) ((_v) << __CPE_CTRL_UNUSED30_SH)
330#define __CPE_FUNC_INT_CTRL_MK 0x03000000
331#define __CPE_FUNC_INT_CTRL_SH 24
332#define __CPE_FUNC_INT_CTRL(_v) ((_v) << __CPE_FUNC_INT_CTRL_SH)
333enum {
334 __CPE_FUNC_INT_CTRL_DISABLE = 0x0,
335 __CPE_FUNC_INT_CTRL_F2NF = 0x1,
336 __CPE_FUNC_INT_CTRL_3QUART = 0x2,
337 __CPE_FUNC_INT_CTRL_HALF = 0x3,
338};
339#define __CPE_CTRL_UNUSED20_MK 0x00f00000
340#define __CPE_CTRL_UNUSED20_SH 20
341#define __CPE_CTRL_UNUSED20(_v) ((_v) << __CPE_CTRL_UNUSED20_SH)
342#define __CPE_SCI_TH_MK 0x000f0000
343#define __CPE_SCI_TH_SH 16
344#define __CPE_SCI_TH(_v) ((_v) << __CPE_SCI_TH_SH)
345#define __CPE_CTRL_UNUSED10_MK 0x0000c000
346#define __CPE_CTRL_UNUSED10_SH 14
347#define __CPE_CTRL_UNUSED10(_v) ((_v) << __CPE_CTRL_UNUSED10_SH)
348#define __CPE_ACK_PENDING 0x00002000
349#define __CPE_CTRL_UNUSED40_MK 0x00001c00
350#define __CPE_CTRL_UNUSED40_SH 10
351#define __CPE_CTRL_UNUSED40(_v) ((_v) << __CPE_CTRL_UNUSED40_SH)
352#define __CPE_PCIEID_MK 0x00000300
353#define __CPE_PCIEID_SH 8
354#define __CPE_PCIEID(_v) ((_v) << __CPE_PCIEID_SH)
355#define __CPE_CTRL_UNUSED00_MK 0x000000fe
356#define __CPE_CTRL_UNUSED00_SH 1
357#define __CPE_CTRL_UNUSED00(_v) ((_v) << __CPE_CTRL_UNUSED00_SH)
358#define __CPE_ESIZE 0x00000001
359#define CPE_QCTRL_Q1 0x0003804c
360#define __CPE_CTRL_UNUSED31_MK 0xfc000000
361#define __CPE_CTRL_UNUSED31_SH 26
362#define __CPE_CTRL_UNUSED31(_v) ((_v) << __CPE_CTRL_UNUSED31_SH)
363#define __CPE_CTRL_UNUSED21_MK 0x00f00000
364#define __CPE_CTRL_UNUSED21_SH 20
365#define __CPE_CTRL_UNUSED21(_v) ((_v) << __CPE_CTRL_UNUSED21_SH)
366#define __CPE_CTRL_UNUSED11_MK 0x0000c000
367#define __CPE_CTRL_UNUSED11_SH 14
368#define __CPE_CTRL_UNUSED11(_v) ((_v) << __CPE_CTRL_UNUSED11_SH)
369#define __CPE_CTRL_UNUSED41_MK 0x00001c00
370#define __CPE_CTRL_UNUSED41_SH 10
371#define __CPE_CTRL_UNUSED41(_v) ((_v) << __CPE_CTRL_UNUSED41_SH)
372#define __CPE_CTRL_UNUSED01_MK 0x000000fe
373#define __CPE_CTRL_UNUSED01_SH 1
374#define __CPE_CTRL_UNUSED01(_v) ((_v) << __CPE_CTRL_UNUSED01_SH)
375#define RME_PI_PTR_Q0 0x00038020
376#define __LATENCY_TIME_STAMP_MK 0xffff0000
377#define __LATENCY_TIME_STAMP_SH 16
378#define __LATENCY_TIME_STAMP(_v) ((_v) << __LATENCY_TIME_STAMP_SH)
379#define __RME_PI_PTR 0x0000ffff
380#define RME_PI_PTR_Q1 0x00038060
381#define RME_CI_PTR_Q0 0x00038024
382#define __DELAY_TIME_STAMP_MK 0xffff0000
383#define __DELAY_TIME_STAMP_SH 16
384#define __DELAY_TIME_STAMP(_v) ((_v) << __DELAY_TIME_STAMP_SH)
385#define __RME_CI_PTR 0x0000ffff
386#define RME_CI_PTR_Q1 0x00038064
387#define RME_DEPTH_Q0 0x00038028
388#define __RME_DEPTH_UNUSED_MK 0xf8000000
389#define __RME_DEPTH_UNUSED_SH 27
390#define __RME_DEPTH_UNUSED(_v) ((_v) << __RME_DEPTH_UNUSED_SH)
391#define __RME_MSIX_VEC_INDEX_MK 0x07ff0000
392#define __RME_MSIX_VEC_INDEX_SH 16
393#define __RME_MSIX_VEC_INDEX(_v) ((_v) << __RME_MSIX_VEC_INDEX_SH)
394#define __RME_DEPTH 0x0000ffff
395#define RME_DEPTH_Q1 0x00038068
396#define RME_QCTRL_Q0 0x0003802c
397#define __RME_INT_LATENCY_TIMER_MK 0xff000000
398#define __RME_INT_LATENCY_TIMER_SH 24
399#define __RME_INT_LATENCY_TIMER(_v) ((_v) << __RME_INT_LATENCY_TIMER_SH)
400#define __RME_INT_DELAY_TIMER_MK 0x00ff0000
401#define __RME_INT_DELAY_TIMER_SH 16
402#define __RME_INT_DELAY_TIMER(_v) ((_v) << __RME_INT_DELAY_TIMER_SH)
403#define __RME_INT_DELAY_DISABLE 0x00008000
404#define __RME_DLY_DELAY_DISABLE 0x00004000
405#define __RME_ACK_PENDING 0x00002000
406#define __RME_FULL_INTERRUPT_DISABLE 0x00001000
407#define __RME_CTRL_UNUSED10_MK 0x00000c00
408#define __RME_CTRL_UNUSED10_SH 10
409#define __RME_CTRL_UNUSED10(_v) ((_v) << __RME_CTRL_UNUSED10_SH)
410#define __RME_PCIEID_MK 0x00000300
411#define __RME_PCIEID_SH 8
412#define __RME_PCIEID(_v) ((_v) << __RME_PCIEID_SH)
413#define __RME_CTRL_UNUSED00_MK 0x000000fe
414#define __RME_CTRL_UNUSED00_SH 1
415#define __RME_CTRL_UNUSED00(_v) ((_v) << __RME_CTRL_UNUSED00_SH)
416#define __RME_ESIZE 0x00000001
417#define RME_QCTRL_Q1 0x0003806c
418#define __RME_CTRL_UNUSED11_MK 0x00000c00
419#define __RME_CTRL_UNUSED11_SH 10
420#define __RME_CTRL_UNUSED11(_v) ((_v) << __RME_CTRL_UNUSED11_SH)
421#define __RME_CTRL_UNUSED01_MK 0x000000fe
422#define __RME_CTRL_UNUSED01_SH 1
423#define __RME_CTRL_UNUSED01(_v) ((_v) << __RME_CTRL_UNUSED01_SH)
424#define PSS_CTL_REG 0x00018800
425#define __PSS_I2C_CLK_DIV_MK 0x007f0000
426#define __PSS_I2C_CLK_DIV_SH 16
427#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
428#define __PSS_LMEM_INIT_DONE 0x00001000
429#define __PSS_LMEM_RESET 0x00000200
430#define __PSS_LMEM_INIT_EN 0x00000100
431#define __PSS_LPU1_RESET 0x00000002
432#define __PSS_LPU0_RESET 0x00000001
433#define PSS_ERR_STATUS_REG 0x00018810
434#define __PSS_LPU1_TCM_READ_ERR 0x00200000
435#define __PSS_LPU0_TCM_READ_ERR 0x00100000
436#define __PSS_LMEM5_CORR_ERR 0x00080000
437#define __PSS_LMEM4_CORR_ERR 0x00040000
438#define __PSS_LMEM3_CORR_ERR 0x00020000
439#define __PSS_LMEM2_CORR_ERR 0x00010000
440#define __PSS_LMEM1_CORR_ERR 0x00008000
441#define __PSS_LMEM0_CORR_ERR 0x00004000
442#define __PSS_LMEM5_UNCORR_ERR 0x00002000
443#define __PSS_LMEM4_UNCORR_ERR 0x00001000
444#define __PSS_LMEM3_UNCORR_ERR 0x00000800
445#define __PSS_LMEM2_UNCORR_ERR 0x00000400
446#define __PSS_LMEM1_UNCORR_ERR 0x00000200
447#define __PSS_LMEM0_UNCORR_ERR 0x00000100
448#define __PSS_BAL_PERR 0x00000080
449#define __PSS_DIP_IF_ERR 0x00000040
450#define __PSS_IOH_IF_ERR 0x00000020
451#define __PSS_TDS_IF_ERR 0x00000010
452#define __PSS_RDS_IF_ERR 0x00000008
453#define __PSS_SGM_IF_ERR 0x00000004
454#define __PSS_LPU1_RAM_ERR 0x00000002
455#define __PSS_LPU0_RAM_ERR 0x00000001
456#define ERR_SET_REG 0x00018818
457#define __PSS_ERR_STATUS_SET 0x003fffff
458#define PMM_1T_RESET_REG_P0 0x0002381c
459#define __PMM_1T_RESET_P 0x00000001
460#define PMM_1T_RESET_REG_P1 0x00023c1c
461#define HQM_QSET0_RXQ_DRBL_P0 0x00038000
462#define __RXQ0_ADD_VECTORS_P 0x80000000
463#define __RXQ0_STOP_P 0x40000000
464#define __RXQ0_PRD_PTR_P 0x0000ffff
465#define HQM_QSET1_RXQ_DRBL_P0 0x00038080
466#define __RXQ1_ADD_VECTORS_P 0x80000000
467#define __RXQ1_STOP_P 0x40000000
468#define __RXQ1_PRD_PTR_P 0x0000ffff
469#define HQM_QSET0_RXQ_DRBL_P1 0x0003c000
470#define HQM_QSET1_RXQ_DRBL_P1 0x0003c080
471#define HQM_QSET0_TXQ_DRBL_P0 0x00038020
472#define __TXQ0_ADD_VECTORS_P 0x80000000
473#define __TXQ0_STOP_P 0x40000000
474#define __TXQ0_PRD_PTR_P 0x0000ffff
475#define HQM_QSET1_TXQ_DRBL_P0 0x000380a0
476#define __TXQ1_ADD_VECTORS_P 0x80000000
477#define __TXQ1_STOP_P 0x40000000
478#define __TXQ1_PRD_PTR_P 0x0000ffff
479#define HQM_QSET0_TXQ_DRBL_P1 0x0003c020
480#define HQM_QSET1_TXQ_DRBL_P1 0x0003c0a0
481#define HQM_QSET0_IB_DRBL_1_P0 0x00038040
482#define __IB1_0_ACK_P 0x80000000
483#define __IB1_0_DISABLE_P 0x40000000
484#define __IB1_0_COALESCING_CFG_P_MK 0x00ff0000
485#define __IB1_0_COALESCING_CFG_P_SH 16
486#define __IB1_0_COALESCING_CFG_P(_v) ((_v) << __IB1_0_COALESCING_CFG_P_SH)
487#define __IB1_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
488#define HQM_QSET1_IB_DRBL_1_P0 0x000380c0
489#define __IB1_1_ACK_P 0x80000000
490#define __IB1_1_DISABLE_P 0x40000000
491#define __IB1_1_COALESCING_CFG_P_MK 0x00ff0000
492#define __IB1_1_COALESCING_CFG_P_SH 16
493#define __IB1_1_COALESCING_CFG_P(_v) ((_v) << __IB1_1_COALESCING_CFG_P_SH)
494#define __IB1_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
495#define HQM_QSET0_IB_DRBL_1_P1 0x0003c040
496#define HQM_QSET1_IB_DRBL_1_P1 0x0003c0c0
497#define HQM_QSET0_IB_DRBL_2_P0 0x00038060
498#define __IB2_0_ACK_P 0x80000000
499#define __IB2_0_DISABLE_P 0x40000000
500#define __IB2_0_COALESCING_CFG_P_MK 0x00ff0000
501#define __IB2_0_COALESCING_CFG_P_SH 16
502#define __IB2_0_COALESCING_CFG_P(_v) ((_v) << __IB2_0_COALESCING_CFG_P_SH)
503#define __IB2_0_NUM_OF_ACKED_EVENTS_P 0x0000ffff
504#define HQM_QSET1_IB_DRBL_2_P0 0x000380e0
505#define __IB2_1_ACK_P 0x80000000
506#define __IB2_1_DISABLE_P 0x40000000
507#define __IB2_1_COALESCING_CFG_P_MK 0x00ff0000
508#define __IB2_1_COALESCING_CFG_P_SH 16
509#define __IB2_1_COALESCING_CFG_P(_v) ((_v) << __IB2_1_COALESCING_CFG_P_SH)
510#define __IB2_1_NUM_OF_ACKED_EVENTS_P 0x0000ffff
511#define HQM_QSET0_IB_DRBL_2_P1 0x0003c060
512#define HQM_QSET1_IB_DRBL_2_P1 0x0003c0e0
513
514/*
515 * These definitions are either in error or missing in the spec. They are
516 * auto-generated from hard-coded values in regparse.pl.
517 */
518#define __EMPHPOST_AT_4G_MK_FIX 0x0000001c
519#define __EMPHPOST_AT_4G_SH_FIX 0x00000002
520#define __EMPHPRE_AT_4G_FIX 0x00000003
521#define __SFP_TXRATE_EN_FIX 0x00000100
522#define __SFP_RXRATE_EN_FIX 0x00000080
523
524/*
525 * These register definitions are auto-generated from hard-coded values
526 * in regparse.pl.
527 */
528
529/*
530 * These register mapping definitions are auto-generated from mapping tables
531 * in regparse.pl.
532 */
533#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
534#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
535#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
536#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
537#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
538
539#define CPE_DEPTH_Q(__n) \
540 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
541#define CPE_QCTRL_Q(__n) \
542 (CPE_QCTRL_Q0 + (__n) * (CPE_QCTRL_Q1 - CPE_QCTRL_Q0))
543#define CPE_PI_PTR_Q(__n) \
544 (CPE_PI_PTR_Q0 + (__n) * (CPE_PI_PTR_Q1 - CPE_PI_PTR_Q0))
545#define CPE_CI_PTR_Q(__n) \
546 (CPE_CI_PTR_Q0 + (__n) * (CPE_CI_PTR_Q1 - CPE_CI_PTR_Q0))
547#define RME_DEPTH_Q(__n) \
548 (RME_DEPTH_Q0 + (__n) * (RME_DEPTH_Q1 - RME_DEPTH_Q0))
549#define RME_QCTRL_Q(__n) \
550 (RME_QCTRL_Q0 + (__n) * (RME_QCTRL_Q1 - RME_QCTRL_Q0))
551#define RME_PI_PTR_Q(__n) \
552 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
553#define RME_CI_PTR_Q(__n) \
554 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
555#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
556 * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
557#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
558 * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
559#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
560 * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
561#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
562 * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
563#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
564 * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
565#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
566 * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
567#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
568 * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
569#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
570 * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
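/*
 * Illustrative note: the per-queue and per-qset accessor macros above derive
 * a register address from the Q0 base plus the queue index scaled by the
 * Q0->Q1 stride. For example:
 *
 *	CPE_DEPTH_Q(1) = CPE_DEPTH_Q0 + 1 * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)
 *		       = 0x00038008 + 0x40 = 0x00038048 (== CPE_DEPTH_Q1)
 */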
571
572#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
573#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
574#define CPE_Q_MASK(__q) ((__q) & 0x3)
575#define RME_Q_MASK(__q) ((__q) & 0x3)
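/*
 * Illustrative example: with four queues per PCI function, CPE_Q_NUM(2, 1)
 * = (2 << 2) + 1 = 9 is the global queue number for function 2, queue 1,
 * and CPE_Q_MASK(9) = 9 & 0x3 = 1 recovers the per-function queue index.
 */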
576
577/*
578 * PCI MSI-X vector defines
579 */
580enum {
581 BFA_MSIX_CPE_Q0 = 0,
582 BFA_MSIX_CPE_Q1 = 1,
583 BFA_MSIX_CPE_Q2 = 2,
584 BFA_MSIX_CPE_Q3 = 3,
585 BFA_MSIX_RME_Q0 = 4,
586 BFA_MSIX_RME_Q1 = 5,
587 BFA_MSIX_RME_Q2 = 6,
588 BFA_MSIX_RME_Q3 = 7,
589 BFA_MSIX_LPU_ERR = 8,
590 BFA_MSIX_CT_MAX = 9,
591};
592
593/*
594 * And corresponding host interrupt status bit field defines
595 */
596#define __HFN_INT_CPE_Q0 0x00000001U
597#define __HFN_INT_CPE_Q1 0x00000002U
598#define __HFN_INT_CPE_Q2 0x00000004U
599#define __HFN_INT_CPE_Q3 0x00000008U
600#define __HFN_INT_CPE_Q4 0x00000010U
601#define __HFN_INT_CPE_Q5 0x00000020U
602#define __HFN_INT_CPE_Q6 0x00000040U
603#define __HFN_INT_CPE_Q7 0x00000080U
604#define __HFN_INT_RME_Q0 0x00000100U
605#define __HFN_INT_RME_Q1 0x00000200U
606#define __HFN_INT_RME_Q2 0x00000400U
607#define __HFN_INT_RME_Q3 0x00000800U
608#define __HFN_INT_RME_Q4 0x00001000U
609#define __HFN_INT_RME_Q5 0x00002000U
610#define __HFN_INT_RME_Q6 0x00004000U
611#define __HFN_INT_RME_Q7 0x00008000U
612#define __HFN_INT_ERR_EMC 0x00010000U
613#define __HFN_INT_ERR_LPU0 0x00020000U
614#define __HFN_INT_ERR_LPU1 0x00040000U
615#define __HFN_INT_ERR_PSS 0x00080000U
616#define __HFN_INT_MBOX_LPU0 0x00100000U
617#define __HFN_INT_MBOX_LPU1 0x00200000U
618#define __HFN_INT_MBOX1_LPU0 0x00400000U
619#define __HFN_INT_MBOX1_LPU1 0x00800000U
620#define __HFN_INT_LL_HALT 0x01000000U
621#define __HFN_INT_CPE_MASK 0x000000ffU
622#define __HFN_INT_RME_MASK 0x0000ff00U
623
624/*
625 * catapult memory map.
626 */
627#define LL_PGN_HQM0 0x0096
628#define LL_PGN_HQM1 0x0097
629#define PSS_SMEM_PAGE_START 0x8000
630#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
631#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
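/*
 * Illustrative example (hypothetical address): SMEM is paged in 32KB
 * (0x8000-byte) units, so for _ma = 0x12345:
 *
 *	PSS_SMEM_PGNUM(PSS_SMEM_PAGE_START, 0x12345)
 *		= 0x8000 + (0x12345 >> 15) = 0x8002
 *	PSS_SMEM_PGOFF(0x12345) = 0x12345 & 0x7fff = 0x2345
 */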
632
633/*
634 * End of catapult memory map
635 */
636
637#endif /* __BFI_CTREG_H__ */
diff --git a/drivers/net/bna/bfi_ll.h b/drivers/net/bna/bfi_ll.h
new file mode 100644
index 000000000000..bee4d054066a
--- /dev/null
+++ b/drivers/net/bna/bfi_ll.h
@@ -0,0 +1,438 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BFI_LL_H__
19#define __BFI_LL_H__
20
21#include "bfi.h"
22
23#pragma pack(1)
24
25/**
26 * @brief
27 * "enums" for all LL mailbox messages other than IOC
28 */
29enum {
30 BFI_LL_H2I_MAC_UCAST_SET_REQ = 1,
31 BFI_LL_H2I_MAC_UCAST_ADD_REQ = 2,
32 BFI_LL_H2I_MAC_UCAST_DEL_REQ = 3,
33
34 BFI_LL_H2I_MAC_MCAST_ADD_REQ = 4,
35 BFI_LL_H2I_MAC_MCAST_DEL_REQ = 5,
36 BFI_LL_H2I_MAC_MCAST_FILTER_REQ = 6,
37 BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ = 7,
38
39 BFI_LL_H2I_PORT_ADMIN_REQ = 8,
40 BFI_LL_H2I_STATS_GET_REQ = 9,
41 BFI_LL_H2I_STATS_CLEAR_REQ = 10,
42
43 BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ = 11,
44 BFI_LL_H2I_RXF_DEFAULT_SET_REQ = 12,
45
46 BFI_LL_H2I_TXQ_STOP_REQ = 13,
47 BFI_LL_H2I_RXQ_STOP_REQ = 14,
48
49 BFI_LL_H2I_DIAG_LOOPBACK_REQ = 15,
50
51 BFI_LL_H2I_SET_PAUSE_REQ = 16,
52 BFI_LL_H2I_MTU_INFO_REQ = 17,
53
54 BFI_LL_H2I_RX_REQ = 18,
55};
56
57enum {
58 BFI_LL_I2H_MAC_UCAST_SET_RSP = BFA_I2HM(1),
59 BFI_LL_I2H_MAC_UCAST_ADD_RSP = BFA_I2HM(2),
60 BFI_LL_I2H_MAC_UCAST_DEL_RSP = BFA_I2HM(3),
61
62 BFI_LL_I2H_MAC_MCAST_ADD_RSP = BFA_I2HM(4),
63 BFI_LL_I2H_MAC_MCAST_DEL_RSP = BFA_I2HM(5),
64 BFI_LL_I2H_MAC_MCAST_FILTER_RSP = BFA_I2HM(6),
65 BFI_LL_I2H_MAC_MCAST_DEL_ALL_RSP = BFA_I2HM(7),
66
67 BFI_LL_I2H_PORT_ADMIN_RSP = BFA_I2HM(8),
68 BFI_LL_I2H_STATS_GET_RSP = BFA_I2HM(9),
69 BFI_LL_I2H_STATS_CLEAR_RSP = BFA_I2HM(10),
70
71 BFI_LL_I2H_RXF_PROMISCUOUS_SET_RSP = BFA_I2HM(11),
72 BFI_LL_I2H_RXF_DEFAULT_SET_RSP = BFA_I2HM(12),
73
74 BFI_LL_I2H_TXQ_STOP_RSP = BFA_I2HM(13),
75 BFI_LL_I2H_RXQ_STOP_RSP = BFA_I2HM(14),
76
77 BFI_LL_I2H_DIAG_LOOPBACK_RSP = BFA_I2HM(15),
78
79 BFI_LL_I2H_SET_PAUSE_RSP = BFA_I2HM(16),
80
81 BFI_LL_I2H_MTU_INFO_RSP = BFA_I2HM(17),
82 BFI_LL_I2H_RX_RSP = BFA_I2HM(18),
83
84 BFI_LL_I2H_LINK_DOWN_AEN = BFA_I2HM(19),
85 BFI_LL_I2H_LINK_UP_AEN = BFA_I2HM(20),
86
87 BFI_LL_I2H_PORT_ENABLE_AEN = BFA_I2HM(21),
88 BFI_LL_I2H_PORT_DISABLE_AEN = BFA_I2HM(22),
89};
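/*
 * Each I2H response id above is the BFA_I2HM() mapping of its H2I request
 * id; e.g. BFI_LL_I2H_STATS_GET_RSP pairs with BFI_LL_H2I_STATS_GET_REQ
 * (message number 9). The link/port AENs (19-22) have no H2I counterpart.
 */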
90
91/**
92 * @brief bfi_ll_mac_addr_req is used by:
93 * BFI_LL_H2I_MAC_UCAST_SET_REQ
94 * BFI_LL_H2I_MAC_UCAST_ADD_REQ
95 * BFI_LL_H2I_MAC_UCAST_DEL_REQ
96 * BFI_LL_H2I_MAC_MCAST_ADD_REQ
97 * BFI_LL_H2I_MAC_MCAST_DEL_REQ
98 */
99struct bfi_ll_mac_addr_req {
100 struct bfi_mhdr mh; /*!< common msg header */
101 u8 rxf_id;
102 u8 rsvd1[3];
103 mac_t mac_addr;
104 u8 rsvd2[2];
105};
106
107/**
108 * @brief bfi_ll_mcast_filter_req is used by:
109 * BFI_LL_H2I_MAC_MCAST_FILTER_REQ
110 */
111struct bfi_ll_mcast_filter_req {
112 struct bfi_mhdr mh; /*!< common msg header */
113 u8 rxf_id;
114 u8 enable;
115 u8 rsvd[2];
116};
117
118/**
119 * @brief bfi_ll_mcast_del_all_req is used by:
120 * BFI_LL_H2I_MAC_MCAST_DEL_ALL_REQ
121 */
122struct bfi_ll_mcast_del_all_req {
123 struct bfi_mhdr mh; /*!< common msg header */
124 u8 rxf_id;
125 u8 rsvd[3];
126};
127
128/**
129 * @brief bfi_ll_q_stop_req is used by:
130 * BFI_LL_H2I_TXQ_STOP_REQ
131 * BFI_LL_H2I_RXQ_STOP_REQ
132 */
133struct bfi_ll_q_stop_req {
134 struct bfi_mhdr mh; /*!< common msg header */
135 u32 q_id_mask[2]; /*!< bit-mask for queue IDs */
136};
137
138/**
139 * @brief bfi_ll_stats_req is used by:
140 * BFI_LL_H2I_STATS_GET_REQ
141 * BFI_LL_H2I_STATS_CLEAR_REQ
142 */
143struct bfi_ll_stats_req {
144 struct bfi_mhdr mh; /*!< common msg header */
145 u16 stats_mask; /*!< bit-mask for non-function statistics */
146 u8 rsvd[2];
147 u32 rxf_id_mask[2]; /*!< bit-mask for RxF statistics */
148 u32 txf_id_mask[2]; /*!< bit-mask for TxF statistics */
149 union bfi_addr_u host_buffer; /*!< where statistics are returned */
150};
151
152/**
153 * @brief defines for "stats_mask" above.
154 */
155#define BFI_LL_STATS_MAC (1 << 0) /*!< MAC statistics */
156#define BFI_LL_STATS_BPC (1 << 1) /*!< Pause stats from BPC */
157#define BFI_LL_STATS_RAD (1 << 2) /*!< Rx Admission statistics */
158#define BFI_LL_STATS_RX_FC (1 << 3) /*!< Rx FC stats from RxA */
159#define BFI_LL_STATS_TX_FC (1 << 4) /*!< Tx FC stats from TxA */
160
161#define BFI_LL_STATS_ALL 0x1f
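/* BFI_LL_STATS_ALL (0x1f) is the bitwise OR of the five mask bits above. */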
162
163/**
164 * @brief bfi_ll_port_admin_req
165 */
166struct bfi_ll_port_admin_req {
167 struct bfi_mhdr mh; /*!< common msg header */
168 u8 up;
169 u8 rsvd[3];
170};
171
172/**
173 * @brief bfi_ll_rxf_req is used by:
174 * BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
175 * BFI_LL_H2I_RXF_DEFAULT_SET_REQ
176 */
177struct bfi_ll_rxf_req {
178 struct bfi_mhdr mh; /*!< common msg header */
179 u8 rxf_id;
180 u8 enable;
181 u8 rsvd[2];
182};
183
184/**
185 * @brief bfi_ll_rxf_multi_req is used by:
186 * BFI_LL_H2I_RX_REQ
187 */
188struct bfi_ll_rxf_multi_req {
189 struct bfi_mhdr mh; /*!< common msg header */
190 u32 rxf_id_mask[2];
191 u8 enable;
192 u8 rsvd[3];
193};
194
195/**
196 * @brief enum for Loopback opmodes
197 */
198enum {
199 BFI_LL_DIAG_LB_OPMODE_EXT = 0,
200 BFI_LL_DIAG_LB_OPMODE_CBL = 1,
201};
202
203/**
204 * @brief bfi_ll_set_pause_req is used by:
205 * BFI_LL_H2I_SET_PAUSE_REQ
206 */
207struct bfi_ll_set_pause_req {
208 struct bfi_mhdr mh;
209 u8 tx_pause; /* 1 = enable, 0 = disable */
210 u8 rx_pause; /* 1 = enable, 0 = disable */
211 u8 rsvd[2];
212};
213
214/**
215 * @brief bfi_ll_mtu_info_req is used by:
216 * BFI_LL_H2I_MTU_INFO_REQ
217 */
218struct bfi_ll_mtu_info_req {
219 struct bfi_mhdr mh;
220 u16 mtu;
221 u8 rsvd[2];
222};
223
224/**
225 * @brief
226 * Response header format used by all responses and
227 * asynchronous notifications
228 */
229struct bfi_ll_rsp {
230 struct bfi_mhdr mh; /*!< common msg header */
231 u8 error;
232 u8 rsvd[3];
233};
234
235/**
236 * @brief bfi_ll_aen is used by:
237 * BFI_LL_I2H_LINK_DOWN_AEN
238 * BFI_LL_I2H_LINK_UP_AEN
239 */
240struct bfi_ll_aen {
241 struct bfi_mhdr mh; /*!< common msg header */
242 u32 reason;
243 u8 cee_linkup;
244 u8 prio_map; /*!< LL priority bit-map */
245 u8 rsvd[2];
246};
247
248/**
249 * @brief
250 * The following error codes can be returned
251 * by the mbox commands
252 */
253enum {
254 BFI_LL_CMD_OK = 0,
255 BFI_LL_CMD_FAIL = 1,
256 BFI_LL_CMD_DUP_ENTRY = 2, /*!< Duplicate entry in CAM */
257 BFI_LL_CMD_CAM_FULL = 3, /*!< CAM is full */
258 BFI_LL_CMD_NOT_OWNER = 4, /*!< Not permitted because not owner */
259 BFI_LL_CMD_NOT_EXEC = 5, /*!< Was not sent to f/w at all */
260 BFI_LL_CMD_WAITING = 6, /*!< Waiting for completion (VMware) */
261 BFI_LL_CMD_PORT_DISABLED = 7, /*!< Port is in disabled state */
262};
263
264/* Statistics */
265#define BFI_LL_TXF_ID_MAX 64
266#define BFI_LL_RXF_ID_MAX 64
267
268/* TxF Frame Statistics */
269struct bfi_ll_stats_txf {
270 u64 ucast_octets;
271 u64 ucast;
272 u64 ucast_vlan;
273
274 u64 mcast_octets;
275 u64 mcast;
276 u64 mcast_vlan;
277
278 u64 bcast_octets;
279 u64 bcast;
280 u64 bcast_vlan;
281
282 u64 errors;
283 u64 filter_vlan; /* frames filtered due to VLAN */
284 u64 filter_mac_sa; /* frames filtered due to SA check */
285};
286
287/* RxF Frame Statistics */
288struct bfi_ll_stats_rxf {
289 u64 ucast_octets;
290 u64 ucast;
291 u64 ucast_vlan;
292
293 u64 mcast_octets;
294 u64 mcast;
295 u64 mcast_vlan;
296
297 u64 bcast_octets;
298 u64 bcast;
299 u64 bcast_vlan;
300 u64 frame_drops;
301};
302
303/* FC Tx Frame Statistics */
304struct bfi_ll_stats_fc_tx {
305 u64 txf_ucast_octets;
306 u64 txf_ucast;
307 u64 txf_ucast_vlan;
308
309 u64 txf_mcast_octets;
310 u64 txf_mcast;
311 u64 txf_mcast_vlan;
312
313 u64 txf_bcast_octets;
314 u64 txf_bcast;
315 u64 txf_bcast_vlan;
316
317 u64 txf_parity_errors;
318 u64 txf_timeout;
319 u64 txf_fid_parity_errors;
320};
321
322/* FC Rx Frame Statistics */
323struct bfi_ll_stats_fc_rx {
324 u64 rxf_ucast_octets;
325 u64 rxf_ucast;
326 u64 rxf_ucast_vlan;
327
328 u64 rxf_mcast_octets;
329 u64 rxf_mcast;
330 u64 rxf_mcast_vlan;
331
332 u64 rxf_bcast_octets;
333 u64 rxf_bcast;
334 u64 rxf_bcast_vlan;
335};
336
337/* RAD Frame Statistics */
338struct bfi_ll_stats_rad {
339 u64 rx_frames;
340 u64 rx_octets;
341 u64 rx_vlan_frames;
342
343 u64 rx_ucast;
344 u64 rx_ucast_octets;
345 u64 rx_ucast_vlan;
346
347 u64 rx_mcast;
348 u64 rx_mcast_octets;
349 u64 rx_mcast_vlan;
350
351 u64 rx_bcast;
352 u64 rx_bcast_octets;
353 u64 rx_bcast_vlan;
354
355 u64 rx_drops;
356};
357
358/* BPC Pause Statistics (Tx and Rx) */
359struct bfi_ll_stats_bpc {
360 /* transmit stats */
361 u64 tx_pause[8];
362 u64 tx_zero_pause[8]; /*!< Pause cancellation */
363 /*!< Pause initiation rather than retention */
364 u64 tx_first_pause[8];
365
366 /* receive stats */
367 u64 rx_pause[8];
368 u64 rx_zero_pause[8]; /*!< Pause cancellation */
369 /*!< Pause initiation rather than retention */
370 u64 rx_first_pause[8];
371};
372
373/* MAC Statistics (Rx and Tx) */
374struct bfi_ll_stats_mac {
375 u64 frame_64; /* both rx and tx counter */
376 u64 frame_65_127; /* both rx and tx counter */
377 u64 frame_128_255; /* both rx and tx counter */
378 u64 frame_256_511; /* both rx and tx counter */
379 u64 frame_512_1023; /* both rx and tx counter */
380 u64 frame_1024_1518; /* both rx and tx counter */
381 u64 frame_1519_1522; /* both rx and tx counter */
382
383 /* receive stats */
384 u64 rx_bytes;
385 u64 rx_packets;
386 u64 rx_fcs_error;
387 u64 rx_multicast;
388 u64 rx_broadcast;
389 u64 rx_control_frames;
390 u64 rx_pause;
391 u64 rx_unknown_opcode;
392 u64 rx_alignment_error;
393 u64 rx_frame_length_error;
394 u64 rx_code_error;
395 u64 rx_carrier_sense_error;
396 u64 rx_undersize;
397 u64 rx_oversize;
398 u64 rx_fragments;
399 u64 rx_jabber;
400 u64 rx_drop;
401
402 /* transmit stats */
403 u64 tx_bytes;
404 u64 tx_packets;
405 u64 tx_multicast;
406 u64 tx_broadcast;
407 u64 tx_pause;
408 u64 tx_deferral;
409 u64 tx_excessive_deferral;
410 u64 tx_single_collision;
411 u64 tx_multiple_collision;
412 u64 tx_late_collision;
413 u64 tx_excessive_collision;
414 u64 tx_total_collision;
415 u64 tx_pause_honored;
416 u64 tx_drop;
417 u64 tx_jabber;
418 u64 tx_fcs_error;
419 u64 tx_control_frame;
420 u64 tx_oversize;
421 u64 tx_undersize;
422 u64 tx_fragments;
423};
424
425/* Complete statistics */
426struct bfi_ll_stats {
427 struct bfi_ll_stats_mac mac_stats;
428 struct bfi_ll_stats_bpc bpc_stats;
429 struct bfi_ll_stats_rad rad_stats;
430 struct bfi_ll_stats_fc_rx fc_rx_stats;
431 struct bfi_ll_stats_fc_tx fc_tx_stats;
432 struct bfi_ll_stats_rxf rxf_stats[BFI_LL_RXF_ID_MAX];
433 struct bfi_ll_stats_txf txf_stats[BFI_LL_TXF_ID_MAX];
434};
435
436#pragma pack()
437
438#endif /* __BFI_LL_H__ */
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
new file mode 100644
index 000000000000..6a2b3291c190
--- /dev/null
+++ b/drivers/net/bna/bna.h
@@ -0,0 +1,654 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#ifndef __BNA_H__
14#define __BNA_H__
15
16#include "bfa_wc.h"
17#include "bfa_ioc.h"
18#include "cna.h"
19#include "bfi_ll.h"
20#include "bna_types.h"
21
22extern u32 bna_dim_vector[][BNA_BIAS_T_MAX];
23extern u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
24
25/**
26 *
27 * Macros and constants
28 *
29 */
30
31#define BNA_IOC_TIMER_FREQ 200
32
33/* Log string size */
34#define BNA_MESSAGE_SIZE 256
35
36#define bna_device_timer(_dev) bfa_timer_beat(&((_dev)->timer_mod))
37
38/* MBOX API for PORT, TX, RX */
39#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \
40do { \
41 memcpy(&((_qe)->cmd.msg[0]), (_cmd), (_cmd_len)); \
42 (_qe)->cbfn = (_cbfn); \
43 (_qe)->cbarg = (_cbarg); \
44} while (0)
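/*
 * Usage sketch (illustrative; my_cbfn and my_cbarg are hypothetical names,
 * and a real caller fills in the request message first):
 *
 *	struct bfi_ll_port_admin_req req;
 *	struct bna_mbox_qe qe;
 *
 *	bna_mbox_qe_fill(&qe, &req, sizeof(req), my_cbfn, my_cbarg);
 *	bna_mbox_send(bna, &qe);
 */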
45
46#define bna_is_small_rxq(rcb) ((rcb)->id == 1)
47
48#define BNA_MAC_IS_EQUAL(_mac1, _mac2) \
49 (!memcmp((_mac1), (_mac2), sizeof(mac_t)))
50
51#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
52
53#define BNA_TO_POWER_OF_2(x) \
54do { \
55 int _shift = 0; \
56 while ((x) && (x) != 1) { \
57 (x) >>= 1; \
58 _shift++; \
59 } \
60 (x) <<= _shift; \
61} while (0)
62
63#define BNA_TO_POWER_OF_2_HIGH(x) \
64do { \
65 int n = 1; \
66 while (n < (x)) \
67 n <<= 1; \
68 (x) = n; \
69} while (0)
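/*
 * Illustrative example: BNA_TO_POWER_OF_2() rounds down to the highest set
 * bit, BNA_TO_POWER_OF_2_HIGH() rounds up to the next power of two; for
 * x = 13 the former yields 8 and the latter 16.
 */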
70
71/*
72 * input : _addr -> OS DMA address in host endian format
73 * output : _bna_dma_addr -> pointer to HW DMA address
74 */
75#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \
76do { \
77 u64 tmp_addr = \
78 cpu_to_be64((u64)(_addr)); \
79 (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
80 (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
81} while (0)
82
83/*
84 * input : _bna_dma_addr -> pointer to HW DMA address
85 * output : _addr -> OS DMA address in host endian format
86 */
87#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \
88do { \
89 (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \
90 | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \
91} while (0)
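/*
 * Illustrative example (hypothetical address): for a host address of
 * 0x0000000123456789, BNA_SET_DMA_ADDR() stores msb = 0x00000001 and
 * lsb = 0x23456789 in big-endian (hardware) byte order, and
 * BNA_GET_DMA_ADDR() reassembles the same 64-bit host value from them.
 */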
92
93#define containing_rec(addr, type, field) \
94 ((type *)((unsigned char *)(addr) - \
95 (unsigned char *)(&((type *)0)->field)))
96
97#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
98
99/* TxQ element is 64 bytes */
100#define BNA_TXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 6)
101#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 6)
102
103#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
104{ \
105 unsigned int page_index; /* index within a page */ \
106 void *page_addr; \
107 page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
108 (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
109 page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
110 (_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
111}
112
113/* RxQ element is 8 bytes */
114#define BNA_RXQ_PAGE_INDEX_MAX (PAGE_SIZE >> 3)
115#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 3)
116
117#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
118{ \
119 unsigned int page_index; /* index within a page */ \
120 void *page_addr; \
121 page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1); \
122 (_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index); \
123 page_addr = (_qpt_ptr)[((_qe_idx) >> \
124 BNA_RXQ_PAGE_INDEX_MAX_SHIFT)]; \
125 (_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
126}
127
128/* CQ element is 16 bytes */
129#define BNA_CQ_PAGE_INDEX_MAX (PAGE_SIZE >> 4)
130#define BNA_CQ_PAGE_INDEX_MAX_SHIFT (PAGE_SHIFT - 4)
131
132#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
133{ \
134 unsigned int page_index; /* index within a page */ \
135 void *page_addr; \
136 \
137 page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1); \
138 (_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index); \
139 page_addr = (_qpt_ptr)[((_qe_idx) >> \
140 BNA_CQ_PAGE_INDEX_MAX_SHIFT)]; \
141 (_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];\
142}
143
144#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base) \
145 (&((_cast *)(_q_base))[(_qe_idx)])
146
147#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))
148
149#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
150 ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
151
152#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
153 (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
154
155#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \
156 (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \
157 ((_q_depth) - 1))
158
159#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \
160 ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \
161 ((_q_depth) - 1))
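/*
 * Illustrative example (queue depth must be a power of two): with
 * _q_depth = 8, producer_index = 5 and consumer_index = 2,
 * BNA_QE_IN_USE_CNT() = (5 - 2) & 7 = 3 and
 * BNA_QE_FREE_CNT() = (2 - 5 - 1) & 7 = 4; one slot always stays unused so
 * that a full queue can be told apart from an empty one.
 */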
162
163#define BNA_Q_GET_CI(_q_ptr) ((_q_ptr)->q.consumer_index)
164
165#define BNA_Q_GET_PI(_q_ptr) ((_q_ptr)->q.producer_index)
166
167#define BNA_Q_PI_ADD(_q_ptr, _num) \
168 (_q_ptr)->q.producer_index = \
169 (((_q_ptr)->q.producer_index + (_num)) & \
170 ((_q_ptr)->q.q_depth - 1))
171
172#define BNA_Q_CI_ADD(_q_ptr, _num) \
173 (_q_ptr)->q.consumer_index = \
174 (((_q_ptr)->q.consumer_index + (_num)) \
175 & ((_q_ptr)->q.q_depth - 1))
176
177#define BNA_Q_FREE_COUNT(_q_ptr) \
178 (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
179
180#define BNA_Q_IN_USE_COUNT(_q_ptr) \
181 (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
182
183/* These macros build the data portion of the TxQ/RxQ doorbell */
184#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
185#define BNA_DOORBELL_Q_STOP (0x40000000)
186
187/* These macros build the data portion of the IB doorbell */
188#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
189 (0x80000000 | ((_timeout) << 16) | (_events))
190#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
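/*
 * Illustrative example: acking 100 events with coalescing timeout 5 encodes
 * as BNA_DOORBELL_IB_INT_ACK(5, 100) = 0x80000000 | (5 << 16) | 100
 * = 0x80050064.
 */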
191
192/* Set the coalescing timer for the given ib */
193#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \
194 ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0))
195
196/* Acknowledge 'events' number of events for a given IB */
197#define bna_ib_ack(_i_dbell, _events) \
198 (writel(((_i_dbell)->doorbell_ack | (_events)), \
199 (_i_dbell)->doorbell_addr))
200
201#define bna_txq_prod_indx_doorbell(_tcb) \
202 (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \
203 (_tcb)->q_dbell))
204
205#define bna_rxq_prod_indx_doorbell(_rcb) \
206 (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
207 (_rcb)->q_dbell))
208
209#define BNA_LARGE_PKT_SIZE 1000
210
211#define BNA_UPDATE_PKT_CNT(_pkt, _len) \
212do { \
213 if ((_len) > BNA_LARGE_PKT_SIZE) { \
214 (_pkt)->large_pkt_cnt++; \
215 } else { \
216 (_pkt)->small_pkt_cnt++; \
217 } \
218} while (0)
219
220#define call_rxf_stop_cbfn(rxf, status) \
221 if ((rxf)->stop_cbfn) { \
222 (*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status)); \
223 (rxf)->stop_cbfn = NULL; \
224 (rxf)->stop_cbarg = NULL; \
225 }
226
227#define call_rxf_start_cbfn(rxf, status) \
228 if ((rxf)->start_cbfn) { \
229 (*(rxf)->start_cbfn)((rxf)->start_cbarg, (status)); \
230 (rxf)->start_cbfn = NULL; \
231 (rxf)->start_cbarg = NULL; \
232 }
233
234#define call_rxf_cam_fltr_cbfn(rxf, status) \
235 if ((rxf)->cam_fltr_cbfn) { \
236 (*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg, rxf->rx, \
237 (status)); \
238 (rxf)->cam_fltr_cbfn = NULL; \
239 (rxf)->cam_fltr_cbarg = NULL; \
240 }
241
242#define call_rxf_pause_cbfn(rxf, status) \
243 if ((rxf)->oper_state_cbfn) { \
244 (*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg, rxf->rx,\
245 (status)); \
246 (rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED; \
247 (rxf)->oper_state_cbfn = NULL; \
248 (rxf)->oper_state_cbarg = NULL; \
249 }
250
251#define call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)
252
253#define is_xxx_enable(mode, bitmask, xxx) (((bitmask) & (xxx)) && ((mode) & (xxx)))
254
255#define is_xxx_disable(mode, bitmask, xxx) (((bitmask) & (xxx)) && !((mode) & (xxx)))
256
257#define xxx_enable(mode, bitmask, xxx) \
258do { \
259 bitmask |= xxx; \
260 mode |= xxx; \
261} while (0)
262
263#define xxx_disable(mode, bitmask, xxx) \
264do { \
265 bitmask |= xxx; \
266 mode &= ~xxx; \
267} while (0)
268
269#define xxx_inactive(mode, bitmask, xxx) \
270do { \
271 bitmask &= ~xxx; \
272 mode &= ~xxx; \
273} while (0)
274
275#define is_promisc_enable(mode, bitmask) \
276 is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
277
278#define is_promisc_disable(mode, bitmask) \
279 is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
280
281#define promisc_enable(mode, bitmask) \
282 xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)
283
284#define promisc_disable(mode, bitmask) \
285 xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)
286
287#define promisc_inactive(mode, bitmask) \
288 xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)
289
290#define is_default_enable(mode, bitmask) \
291 is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
292
293#define is_default_disable(mode, bitmask) \
294 is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
295
296#define default_enable(mode, bitmask) \
297 xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)
298
299#define default_disable(mode, bitmask) \
300 xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)
301
302#define default_inactive(mode, bitmask) \
303 xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)
304
305#define is_allmulti_enable(mode, bitmask) \
306 is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
307
308#define is_allmulti_disable(mode, bitmask) \
309 is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
310
311#define allmulti_enable(mode, bitmask) \
312 xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)
313
314#define allmulti_disable(mode, bitmask) \
315 xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)
316
317#define allmulti_inactive(mode, bitmask) \
318 xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
319
320#define GET_RXQS(rxp, q0, q1) do { \
321 switch ((rxp)->type) { \
322 case BNA_RXP_SINGLE: \
323 (q0) = rxp->rxq.single.only; \
324 (q1) = NULL; \
325 break; \
326 case BNA_RXP_SLR: \
327 (q0) = rxp->rxq.slr.large; \
328 (q1) = rxp->rxq.slr.small; \
329 break; \
330 case BNA_RXP_HDS: \
331 (q0) = rxp->rxq.hds.data; \
332 (q1) = rxp->rxq.hds.hdr; \
333 break; \
334 } \
335} while (0)
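/*
 * Note: for the SLR and HDS types, q0 receives the large/data RxQ and q1 the
 * small/header RxQ; for BNA_RXP_SINGLE, q1 is left NULL.
 */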
336
337/**
338 *
339 * Function prototypes
340 *
341 */
342
343/**
344 * BNA
345 */
346
347/* Internal APIs */
348void bna_adv_res_req(struct bna_res_info *res_info);
349
350/* APIs for BNAD */
351void bna_res_req(struct bna_res_info *res_info);
352void bna_init(struct bna *bna, struct bnad *bnad,
353 struct bfa_pcidev *pcidev,
354 struct bna_res_info *res_info);
355void bna_uninit(struct bna *bna);
356void bna_stats_get(struct bna *bna);
357void bna_stats_clr(struct bna *bna);
358void bna_get_perm_mac(struct bna *bna, u8 *mac);
359
360/* APIs for Rx */
361int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);
362
363/* APIs for RxF */
364struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
365void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
366 struct bna_mac *mac);
367struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
368void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
369 struct bna_mac *mac);
370struct bna_rit_segment *
371bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
372void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
373 struct bna_rit_segment *seg);
374
375/**
376 * DEVICE
377 */
378
379/* Internal APIs */
380void bna_adv_device_init(struct bna_device *device, struct bna *bna,
381 struct bna_res_info *res_info);
382
383/* APIs for BNA */
384void bna_device_init(struct bna_device *device, struct bna *bna,
385 struct bna_res_info *res_info);
386void bna_device_uninit(struct bna_device *device);
387void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
388int bna_device_status_get(struct bna_device *device);
389int bna_device_state_get(struct bna_device *device);
390
391/* APIs for BNAD */
392void bna_device_enable(struct bna_device *device);
393void bna_device_disable(struct bna_device *device,
394 enum bna_cleanup_type type);
395
396/**
397 * MBOX
398 */
399
400/* APIs for DEVICE */
401void bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna);
402void bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod);
403void bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod);
404void bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod);
405
406/* APIs for PORT, TX, RX */
407void bna_mbox_handler(struct bna *bna, u32 intr_status);
408void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
409
410/**
411 * PORT
412 */
413
414/* APIs for BNA */
415void bna_port_init(struct bna_port *port, struct bna *bna);
416void bna_port_uninit(struct bna_port *port);
417int bna_port_state_get(struct bna_port *port);
418int bna_llport_state_get(struct bna_llport *llport);
419
420/* APIs for DEVICE */
421void bna_port_start(struct bna_port *port);
422void bna_port_stop(struct bna_port *port);
423void bna_port_fail(struct bna_port *port);
424
425/* API for RX */
426int bna_port_mtu_get(struct bna_port *port);
427void bna_llport_admin_up(struct bna_llport *llport);
428void bna_llport_admin_down(struct bna_llport *llport);
429
430/* API for BNAD */
431void bna_port_enable(struct bna_port *port);
432void bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
433 void (*cbfn)(void *, enum bna_cb_status));
434void bna_port_pause_config(struct bna_port *port,
435 struct bna_pause_config *pause_config,
436 void (*cbfn)(struct bnad *, enum bna_cb_status));
437void bna_port_mtu_set(struct bna_port *port, int mtu,
438 void (*cbfn)(struct bnad *, enum bna_cb_status));
439void bna_port_mac_get(struct bna_port *port, mac_t *mac);
440void bna_port_type_set(struct bna_port *port, enum bna_port_type type);
441void bna_port_linkcbfn_set(struct bna_port *port,
442 void (*linkcbfn)(struct bnad *,
443 enum bna_link_status));
444void bna_port_admin_up(struct bna_port *port);
445void bna_port_admin_down(struct bna_port *port);
446
447/* Callbacks for TX, RX */
448void bna_port_cb_tx_stopped(struct bna_port *port,
449 enum bna_cb_status status);
450void bna_port_cb_rx_stopped(struct bna_port *port,
451 enum bna_cb_status status);
452
453/* Callbacks for MBOX */
454void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
455 int status);
456void bna_port_cb_link_down(struct bna_port *port, int status);
457
458/**
459 * IB
460 */
461
462/* APIs for BNA */
463void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
464 struct bna_res_info *res_info);
465void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
466
467/* APIs for TX, RX */
468struct bna_ib *bna_ib_get(struct bna_ib_mod *ib_mod,
469 enum bna_intr_type intr_type, int vector);
470void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib);
471int bna_ib_reserve_idx(struct bna_ib *ib);
472void bna_ib_release_idx(struct bna_ib *ib, int idx);
473int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config);
474void bna_ib_start(struct bna_ib *ib);
475void bna_ib_stop(struct bna_ib *ib);
476void bna_ib_fail(struct bna_ib *ib);
477void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo);
478
479/**
480 * TX MODULE AND TX
481 */
482
483/* Internal APIs */
484void bna_tx_prio_changed(struct bna_tx *tx, int prio);
485
486/* APIs for BNA */
487void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
488 struct bna_res_info *res_info);
489void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
490int bna_tx_state_get(struct bna_tx *tx);
491
492/* APIs for PORT */
493void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
494void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
495void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
496void bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio);
497void bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link);
498
499/* APIs for BNAD */
500void bna_tx_res_req(int num_txq, int txq_depth,
501 struct bna_res_info *res_info);
502struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
503 struct bna_tx_config *tx_cfg,
504 struct bna_tx_event_cbfn *tx_cbfn,
505 struct bna_res_info *res_info, void *priv);
506void bna_tx_destroy(struct bna_tx *tx);
507void bna_tx_enable(struct bna_tx *tx);
508void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
509 void (*cbfn)(void *, struct bna_tx *,
510 enum bna_cb_status));
511enum bna_cb_status
512bna_tx_prio_set(struct bna_tx *tx, int prio,
513 void (*cbfn)(struct bnad *, struct bna_tx *,
514 enum bna_cb_status));
515void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
516
517/**
518 * RX MODULE, RX, RXF
519 */
520
521/* Internal APIs */
522void rxf_cb_cam_fltr_mbox_cmd(void *arg, int status);
523void rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
524 const struct bna_mac *mac_addr);
525void __rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status);
526void bna_rxf_adv_init(struct bna_rxf *rxf,
527 struct bna_rx *rx,
528 struct bna_rx_config *q_config);
529int rxf_process_packet_filter_ucast(struct bna_rxf *rxf);
530int rxf_process_packet_filter_promisc(struct bna_rxf *rxf);
531int rxf_process_packet_filter_default(struct bna_rxf *rxf);
532int rxf_process_packet_filter_allmulti(struct bna_rxf *rxf);
533int rxf_clear_packet_filter_ucast(struct bna_rxf *rxf);
534int rxf_clear_packet_filter_promisc(struct bna_rxf *rxf);
535int rxf_clear_packet_filter_default(struct bna_rxf *rxf);
536int rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf);
537void rxf_reset_packet_filter_ucast(struct bna_rxf *rxf);
538void rxf_reset_packet_filter_promisc(struct bna_rxf *rxf);
539void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
540void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);
541
542/* APIs for BNA */
543void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
544 struct bna_res_info *res_info);
545void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
546int bna_rx_state_get(struct bna_rx *rx);
547int bna_rxf_state_get(struct bna_rxf *rxf);
548
549/* APIs for PORT */
550void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
551void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
552void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);
553
554/* APIs for BNAD */
555void bna_rx_res_req(struct bna_rx_config *rx_config,
556 struct bna_res_info *res_info);
557struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
558 struct bna_rx_config *rx_cfg,
559 struct bna_rx_event_cbfn *rx_cbfn,
560 struct bna_res_info *res_info, void *priv);
561void bna_rx_destroy(struct bna_rx *rx);
562void bna_rx_enable(struct bna_rx *rx);
563void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
564 void (*cbfn)(void *, struct bna_rx *,
565 enum bna_cb_status));
566void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
567void bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]);
568void bna_rx_dim_update(struct bna_ccb *ccb);
569enum bna_cb_status
570bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
571 void (*cbfn)(struct bnad *, struct bna_rx *,
572 enum bna_cb_status));
573enum bna_cb_status
574bna_rx_ucast_add(struct bna_rx *rx, u8 *ucmac,
575 void (*cbfn)(struct bnad *, struct bna_rx *,
576 enum bna_cb_status));
577enum bna_cb_status
578bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
579 void (*cbfn)(struct bnad *, struct bna_rx *,
580 enum bna_cb_status));
581enum bna_cb_status
582bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
583 void (*cbfn)(struct bnad *, struct bna_rx *,
584 enum bna_cb_status));
585enum bna_cb_status
586bna_rx_mcast_del(struct bna_rx *rx, u8 *mcmac,
587 void (*cbfn)(struct bnad *, struct bna_rx *,
588 enum bna_cb_status));
589enum bna_cb_status
590bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
591 void (*cbfn)(struct bnad *, struct bna_rx *,
592 enum bna_cb_status));
593void bna_rx_mcast_delall(struct bna_rx *rx,
594 void (*cbfn)(struct bnad *, struct bna_rx *,
595 enum bna_cb_status));
596enum bna_cb_status
597bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
598 enum bna_rxmode bitmask,
599 void (*cbfn)(struct bnad *, struct bna_rx *,
600 enum bna_cb_status));
601void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
602void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
603void bna_rx_vlanfilter_enable(struct bna_rx *rx);
604void bna_rx_vlanfilter_disable(struct bna_rx *rx);
605void bna_rx_rss_enable(struct bna_rx *rx);
606void bna_rx_rss_disable(struct bna_rx *rx);
607void bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config);
608void bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors,
609 int nvectors);
610void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
611 void (*cbfn)(struct bnad *, struct bna_rx *,
612 enum bna_cb_status));
613void bna_rx_hds_disable(struct bna_rx *rx,
614 void (*cbfn)(struct bnad *, struct bna_rx *,
615 enum bna_cb_status));
616void bna_rx_receive_pause(struct bna_rx *rx,
617 void (*cbfn)(struct bnad *, struct bna_rx *,
618 enum bna_cb_status));
619void bna_rx_receive_resume(struct bna_rx *rx,
620 void (*cbfn)(struct bnad *, struct bna_rx *,
621 enum bna_cb_status));
622
623/* RxF APIs for RX */
624void bna_rxf_start(struct bna_rxf *rxf);
625void bna_rxf_stop(struct bna_rxf *rxf);
626void bna_rxf_fail(struct bna_rxf *rxf);
627void bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx,
628 struct bna_rx_config *q_config);
629void bna_rxf_uninit(struct bna_rxf *rxf);
630
631/* Callback from RXF to RX */
632void bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status);
633void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
634
635/**
636 * BNAD
637 */
638
639/* Callbacks for BNA */
640void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
641 struct bna_stats *stats);
642void bnad_cb_stats_clr(struct bnad *bnad);
643
644/* Callbacks for DEVICE */
645void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
646void bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status);
647void bnad_cb_device_enable_mbox_intr(struct bnad *bnad);
648void bnad_cb_device_disable_mbox_intr(struct bnad *bnad);
649
650/* Callbacks for port */
651void bnad_cb_port_link_status(struct bnad *bnad,
652 enum bna_link_status status);
653
654#endif /* __BNA_H__ */
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
new file mode 100644
index 000000000000..ddd922f210c7
--- /dev/null
+++ b/drivers/net/bna/bna_ctrl.c
@@ -0,0 +1,3624 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include "bna.h"
19#include "bfa_sm.h"
20#include "bfa_wc.h"
21
22/**
23 * MBOX
24 */
25static int
26bna_is_aen(u8 msg_id)
27{
28 return msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
29 msg_id == BFI_LL_I2H_LINK_UP_AEN;
30}
31
32static void
33bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
34{
35 struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);
36
37 switch (aen->mh.msg_id) {
38 case BFI_LL_I2H_LINK_UP_AEN:
39 bna_port_cb_link_up(&bna->port, aen, aen->reason);
40 break;
41 case BFI_LL_I2H_LINK_DOWN_AEN:
42 bna_port_cb_link_down(&bna->port, aen->reason);
43 break;
44 default:
45 break;
46 }
47}
48
49static void
50bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
51{
52 struct bna *bna = (struct bna *)(llarg);
53 struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
54 struct bfi_mhdr *cmd_h, *rsp_h;
55 struct bna_mbox_qe *mb_qe = NULL;
56 int to_post = 0;
57 u8 aen = 0;
58 char message[BNA_MESSAGE_SIZE];
59
60 aen = bna_is_aen(mb_rsp->mh.msg_id);
61
62 if (!aen) {
63 mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
64 cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
65 rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);
66
67 if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
68 (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
69 /* Remove the request from posted_q, update state */
70 list_del(&mb_qe->qe);
71 bna->mbox_mod.msg_pending--;
72 if (list_empty(&bna->mbox_mod.posted_q))
73 bna->mbox_mod.state = BNA_MBOX_FREE;
74 else
75 to_post = 1;
76
77 /* Dispatch the cbfn */
78 if (mb_qe->cbfn)
79 mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);
80
81 /* Post the next entry, if needed */
82 if (to_post) {
83 mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
84 bfa_nw_ioc_mbox_queue(&bna->device.ioc,
85 &mb_qe->cmd);
86 }
87 } else {
88 snprintf(message, BNA_MESSAGE_SIZE,
89 "No matching rsp for [%d:%d:%d]\n",
90 mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
91 mb_rsp->mh.mtag.i2htok);
92 pr_info("%s", message);
93 }
94
95 } else
96 bna_mbox_aen_callback(bna, msg);
97}
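
/*
 * Response matching in bna_ll_isr(): a reply is compared against the
 * head of posted_q by message id and by the i2htok token stamped in
 * bna_mbox_send(). Link AENs carry no matching command, so
 * bna_is_aen() routes them straight to bna_mbox_aen_callback()
 * without touching the command queue.
 */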
98
99void
100bna_err_handler(struct bna *bna, u32 intr_status)
101{
102 u32 init_halt;
103
104 if (intr_status & __HALT_STATUS_BITS) {
105 init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
106 init_halt &= ~__FW_INIT_HALT_P;
107 writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
108 }
109
110 bfa_nw_ioc_error_isr(&bna->device.ioc);
111}
112
113void
114bna_mbox_handler(struct bna *bna, u32 intr_status)
115{
116 if (BNA_IS_ERR_INTR(intr_status)) {
117 bna_err_handler(bna, intr_status);
118 return;
119 }
120 if (BNA_IS_MBOX_INTR(intr_status))
121 bfa_nw_ioc_mbox_isr(&bna->device.ioc);
122}
123
124void
125bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
126{
127 struct bfi_mhdr *mh;
128
129 mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);
130
131 mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
132 bna->mbox_mod.msg_ctr++;
133 bna->mbox_mod.msg_pending++;
134 if (bna->mbox_mod.state == BNA_MBOX_FREE) {
135 list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
136 bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
137 bna->mbox_mod.state = BNA_MBOX_POSTED;
138 } else {
139 list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
140 }
141}
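
/*
 * Typical producer pattern (bna_fw_llport_admin_up() below is a real
 * instance): fill a queue element with the request plus a completion
 * callback, then hand it to bna_mbox_send(). The command is posted to
 * the IOC immediately if the mailbox is free, otherwise it waits on
 * posted_q until bna_ll_isr() retires the outstanding command:
 *
 *	bna_mbox_qe_fill(&qe, &req, sizeof(req), cbfn, cbarg);
 *	bna_mbox_send(bna, &qe);
 *
 * (qe, req, cbfn and cbarg stand in for the caller's own objects.)
 */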
142
143void
144bna_mbox_flush_q(struct bna *bna, struct list_head *q)
145{
146 struct bna_mbox_qe *mb_qe = NULL;
147 struct bfi_mhdr *cmd_h;
148 struct list_head *mb_q;
149 void (*cbfn)(void *arg, int status);
150 void *cbarg;
151
152 mb_q = &bna->mbox_mod.posted_q;
153
154 while (!list_empty(mb_q)) {
155 bfa_q_deq(mb_q, &mb_qe);
156 cbfn = mb_qe->cbfn;
157 cbarg = mb_qe->cbarg;
158 bfa_q_qe_init(mb_qe);
159 bna->mbox_mod.msg_pending--;
160
161 cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
162 if (cbfn)
163 cbfn(cbarg, BNA_CB_NOT_EXEC);
164 }
165
166 bna->mbox_mod.state = BNA_MBOX_FREE;
167}
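
/*
 * Flushed commands still get their completion callback, with status
 * BNA_CB_NOT_EXEC. This is why the stopped-state FSM handlers below
 * treat the *_E_FWRESP_* events as no-ops: those events may originate
 * from a flush on device failure rather than from firmware.
 */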
168
169void
170bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
171{
172}
173
174void
175bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
176{
177 bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
178}
179
180void
181bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
182{
183 bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
184 mbox_mod->state = BNA_MBOX_FREE;
185 mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
186 INIT_LIST_HEAD(&mbox_mod->posted_q);
187 mbox_mod->bna = bna;
188}
189
190void
191bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
192{
193 mbox_mod->bna = NULL;
194}
195
196/**
197 * LLPORT
198 */
199#define call_llport_stop_cbfn(llport, status)\
200do {\
201 if ((llport)->stop_cbfn)\
202 (llport)->stop_cbfn(&(llport)->bna->port, status);\
203 (llport)->stop_cbfn = NULL;\
204} while (0)
205
206static void bna_fw_llport_up(struct bna_llport *llport);
207static void bna_fw_cb_llport_up(void *arg, int status);
208static void bna_fw_llport_down(struct bna_llport *llport);
209static void bna_fw_cb_llport_down(void *arg, int status);
210static void bna_llport_start(struct bna_llport *llport);
211static void bna_llport_stop(struct bna_llport *llport);
212static void bna_llport_fail(struct bna_llport *llport);
213
214enum bna_llport_event {
215 LLPORT_E_START = 1,
216 LLPORT_E_STOP = 2,
217 LLPORT_E_FAIL = 3,
218 LLPORT_E_UP = 4,
219 LLPORT_E_DOWN = 5,
220 LLPORT_E_FWRESP_UP = 6,
221 LLPORT_E_FWRESP_DOWN = 7
222};
223
224enum bna_llport_state {
225 BNA_LLPORT_STOPPED = 1,
226 BNA_LLPORT_DOWN = 2,
227 BNA_LLPORT_UP_RESP_WAIT = 3,
228 BNA_LLPORT_DOWN_RESP_WAIT = 4,
229 BNA_LLPORT_UP = 5,
230 BNA_LLPORT_LAST_RESP_WAIT = 6
231};
232
233bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
234 enum bna_llport_event);
235bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
236 enum bna_llport_event);
237bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
238 enum bna_llport_event);
239bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
240 enum bna_llport_event);
241bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
242 enum bna_llport_event);
243bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
244 enum bna_llport_event);
245
246static struct bfa_sm_table llport_sm_table[] = {
247 {BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
248 {BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
249 {BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
250 {BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
251 {BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
252 {BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
253};
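
/*
 * llport_sm_table maps each state handler to its BNA_LLPORT_* value;
 * bfa_sm_to_state() walks it so that bna_llport_state_get() can report
 * the current state as a plain integer (e.g. in the sw_stats dump
 * below) instead of a function pointer.
 */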
254
255static void
256bna_llport_sm_stopped_entry(struct bna_llport *llport)
257{
258 llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
259 call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
260}
261
262static void
263bna_llport_sm_stopped(struct bna_llport *llport,
264 enum bna_llport_event event)
265{
266 switch (event) {
267 case LLPORT_E_START:
268 bfa_fsm_set_state(llport, bna_llport_sm_down);
269 break;
270
271 case LLPORT_E_STOP:
272 call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
273 break;
274
275 case LLPORT_E_FAIL:
276 break;
277
278 case LLPORT_E_DOWN:
279 /* This event is received due to Rx objects failing */
280 /* No-op */
281 break;
282
283 case LLPORT_E_FWRESP_UP:
284 case LLPORT_E_FWRESP_DOWN:
285 /**
286 * These events are received due to flushing of mbox when
287 * device fails
288 */
289 /* No-op */
290 break;
291
292 default:
293 bfa_sm_fault(llport->bna, event);
294 }
295}
296
297static void
298bna_llport_sm_down_entry(struct bna_llport *llport)
299{
300 bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
301}
302
303static void
304bna_llport_sm_down(struct bna_llport *llport,
305 enum bna_llport_event event)
306{
307 switch (event) {
308 case LLPORT_E_STOP:
309 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
310 break;
311
312 case LLPORT_E_FAIL:
313 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
314 break;
315
316 case LLPORT_E_UP:
317 bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
318 bna_fw_llport_up(llport);
319 break;
320
321 default:
322 bfa_sm_fault(llport->bna, event);
323 }
324}
325
326static void
327bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
328{
329 /**
330 * NOTE: Do not call bna_fw_llport_up() here. It would reuse the
331 * single llport->mbox_qe while the down request is still outstanding,
332 * since down_resp_wait -> up_resp_wait is entered on event LLPORT_E_UP
333 */
334}
335
336static void
337bna_llport_sm_up_resp_wait(struct bna_llport *llport,
338 enum bna_llport_event event)
339{
340 switch (event) {
341 case LLPORT_E_STOP:
342 bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
343 break;
344
345 case LLPORT_E_FAIL:
346 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
347 break;
348
349 case LLPORT_E_DOWN:
350 bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
351 break;
352
353 case LLPORT_E_FWRESP_UP:
354 bfa_fsm_set_state(llport, bna_llport_sm_up);
355 break;
356
357 case LLPORT_E_FWRESP_DOWN:
358 /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
359 bna_fw_llport_up(llport);
360 break;
361
362 default:
363 bfa_sm_fault(llport->bna, event);
364 }
365}
366
367static void
368bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
369{
370 /**
371 * NOTE: Do not call bna_fw_llport_down() here. It would reuse the
372 * single llport->mbox_qe while the up request is still outstanding,
373 * since up_resp_wait -> down_resp_wait is entered on event LLPORT_E_DOWN
374 */
375}
376
377static void
378bna_llport_sm_down_resp_wait(struct bna_llport *llport,
379 enum bna_llport_event event)
380{
381 switch (event) {
382 case LLPORT_E_STOP:
383 bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
384 break;
385
386 case LLPORT_E_FAIL:
387 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
388 break;
389
390 case LLPORT_E_UP:
391 bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
392 break;
393
394 case LLPORT_E_FWRESP_UP:
395 /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
396 bna_fw_llport_down(llport);
397 break;
398
399 case LLPORT_E_FWRESP_DOWN:
400 bfa_fsm_set_state(llport, bna_llport_sm_down);
401 break;
402
403 default:
404 bfa_sm_fault(llport->bna, event);
405 }
406}
407
408static void
409bna_llport_sm_up_entry(struct bna_llport *llport)
410{
411}
412
413static void
414bna_llport_sm_up(struct bna_llport *llport,
415 enum bna_llport_event event)
416{
417 switch (event) {
418 case LLPORT_E_STOP:
419 bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
420 bna_fw_llport_down(llport);
421 break;
422
423 case LLPORT_E_FAIL:
424 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
425 break;
426
427 case LLPORT_E_DOWN:
428 bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
429 bna_fw_llport_down(llport);
430 break;
431
432 default:
433 bfa_sm_fault(llport->bna, event);
434 }
435}
436
437static void
438bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
439{
440}
441
442static void
443bna_llport_sm_last_resp_wait(struct bna_llport *llport,
444 enum bna_llport_event event)
445{
446 switch (event) {
447 case LLPORT_E_FAIL:
448 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
449 break;
450
451 case LLPORT_E_DOWN:
452 /**
453 * This event is received due to Rx objects stopping in
454 * parallel to llport
455 */
456 /* No-op */
457 break;
458
459 case LLPORT_E_FWRESP_UP:
460 /* up_resp_wait->last_resp_wait transition on LLPORT_E_STOP */
461 bna_fw_llport_down(llport);
462 break;
463
464 case LLPORT_E_FWRESP_DOWN:
465 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
466 break;
467
468 default:
469 bfa_sm_fault(llport->bna, event);
470 }
471}
472
473static void
474bna_fw_llport_admin_up(struct bna_llport *llport)
475{
476 struct bfi_ll_port_admin_req ll_req;
477
478 memset(&ll_req, 0, sizeof(ll_req));
479 ll_req.mh.msg_class = BFI_MC_LL;
480 ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
481 ll_req.mh.mtag.h2i.lpu_id = 0;
482
483 ll_req.up = BNA_STATUS_T_ENABLED;
484
485 bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
486 bna_fw_cb_llport_up, llport);
487
488 bna_mbox_send(llport->bna, &llport->mbox_qe);
489}
490
491static void
492bna_fw_llport_up(struct bna_llport *llport)
493{
494 if (llport->type == BNA_PORT_T_REGULAR)
495 bna_fw_llport_admin_up(llport);
496}
497
498static void
499bna_fw_cb_llport_up(void *arg, int status)
500{
501 struct bna_llport *llport = (struct bna_llport *)arg;
502
503 bfa_q_qe_init(&llport->mbox_qe.qe);
504 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
505}
506
507static void
508bna_fw_llport_admin_down(struct bna_llport *llport)
509{
510 struct bfi_ll_port_admin_req ll_req;
511
512 memset(&ll_req, 0, sizeof(ll_req));
513 ll_req.mh.msg_class = BFI_MC_LL;
514 ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
515 ll_req.mh.mtag.h2i.lpu_id = 0;
516
517 ll_req.up = BNA_STATUS_T_DISABLED;
518
519 bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
520 bna_fw_cb_llport_down, llport);
521
522 bna_mbox_send(llport->bna, &llport->mbox_qe);
523}
524
525static void
526bna_fw_llport_down(struct bna_llport *llport)
527{
528 if (llport->type == BNA_PORT_T_REGULAR)
529 bna_fw_llport_admin_down(llport);
530}
531
532static void
533bna_fw_cb_llport_down(void *arg, int status)
534{
535 struct bna_llport *llport = (struct bna_llport *)arg;
536
537 bfa_q_qe_init(&llport->mbox_qe.qe);
538 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
539}
540
541void
542bna_port_cb_llport_stopped(struct bna_port *port,
543 enum bna_cb_status status)
544{
545 bfa_wc_down(&port->chld_stop_wc);
546}
547
548static void
549bna_llport_init(struct bna_llport *llport, struct bna *bna)
550{
551 llport->flags |= BNA_LLPORT_F_ENABLED;
552 llport->type = BNA_PORT_T_REGULAR;
553 llport->bna = bna;
554
555 llport->link_status = BNA_LINK_DOWN;
556
557 llport->admin_up_count = 0;
558
559 llport->stop_cbfn = NULL;
560
561 bfa_q_qe_init(&llport->mbox_qe.qe);
562
563 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
564}
565
566static void
567bna_llport_uninit(struct bna_llport *llport)
568{
569 llport->flags &= ~BNA_LLPORT_F_ENABLED;
570
571 llport->bna = NULL;
572}
573
574static void
575bna_llport_start(struct bna_llport *llport)
576{
577 bfa_fsm_send_event(llport, LLPORT_E_START);
578}
579
580static void
581bna_llport_stop(struct bna_llport *llport)
582{
583 llport->stop_cbfn = bna_port_cb_llport_stopped;
584
585 bfa_fsm_send_event(llport, LLPORT_E_STOP);
586}
587
588static void
589bna_llport_fail(struct bna_llport *llport)
590{
591 bfa_fsm_send_event(llport, LLPORT_E_FAIL);
592}
593
594int
595bna_llport_state_get(struct bna_llport *llport)
596{
597 return bfa_sm_to_state(llport_sm_table, llport->fsm);
598}
599
600void
601bna_llport_admin_up(struct bna_llport *llport)
602{
603 llport->admin_up_count++;
604
605 if (llport->admin_up_count == 1) {
606 llport->flags |= BNA_LLPORT_F_RX_ENABLED;
607 if (llport->flags & BNA_LLPORT_F_ENABLED)
608 bfa_fsm_send_event(llport, LLPORT_E_UP);
609 }
610}
611
612void
613bna_llport_admin_down(struct bna_llport *llport)
614{
615 llport->admin_up_count--;
616
617 if (llport->admin_up_count == 0) {
618 llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
619 if (llport->flags & BNA_LLPORT_F_ENABLED)
620 bfa_fsm_send_event(llport, LLPORT_E_DOWN);
621 }
622}
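
/*
 * admin_up_count is a reference count: only the 0 -> 1 transition
 * raises LLPORT_E_UP and only the 1 -> 0 transition raises
 * LLPORT_E_DOWN, so two bna_llport_admin_up() calls need two matching
 * bna_llport_admin_down() calls before the link is brought down.
 */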
623
624/**
625 * PORT
626 */
627#define bna_port_chld_start(port)\
628do {\
629 enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
630 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
631 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
632 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
633 bna_llport_start(&(port)->llport);\
634 bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
635 bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
636} while (0)
637
638#define bna_port_chld_stop(port)\
639do {\
640 enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
641 BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
642 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
643 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
644 bfa_wc_up(&(port)->chld_stop_wc);\
645 bfa_wc_up(&(port)->chld_stop_wc);\
646 bfa_wc_up(&(port)->chld_stop_wc);\
647 bna_llport_stop(&(port)->llport);\
648 bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
649 bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
650} while (0)
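
/*
 * The three bfa_wc_up() calls arm chld_stop_wc once per child
 * (llport, tx_mod, rx_mod). Each child acknowledges its stop through
 * bna_port_cb_llport_stopped() / bna_port_cb_tx_stopped() /
 * bna_port_cb_rx_stopped(), each of which does a bfa_wc_down(); when
 * the counter reaches zero, the wc_resume callback
 * bna_port_cb_chld_stopped() raises PORT_E_CHLD_STOPPED.
 */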
651
652#define bna_port_chld_fail(port)\
653do {\
654 bna_llport_fail(&(port)->llport);\
655 bna_tx_mod_fail(&(port)->bna->tx_mod);\
656 bna_rx_mod_fail(&(port)->bna->rx_mod);\
657} while (0)
658
659#define bna_port_rx_start(port)\
660do {\
661 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
662 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
663 bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
664} while (0)
665
666#define bna_port_rx_stop(port)\
667do {\
668 enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
669 BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
670 bfa_wc_up(&(port)->chld_stop_wc);\
671 bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
672} while (0)
673
674#define call_port_stop_cbfn(port, status)\
675do {\
676 if ((port)->stop_cbfn)\
677 (port)->stop_cbfn((port)->stop_cbarg, status);\
678 (port)->stop_cbfn = NULL;\
679 (port)->stop_cbarg = NULL;\
680} while (0)
681
682#define call_port_pause_cbfn(port, status)\
683do {\
684 if ((port)->pause_cbfn)\
685 (port)->pause_cbfn((port)->bna->bnad, status);\
686 (port)->pause_cbfn = NULL;\
687} while (0)
688
689#define call_port_mtu_cbfn(port, status)\
690do {\
691 if ((port)->mtu_cbfn)\
692 (port)->mtu_cbfn((port)->bna->bnad, status);\
693 (port)->mtu_cbfn = NULL;\
694} while (0)
695
696static void bna_fw_pause_set(struct bna_port *port);
697static void bna_fw_cb_pause_set(void *arg, int status);
698static void bna_fw_mtu_set(struct bna_port *port);
699static void bna_fw_cb_mtu_set(void *arg, int status);
700
701enum bna_port_event {
702 PORT_E_START = 1,
703 PORT_E_STOP = 2,
704 PORT_E_FAIL = 3,
705 PORT_E_PAUSE_CFG = 4,
706 PORT_E_MTU_CFG = 5,
707 PORT_E_CHLD_STOPPED = 6,
708 PORT_E_FWRESP_PAUSE = 7,
709 PORT_E_FWRESP_MTU = 8
710};
711
712enum bna_port_state {
713 BNA_PORT_STOPPED = 1,
714 BNA_PORT_MTU_INIT_WAIT = 2,
715 BNA_PORT_PAUSE_INIT_WAIT = 3,
716 BNA_PORT_LAST_RESP_WAIT = 4,
717 BNA_PORT_STARTED = 5,
718 BNA_PORT_PAUSE_CFG_WAIT = 6,
719 BNA_PORT_RX_STOP_WAIT = 7,
720 BNA_PORT_MTU_CFG_WAIT = 8,
721 BNA_PORT_CHLD_STOP_WAIT = 9
722};
723
724bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
725 enum bna_port_event);
726bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
727 enum bna_port_event);
728bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
729 enum bna_port_event);
730bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
731 enum bna_port_event);
732bfa_fsm_state_decl(bna_port, started, struct bna_port,
733 enum bna_port_event);
734bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
735 enum bna_port_event);
736bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
737 enum bna_port_event);
738bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
739 enum bna_port_event);
740bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
741 enum bna_port_event);
742
743static struct bfa_sm_table port_sm_table[] = {
744 {BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
745 {BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
746 {BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
747 {BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
748 {BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
749 {BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
750 {BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
751 {BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
752 {BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
753};
754
755static void
756bna_port_sm_stopped_entry(struct bna_port *port)
757{
758 call_port_pause_cbfn(port, BNA_CB_SUCCESS);
759 call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
760 call_port_stop_cbfn(port, BNA_CB_SUCCESS);
761}
762
763static void
764bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
765{
766 switch (event) {
767 case PORT_E_START:
768 bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
769 break;
770
771 case PORT_E_STOP:
772 call_port_stop_cbfn(port, BNA_CB_SUCCESS);
773 break;
774
775 case PORT_E_FAIL:
776 /* No-op */
777 break;
778
779 case PORT_E_PAUSE_CFG:
780 call_port_pause_cbfn(port, BNA_CB_SUCCESS);
781 break;
782
783 case PORT_E_MTU_CFG:
784 call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
785 break;
786
787 case PORT_E_CHLD_STOPPED:
788 /**
789 * This event is received due to LLPort, Tx and Rx objects
790 * failing
791 */
792 /* No-op */
793 break;
794
795 case PORT_E_FWRESP_PAUSE:
796 case PORT_E_FWRESP_MTU:
797 /**
798 * These events are received due to flushing of mbox when
799 * device fails
800 */
801 /* No-op */
802 break;
803
804 default:
805 bfa_sm_fault(port->bna, event);
806 }
807}
808
809static void
810bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
811{
812 bna_fw_mtu_set(port);
813}
814
815static void
816bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
817{
818 switch (event) {
819 case PORT_E_STOP:
820 bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
821 break;
822
823 case PORT_E_FAIL:
824 bfa_fsm_set_state(port, bna_port_sm_stopped);
825 break;
826
827 case PORT_E_PAUSE_CFG:
828 /* No-op */
829 break;
830
831 case PORT_E_MTU_CFG:
832 port->flags |= BNA_PORT_F_MTU_CHANGED;
833 break;
834
835 case PORT_E_FWRESP_MTU:
836 if (port->flags & BNA_PORT_F_MTU_CHANGED) {
837 port->flags &= ~BNA_PORT_F_MTU_CHANGED;
838 bna_fw_mtu_set(port);
839 } else {
840 bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
841 }
842 break;
843
844 default:
845 bfa_sm_fault(port->bna, event);
846 }
847}
848
849static void
850bna_port_sm_pause_init_wait_entry(struct bna_port *port)
851{
852 bna_fw_pause_set(port);
853}
854
855static void
856bna_port_sm_pause_init_wait(struct bna_port *port,
857 enum bna_port_event event)
858{
859 switch (event) {
860 case PORT_E_STOP:
861 bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
862 break;
863
864 case PORT_E_FAIL:
865 bfa_fsm_set_state(port, bna_port_sm_stopped);
866 break;
867
868 case PORT_E_PAUSE_CFG:
869 port->flags |= BNA_PORT_F_PAUSE_CHANGED;
870 break;
871
872 case PORT_E_MTU_CFG:
873 port->flags |= BNA_PORT_F_MTU_CHANGED;
874 break;
875
876 case PORT_E_FWRESP_PAUSE:
877 if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
878 port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
879 bna_fw_pause_set(port);
880 } else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
881 port->flags &= ~BNA_PORT_F_MTU_CHANGED;
882 bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
883 } else {
884 bfa_fsm_set_state(port, bna_port_sm_started);
885 bna_port_chld_start(port);
886 }
887 break;
888
889 default:
890 bfa_sm_fault(port->bna, event);
891 }
892}
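
/*
 * The BNA_PORT_F_*_CHANGED flags coalesce configuration requests that
 * arrive while a firmware response is pending: instead of posting a
 * second mbox command, the FSM records the change and replays it from
 * the FWRESP handler, so back-to-back pause/MTU updates cost at most
 * one extra firmware round trip each.
 */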
893
894static void
895bna_port_sm_last_resp_wait_entry(struct bna_port *port)
896{
897}
898
899static void
900bna_port_sm_last_resp_wait(struct bna_port *port,
901 enum bna_port_event event)
902{
903 switch (event) {
904 case PORT_E_FAIL:
905 case PORT_E_FWRESP_PAUSE:
906 case PORT_E_FWRESP_MTU:
907 bfa_fsm_set_state(port, bna_port_sm_stopped);
908 break;
909
910 default:
911 bfa_sm_fault(port->bna, event);
912 }
913}
914
915static void
916bna_port_sm_started_entry(struct bna_port *port)
917{
918 /**
919 * NOTE: Do not call bna_port_chld_start() here, since it will be
920 * inadvertently called during pause_cfg_wait->started transition
921 * as well
922 */
923 call_port_pause_cbfn(port, BNA_CB_SUCCESS);
924 call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
925}
926
927static void
928bna_port_sm_started(struct bna_port *port,
929 enum bna_port_event event)
930{
931 switch (event) {
932 case PORT_E_STOP:
933 bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
934 break;
935
936 case PORT_E_FAIL:
937 bfa_fsm_set_state(port, bna_port_sm_stopped);
938 bna_port_chld_fail(port);
939 break;
940
941 case PORT_E_PAUSE_CFG:
942 bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
943 break;
944
945 case PORT_E_MTU_CFG:
946 bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
947 break;
948
949 default:
950 bfa_sm_fault(port->bna, event);
951 }
952}
953
954static void
955bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
956{
957 bna_fw_pause_set(port);
958}
959
960static void
961bna_port_sm_pause_cfg_wait(struct bna_port *port,
962 enum bna_port_event event)
963{
964 switch (event) {
965 case PORT_E_FAIL:
966 bfa_fsm_set_state(port, bna_port_sm_stopped);
967 bna_port_chld_fail(port);
968 break;
969
970 case PORT_E_FWRESP_PAUSE:
971 bfa_fsm_set_state(port, bna_port_sm_started);
972 break;
973
974 default:
975 bfa_sm_fault(port->bna, event);
976 }
977}
978
979static void
980bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
981{
982 bna_port_rx_stop(port);
983}
984
985static void
986bna_port_sm_rx_stop_wait(struct bna_port *port,
987 enum bna_port_event event)
988{
989 switch (event) {
990 case PORT_E_FAIL:
991 bfa_fsm_set_state(port, bna_port_sm_stopped);
992 bna_port_chld_fail(port);
993 break;
994
995 case PORT_E_CHLD_STOPPED:
996 bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
997 break;
998
999 default:
1000 bfa_sm_fault(port->bna, event);
1001 }
1002}
1003
1004static void
1005bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
1006{
1007 bna_fw_mtu_set(port);
1008}
1009
1010static void
1011bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
1012{
1013 switch (event) {
1014 case PORT_E_FAIL:
1015 bfa_fsm_set_state(port, bna_port_sm_stopped);
1016 bna_port_chld_fail(port);
1017 break;
1018
1019 case PORT_E_FWRESP_MTU:
1020 bfa_fsm_set_state(port, bna_port_sm_started);
1021 bna_port_rx_start(port);
1022 break;
1023
1024 default:
1025 bfa_sm_fault(port->bna, event);
1026 }
1027}
1028
1029static void
1030bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
1031{
1032 bna_port_chld_stop(port);
1033}
1034
1035static void
1036bna_port_sm_chld_stop_wait(struct bna_port *port,
1037 enum bna_port_event event)
1038{
1039 switch (event) {
1040 case PORT_E_FAIL:
1041 bfa_fsm_set_state(port, bna_port_sm_stopped);
1042 bna_port_chld_fail(port);
1043 break;
1044
1045 case PORT_E_CHLD_STOPPED:
1046 bfa_fsm_set_state(port, bna_port_sm_stopped);
1047 break;
1048
1049 default:
1050 bfa_sm_fault(port->bna, event);
1051 }
1052}
1053
1054static void
1055bna_fw_pause_set(struct bna_port *port)
1056{
1057 struct bfi_ll_set_pause_req ll_req;
1058
1059 memset(&ll_req, 0, sizeof(ll_req));
1060 ll_req.mh.msg_class = BFI_MC_LL;
1061 ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
1062 ll_req.mh.mtag.h2i.lpu_id = 0;
1063
1064 ll_req.tx_pause = port->pause_config.tx_pause;
1065 ll_req.rx_pause = port->pause_config.rx_pause;
1066
1067 bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
1068 bna_fw_cb_pause_set, port);
1069
1070 bna_mbox_send(port->bna, &port->mbox_qe);
1071}
1072
1073static void
1074bna_fw_cb_pause_set(void *arg, int status)
1075{
1076 struct bna_port *port = (struct bna_port *)arg;
1077
1078 bfa_q_qe_init(&port->mbox_qe.qe);
1079 bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
1080}
1081
1082static void
1083bna_fw_mtu_set(struct bna_port *port)
1084{
1085 struct bfi_ll_mtu_info_req ll_req;
1086
1087 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
1088 ll_req.mtu = htons((u16)port->mtu);
1089
1090 bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
1091 bna_fw_cb_mtu_set, port);
1092 bna_mbox_send(port->bna, &port->mbox_qe);
1093}
1094
1095static void
1096bna_fw_cb_mtu_set(void *arg, int status)
1097{
1098 struct bna_port *port = (struct bna_port *)arg;
1099
1100 bfa_q_qe_init(&port->mbox_qe.qe);
1101 bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
1102}
1103
1104static void
1105bna_port_cb_chld_stopped(void *arg)
1106{
1107 struct bna_port *port = (struct bna_port *)arg;
1108
1109 bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
1110}
1111
1112void
1113bna_port_init(struct bna_port *port, struct bna *bna)
1114{
1115 port->bna = bna;
1116 port->flags = 0;
1117 port->mtu = 0;
1118 port->type = BNA_PORT_T_REGULAR;
1119
1120 port->link_cbfn = bnad_cb_port_link_status;
1121
1122 port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
1123 port->chld_stop_wc.wc_cbarg = port;
1124 port->chld_stop_wc.wc_count = 0;
1125
1126 port->stop_cbfn = NULL;
1127 port->stop_cbarg = NULL;
1128
1129 port->pause_cbfn = NULL;
1130
1131 port->mtu_cbfn = NULL;
1132
1133 bfa_q_qe_init(&port->mbox_qe.qe);
1134
1135 bfa_fsm_set_state(port, bna_port_sm_stopped);
1136
1137 bna_llport_init(&port->llport, bna);
1138}
1139
1140void
1141bna_port_uninit(struct bna_port *port)
1142{
1143 bna_llport_uninit(&port->llport);
1144
1145 port->flags = 0;
1146
1147 port->bna = NULL;
1148}
1149
1150int
1151bna_port_state_get(struct bna_port *port)
1152{
1153 return bfa_sm_to_state(port_sm_table, port->fsm);
1154}
1155
1156void
1157bna_port_start(struct bna_port *port)
1158{
1159 port->flags |= BNA_PORT_F_DEVICE_READY;
1160 if (port->flags & BNA_PORT_F_ENABLED)
1161 bfa_fsm_send_event(port, PORT_E_START);
1162}
1163
1164void
1165bna_port_stop(struct bna_port *port)
1166{
1167 port->stop_cbfn = bna_device_cb_port_stopped;
1168 port->stop_cbarg = &port->bna->device;
1169
1170 port->flags &= ~BNA_PORT_F_DEVICE_READY;
1171 bfa_fsm_send_event(port, PORT_E_STOP);
1172}
1173
1174void
1175bna_port_fail(struct bna_port *port)
1176{
1177 port->flags &= ~BNA_PORT_F_DEVICE_READY;
1178 bfa_fsm_send_event(port, PORT_E_FAIL);
1179}
1180
1181void
1182bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
1183{
1184 bfa_wc_down(&port->chld_stop_wc);
1185}
1186
1187void
1188bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
1189{
1190 bfa_wc_down(&port->chld_stop_wc);
1191}
1192
1193void
1194bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
1195 int status)
1196{
1197 int i;
1198 u8 prio_map;
1199
1200 port->llport.link_status = BNA_LINK_UP;
1201 if (aen->cee_linkup)
1202 port->llport.link_status = BNA_CEE_UP;
1203
1204 /* Compute the priority */
1205 prio_map = aen->prio_map;
1206 if (prio_map) {
1207 for (i = 0; i < 8; i++) {
1208 if ((prio_map >> i) & 0x1)
1209 break;
1210 }
1211 port->priority = i;
1212 } else
1213 port->priority = 0;
1214
1215 /* Dispatch events */
1216 bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
1217 bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
1218 port->link_cbfn(port->bna->bnad, port->llport.link_status);
1219}
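
/*
 * prio_map is a bitmap of CEE priorities and the loop above picks the
 * lowest set bit as the active priority. For example, prio_map 0x18
 * (bits 3 and 4 set) yields priority 3; an empty map falls back to
 * priority 0.
 */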
1220
1221void
1222bna_port_cb_link_down(struct bna_port *port, int status)
1223{
1224 port->llport.link_status = BNA_LINK_DOWN;
1225
1226 /* Dispatch events */
1227 bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
1228 port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
1229}
1230
1231int
1232bna_port_mtu_get(struct bna_port *port)
1233{
1234 return port->mtu;
1235}
1236
1237void
1238bna_port_enable(struct bna_port *port)
1239{
1240 if (port->fsm != (bfa_fsm_t)bna_port_sm_stopped)
1241 return;
1242
1243 port->flags |= BNA_PORT_F_ENABLED;
1244
1245 if (port->flags & BNA_PORT_F_DEVICE_READY)
1246 bfa_fsm_send_event(port, PORT_E_START);
1247}
1248
1249void
1250bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
1251 void (*cbfn)(void *, enum bna_cb_status))
1252{
1253 if (type == BNA_SOFT_CLEANUP) {
1254 (*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
1255 return;
1256 }
1257
1258 port->stop_cbfn = cbfn;
1259 port->stop_cbarg = port->bna->bnad;
1260
1261 port->flags &= ~BNA_PORT_F_ENABLED;
1262
1263 bfa_fsm_send_event(port, PORT_E_STOP);
1264}
1265
1266void
1267bna_port_pause_config(struct bna_port *port,
1268 struct bna_pause_config *pause_config,
1269 void (*cbfn)(struct bnad *, enum bna_cb_status))
1270{
1271 port->pause_config = *pause_config;
1272
1273 port->pause_cbfn = cbfn;
1274
1275 bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
1276}
1277
1278void
1279bna_port_mtu_set(struct bna_port *port, int mtu,
1280 void (*cbfn)(struct bnad *, enum bna_cb_status))
1281{
1282 port->mtu = mtu;
1283
1284 port->mtu_cbfn = cbfn;
1285
1286 bfa_fsm_send_event(port, PORT_E_MTU_CFG);
1287}
1288
1289void
1290bna_port_mac_get(struct bna_port *port, mac_t *mac)
1291{
1292 *mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
1293}
1294
1295/**
1296 * Should be called only when port is disabled
1297 */
1298void
1299bna_port_type_set(struct bna_port *port, enum bna_port_type type)
1300{
1301 port->type = type;
1302 port->llport.type = type;
1303}
1304
1305/**
1306 * Should be called only when port is disabled
1307 */
1308void
1309bna_port_linkcbfn_set(struct bna_port *port,
1310 void (*linkcbfn)(struct bnad *, enum bna_link_status))
1311{
1312 port->link_cbfn = linkcbfn;
1313}
1314
1315void
1316bna_port_admin_up(struct bna_port *port)
1317{
1318 struct bna_llport *llport = &port->llport;
1319
1320 if (llport->flags & BNA_LLPORT_F_ENABLED)
1321 return;
1322
1323 llport->flags |= BNA_LLPORT_F_ENABLED;
1324
1325 if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
1326 bfa_fsm_send_event(llport, LLPORT_E_UP);
1327}
1328
1329void
1330bna_port_admin_down(struct bna_port *port)
1331{
1332 struct bna_llport *llport = &port->llport;
1333
1334 if (!(llport->flags & BNA_LLPORT_F_ENABLED))
1335 return;
1336
1337 llport->flags &= ~BNA_LLPORT_F_ENABLED;
1338
1339 if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
1340 bfa_fsm_send_event(llport, LLPORT_E_DOWN);
1341}
1342
1343/**
1344 * DEVICE
1345 */
1346#define enable_mbox_intr(_device)\
1347do {\
1348 u32 intr_status;\
1349 bna_intr_status_get((_device)->bna, intr_status);\
1350 bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
1351 bna_mbox_intr_enable((_device)->bna);\
1352} while (0)
1353
1354#define disable_mbox_intr(_device)\
1355do {\
1356 bna_mbox_intr_disable((_device)->bna);\
1357 bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
1358} while (0)
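
/*
 * Note that enable_mbox_intr() reads the interrupt status into a
 * local that is then deliberately unused; the read appears to be
 * there to flush any stale mailbox interrupt before unmasking. That
 * is an inference from the code, not a documented requirement.
 */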
1359
1360const struct bna_chip_regs_offset reg_offset[] =
1361{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
1362 HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
1363{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
1364 HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
1365{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
1366 HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
1367{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
1368 HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
1369};
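
/*
 * One reg_offset entry per PCI function (0 through 3): the host page
 * number register, the interrupt status and mask registers, and the
 * MSI-X error index for that function. Callers are expected to index
 * this table by the adapter's PCI function number.
 */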
1370
1371enum bna_device_event {
1372 DEVICE_E_ENABLE = 1,
1373 DEVICE_E_DISABLE = 2,
1374 DEVICE_E_IOC_READY = 3,
1375 DEVICE_E_IOC_FAILED = 4,
1376 DEVICE_E_IOC_DISABLED = 5,
1377 DEVICE_E_IOC_RESET = 6,
1378 DEVICE_E_PORT_STOPPED = 7,
1379};
1380
1381enum bna_device_state {
1382 BNA_DEVICE_STOPPED = 1,
1383 BNA_DEVICE_IOC_READY_WAIT = 2,
1384 BNA_DEVICE_READY = 3,
1385 BNA_DEVICE_PORT_STOP_WAIT = 4,
1386 BNA_DEVICE_IOC_DISABLE_WAIT = 5,
1387 BNA_DEVICE_FAILED = 6
1388};
1389
1390bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
1391 enum bna_device_event);
1392bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
1393 enum bna_device_event);
1394bfa_fsm_state_decl(bna_device, ready, struct bna_device,
1395 enum bna_device_event);
1396bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
1397 enum bna_device_event);
1398bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
1399 enum bna_device_event);
1400bfa_fsm_state_decl(bna_device, failed, struct bna_device,
1401 enum bna_device_event);
1402
1403static struct bfa_sm_table device_sm_table[] = {
1404 {BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
1405 {BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
1406 {BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
1407 {BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
1408 {BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
1409 {BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
1410};
1411
1412static void
1413bna_device_sm_stopped_entry(struct bna_device *device)
1414{
1415 if (device->stop_cbfn)
1416 device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);
1417
1418 device->stop_cbfn = NULL;
1419 device->stop_cbarg = NULL;
1420}
1421
1422static void
1423bna_device_sm_stopped(struct bna_device *device,
1424 enum bna_device_event event)
1425{
1426 switch (event) {
1427 case DEVICE_E_ENABLE:
1428 if (device->intr_type == BNA_INTR_T_MSIX)
1429 bna_mbox_msix_idx_set(device);
1430 bfa_nw_ioc_enable(&device->ioc);
1431 bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
1432 break;
1433
1434 case DEVICE_E_DISABLE:
1435 bfa_fsm_set_state(device, bna_device_sm_stopped);
1436 break;
1437
1438 case DEVICE_E_IOC_RESET:
1439 enable_mbox_intr(device);
1440 break;
1441
1442 case DEVICE_E_IOC_FAILED:
1443 bfa_fsm_set_state(device, bna_device_sm_failed);
1444 break;
1445
1446 default:
1447 bfa_sm_fault(device->bna, event);
1448 }
1449}
1450
1451static void
1452bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
1453{
1454 /**
1455 * Do not call bfa_ioc_enable() here. It must be called in the
1456 * previous state because of the failed -> ioc_ready_wait transition.
1457 */
1458}
1459
1460static void
1461bna_device_sm_ioc_ready_wait(struct bna_device *device,
1462 enum bna_device_event event)
1463{
1464 switch (event) {
1465 case DEVICE_E_DISABLE:
1466 if (device->ready_cbfn)
1467 device->ready_cbfn(device->ready_cbarg,
1468 BNA_CB_INTERRUPT);
1469 device->ready_cbfn = NULL;
1470 device->ready_cbarg = NULL;
1471 bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
1472 break;
1473
1474 case DEVICE_E_IOC_READY:
1475 bfa_fsm_set_state(device, bna_device_sm_ready);
1476 break;
1477
1478 case DEVICE_E_IOC_FAILED:
1479 bfa_fsm_set_state(device, bna_device_sm_failed);
1480 break;
1481
1482 case DEVICE_E_IOC_RESET:
1483 enable_mbox_intr(device);
1484 break;
1485
1486 default:
1487 bfa_sm_fault(device->bna, event);
1488 }
1489}
1490
1491static void
1492bna_device_sm_ready_entry(struct bna_device *device)
1493{
1494 bna_mbox_mod_start(&device->bna->mbox_mod);
1495 bna_port_start(&device->bna->port);
1496
1497 if (device->ready_cbfn)
1498 device->ready_cbfn(device->ready_cbarg,
1499 BNA_CB_SUCCESS);
1500 device->ready_cbfn = NULL;
1501 device->ready_cbarg = NULL;
1502}
1503
1504static void
1505bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
1506{
1507 switch (event) {
1508 case DEVICE_E_DISABLE:
1509 bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
1510 break;
1511
1512 case DEVICE_E_IOC_FAILED:
1513 bfa_fsm_set_state(device, bna_device_sm_failed);
1514 break;
1515
1516 default:
1517 bfa_sm_fault(device->bna, event);
1518 }
1519}
1520
1521static void
1522bna_device_sm_port_stop_wait_entry(struct bna_device *device)
1523{
1524 bna_port_stop(&device->bna->port);
1525}
1526
1527static void
1528bna_device_sm_port_stop_wait(struct bna_device *device,
1529 enum bna_device_event event)
1530{
1531 switch (event) {
1532 case DEVICE_E_PORT_STOPPED:
1533 bna_mbox_mod_stop(&device->bna->mbox_mod);
1534 bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
1535 break;
1536
1537 case DEVICE_E_IOC_FAILED:
1538 disable_mbox_intr(device);
1539 bna_port_fail(&device->bna->port);
1540 break;
1541
1542 default:
1543 bfa_sm_fault(device->bna, event);
1544 }
1545}
1546
1547static void
1548bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
1549{
1550 bfa_nw_ioc_disable(&device->ioc);
1551}
1552
1553static void
1554bna_device_sm_ioc_disable_wait(struct bna_device *device,
1555 enum bna_device_event event)
1556{
1557 switch (event) {
1558 case DEVICE_E_IOC_DISABLED:
1559 disable_mbox_intr(device);
1560 bfa_fsm_set_state(device, bna_device_sm_stopped);
1561 break;
1562
1563 default:
1564 bfa_sm_fault(device->bna, event);
1565 }
1566}
1567
1568static void
1569bna_device_sm_failed_entry(struct bna_device *device)
1570{
1571 disable_mbox_intr(device);
1572 bna_port_fail(&device->bna->port);
1573 bna_mbox_mod_stop(&device->bna->mbox_mod);
1574
1575 if (device->ready_cbfn)
1576 device->ready_cbfn(device->ready_cbarg,
1577 BNA_CB_FAIL);
1578 device->ready_cbfn = NULL;
1579 device->ready_cbarg = NULL;
1580}
1581
1582static void
1583bna_device_sm_failed(struct bna_device *device,
1584 enum bna_device_event event)
1585{
1586 switch (event) {
1587 case DEVICE_E_DISABLE:
1588 bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
1589 break;
1590
1591 case DEVICE_E_IOC_RESET:
1592 enable_mbox_intr(device);
1593 bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
1594 break;
1595
1596 default:
1597 bfa_sm_fault(device->bna, event);
1598 }
1599}
1600
1601/* IOC callback functions */
1602
1603static void
1604bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
1605{
1606 struct bna_device *device = (struct bna_device *)dev;
1607
1608 if (error)
1609 bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
1610 else
1611 bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
1612}
1613
1614static void
1615bna_device_cb_iocll_disabled(void *dev)
1616{
1617 struct bna_device *device = (struct bna_device *)dev;
1618
1619 bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
1620}
1621
1622static void
1623bna_device_cb_iocll_failed(void *dev)
1624{
1625 struct bna_device *device = (struct bna_device *)dev;
1626
1627 bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
1628}
1629
1630static void
1631bna_device_cb_iocll_reset(void *dev)
1632{
1633 struct bna_device *device = (struct bna_device *)dev;
1634
1635 bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
1636}
1637
1638static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
1639 bna_device_cb_iocll_ready,
1640 bna_device_cb_iocll_disabled,
1641 bna_device_cb_iocll_failed,
1642 bna_device_cb_iocll_reset
1643};
1644
1645void
1646bna_device_init(struct bna_device *device, struct bna *bna,
1647 struct bna_res_info *res_info)
1648{
1649 u64 dma;
1650
1651 device->bna = bna;
1652
1653 /**
1654 * Attach IOC and claim:
1655 * 1. DMA memory for IOC attributes
1656 * 2. Kernel memory for FW trace
1657 */
1658 bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
1659 bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);
1660
1661 BNA_GET_DMA_ADDR(
1662 &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
1663 bfa_nw_ioc_mem_claim(&device->ioc,
1664 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
1665 dma);
1666
1667 bna_adv_device_init(device, bna, res_info);
1668 /*
1669 * Initialize mbox_mod only after IOC, so that mbox handler
1670 * registration goes through
1671 */
1672 device->intr_type =
1673 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
1674 device->vector =
1675 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
1676 bna_mbox_mod_init(&bna->mbox_mod, bna);
1677
1678 device->ready_cbfn = device->stop_cbfn = NULL;
1679 device->ready_cbarg = device->stop_cbarg = NULL;
1680
1681 bfa_fsm_set_state(device, bna_device_sm_stopped);
1682}
1683
1684void
1685bna_device_uninit(struct bna_device *device)
1686{
1687 bna_mbox_mod_uninit(&device->bna->mbox_mod);
1688
1689 bfa_nw_ioc_detach(&device->ioc);
1690
1691 device->bna = NULL;
1692}
1693
1694void
1695bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
1696{
1697 struct bna_device *device = (struct bna_device *)arg;
1698
1699 bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
1700}
1701
1702int
1703bna_device_status_get(struct bna_device *device)
1704{
1705 return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
1706}
1707
1708void
1709bna_device_enable(struct bna_device *device)
1710{
1711 if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
1712 bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
1713 return;
1714 }
1715
1716 device->ready_cbfn = bnad_cb_device_enabled;
1717 device->ready_cbarg = device->bna->bnad;
1718
1719 bfa_fsm_send_event(device, DEVICE_E_ENABLE);
1720}
1721
1722void
1723bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
1724{
1725 if (type == BNA_SOFT_CLEANUP) {
1726 bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
1727 return;
1728 }
1729
1730 device->stop_cbfn = bnad_cb_device_disabled;
1731 device->stop_cbarg = device->bna->bnad;
1732
1733 bfa_fsm_send_event(device, DEVICE_E_DISABLE);
1734}
1735
1736int
1737bna_device_state_get(struct bna_device *device)
1738{
1739 return bfa_sm_to_state(device_sm_table, device->fsm);
1740}
1741
1742u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
1743 {12, 20},
1744 {10, 18},
1745 {8, 16},
1746 {6, 12},
1747 {4, 8},
1748 {3, 6},
1749 {2, 4},
1750 {1, 2},
1751};
1752
1753u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
1754 {12, 12},
1755 {6, 10},
1756 {5, 10},
1757 {4, 8},
1758 {3, 6},
1759 {3, 6},
1760 {2, 4},
1761 {1, 2},
1762};
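
/*
 * Dynamic interrupt moderation tables, indexed as [load][bias]; each
 * entry is a coalescing timeout consumed via bna_rx_dim_update() and
 * bna_rx_dim_reconfig() (declared in bna.h above). The NAPI variant
 * carries somewhat smaller values, presumably because NAPI polling
 * already batches receive work; the exact BNA_LOAD_T_* row ordering
 * is defined with the enum declarations elsewhere in this driver.
 */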
1763
1764/* device */
1765void
1766bna_adv_device_init(struct bna_device *device, struct bna *bna,
1767 struct bna_res_info *res_info)
1768{
1769 u8 *kva;
1770 u64 dma;
1771
1772 device->bna = bna;
1773
1774 kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
1775
1776 /**
1777 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
1778 * DMA memory.
1779 */
1780 BNA_GET_DMA_ADDR(
1781 &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
1782 kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
1783
1784 bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
1785 bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
1786 kva += bfa_nw_cee_meminfo();
1787 dma += bfa_nw_cee_meminfo();
1788
1789}
1790
1791/* utils */
1792
1793void
1794bna_adv_res_req(struct bna_res_info *res_info)
1795{
1796 /* DMA memory for COMMON_MODULE */
1797 res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
1798 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1799 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
1800 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
1801 bfa_nw_cee_meminfo(), PAGE_SIZE);
1802
1803 /* Virtual memory for retrieving fw_trc */
1804 res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
1805 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
1806 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
1807 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
1808
1809 /* DMA memory for retrieving stats */
1810 res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
1811 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
1812 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
1813 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
1814 ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);
1815
1816 /* Virtual memory for soft stats */
1817 res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
1818 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
1819 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
1820 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
1821 sizeof(struct bna_sw_stats);
1822}
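
/*
 * Each res_info slot is a request, not an allocation: the driver
 * fills in res_type, mem_type, num and len here, the BNAD layer
 * allocates accordingly, and the populated array is fed back through
 * bna_device_init()/bna_adv_device_init(), which claim the memory
 * (see the BNA_RES_MEM_T_ATTR and BNA_RES_MEM_T_COM usage above).
 */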
1823
1824static void
1825bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
1826{
1827 struct bna_tx *tx;
1828 struct bna_txq *txq;
1829 struct bna_rx *rx;
1830 struct bna_rxp *rxp;
1831 struct list_head *qe;
1832 struct list_head *txq_qe;
1833 struct list_head *rxp_qe;
1834 struct list_head *mac_qe;
1835 int i;
1836
1837 sw_stats->device_state = bna_device_state_get(&bna->device);
1838 sw_stats->port_state = bna_port_state_get(&bna->port);
1839 sw_stats->port_flags = bna->port.flags;
1840 sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
1841 sw_stats->priority = bna->port.priority;
1842
1843 i = 0;
1844 list_for_each(qe, &bna->tx_mod.tx_active_q) {
1845 tx = (struct bna_tx *)qe;
1846 sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
1847 sw_stats->tx_stats[i].tx_flags = tx->flags;
1848
1849 sw_stats->tx_stats[i].num_txqs = 0;
1850 sw_stats->tx_stats[i].txq_bmap[0] = 0;
1851 sw_stats->tx_stats[i].txq_bmap[1] = 0;
1852 list_for_each(txq_qe, &tx->txq_q) {
1853 txq = (struct bna_txq *)txq_qe;
1854 if (txq->txq_id < 32)
1855 sw_stats->tx_stats[i].txq_bmap[0] |=
1856 ((u32)1 << txq->txq_id);
1857 else
1858 sw_stats->tx_stats[i].txq_bmap[1] |=
1859 ((u32)1 <<
1860 (txq->txq_id - 32));
1861 sw_stats->tx_stats[i].num_txqs++;
1862 }
1863
1864 sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;
1865
1866 i++;
1867 }
1868 sw_stats->num_active_tx = i;
1869
1870 i = 0;
1871 list_for_each(qe, &bna->rx_mod.rx_active_q) {
1872 rx = (struct bna_rx *)qe;
1873 sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
1874 sw_stats->rx_stats[i].rx_flags = rx->rx_flags;
1875
1876 sw_stats->rx_stats[i].num_rxps = 0;
1877 sw_stats->rx_stats[i].num_rxqs = 0;
1878 sw_stats->rx_stats[i].rxq_bmap[0] = 0;
1879 sw_stats->rx_stats[i].rxq_bmap[1] = 0;
1880 sw_stats->rx_stats[i].cq_bmap[0] = 0;
1881 sw_stats->rx_stats[i].cq_bmap[1] = 0;
1882 list_for_each(rxp_qe, &rx->rxp_q) {
1883 rxp = (struct bna_rxp *)rxp_qe;
1884
1885 sw_stats->rx_stats[i].num_rxqs += 1;
1886
1887 if (rxp->type == BNA_RXP_SINGLE) {
1888 if (rxp->rxq.single.only->rxq_id < 32) {
1889 sw_stats->rx_stats[i].rxq_bmap[0] |=
1890 ((u32)1 <<
1891 rxp->rxq.single.only->rxq_id);
1892 } else {
1893 sw_stats->rx_stats[i].rxq_bmap[1] |=
1894 ((u32)1 <<
1895 (rxp->rxq.single.only->rxq_id - 32));
1896 }
1897 } else {
1898 if (rxp->rxq.slr.large->rxq_id < 32) {
1899 sw_stats->rx_stats[i].rxq_bmap[0] |=
1900 ((u32)1 <<
1901 rxp->rxq.slr.large->rxq_id);
1902 } else {
1903 sw_stats->rx_stats[i].rxq_bmap[1] |=
1904 ((u32)1 <<
1905 (rxp->rxq.slr.large->rxq_id - 32));
1906 }
1907
1908 if (rxp->rxq.slr.small->rxq_id < 32) {
1909 sw_stats->rx_stats[i].rxq_bmap[0] |=
1910 ((u32)1 <<
1911 rxp->rxq.slr.small->rxq_id);
1912 } else {
1913 sw_stats->rx_stats[i].rxq_bmap[1] |=
1914 ((u32)1 <<
1915 (rxp->rxq.slr.small->rxq_id - 32));
1916 }
1917 sw_stats->rx_stats[i].num_rxqs += 1;
1918 }
1919
1920 if (rxp->cq.cq_id < 32)
1921 sw_stats->rx_stats[i].cq_bmap[0] |=
1922 (1 << rxp->cq.cq_id);
1923 else
1924 sw_stats->rx_stats[i].cq_bmap[1] |=
1925 (1 << (rxp->cq.cq_id - 32));
1926
1927 sw_stats->rx_stats[i].num_rxps++;
1928 }
1929
1930 sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
1931 sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
1932 sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;
1933
1934 sw_stats->rx_stats[i].num_active_ucast = 0;
1935 if (rx->rxf.ucast_active_mac)
1936 sw_stats->rx_stats[i].num_active_ucast++;
1937 list_for_each(mac_qe, &rx->rxf.ucast_active_q)
1938 sw_stats->rx_stats[i].num_active_ucast++;
1939
1940 sw_stats->rx_stats[i].num_active_mcast = 0;
1941 list_for_each(mac_qe, &rx->rxf.mcast_active_q)
1942 sw_stats->rx_stats[i].num_active_mcast++;
1943
1944 sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
1945 sw_stats->rx_stats[i].vlan_filter_status =
1946 rx->rxf.vlan_filter_status;
1947 memcpy(sw_stats->rx_stats[i].vlan_filter_table,
1948 rx->rxf.vlan_filter_table,
1949 sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));
1950
1951 sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
1952 sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;
1953
1954 i++;
1955 }
1956 sw_stats->num_active_rx = i;
1957}
1958
1959static void
1960bna_fw_cb_stats_get(void *arg, int status)
1961{
1962 struct bna *bna = (struct bna *)arg;
1963 u64 *p_stats;
1964 int i, count;
1965 int rxf_count, txf_count;
1966 u64 rxf_bmap, txf_bmap;
1967
1968 bfa_q_qe_init(&bna->mbox_qe.qe);
1969
1970 if (status == 0) {
1971 p_stats = (u64 *)bna->stats.hw_stats;
1972 count = sizeof(struct bfi_ll_stats) / sizeof(u64);
1973 for (i = 0; i < count; i++)
1974 p_stats[i] = cpu_to_be64(p_stats[i]);
1975
1976 rxf_count = 0;
1977 rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
1978 ((u64)bna->stats.rxf_bmap[1] << 32);
1979 for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
1980 if (rxf_bmap & ((u64)1 << i))
1981 rxf_count++;
1982
1983 txf_count = 0;
1984 txf_bmap = (u64)bna->stats.txf_bmap[0] |
1985 ((u64)bna->stats.txf_bmap[1] << 32);
1986 for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
1987 if (txf_bmap & ((u64)1 << i))
1988 txf_count++;
1989
1990 p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
1991 ((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
1992 txf_count * sizeof(struct bfi_ll_stats_txf))/
1993 sizeof(u64));
1994
1995 /* Populate the TXF stats from the firmware DMAed copy */
1996 for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
1997 if (txf_bmap & ((u64)1 << i)) {
1998 p_stats -= sizeof(struct bfi_ll_stats_txf)/
1999 sizeof(u64);
2000 memcpy(&bna->stats.hw_stats->txf_stats[i],
2001 p_stats,
2002 sizeof(struct bfi_ll_stats_txf));
2003 }
2004
2005 /* Populate the RXF stats from the firmware DMAed copy */
2006 for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
2007 if (rxf_bmap & ((u64)1 << i)) {
2008 p_stats -= sizeof(struct bfi_ll_stats_rxf)/
2009 sizeof(u64);
2010 memcpy(&bna->stats.hw_stats->rxf_stats[i],
2011 p_stats,
2012 sizeof(struct bfi_ll_stats_rxf));
2013 }
2014
2015 bna_sw_stats_get(bna, bna->stats.sw_stats);
2016 bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
2017 } else
2018 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2019}
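
/*
 * Unpack layout: firmware DMAs a packed image in which the fixed
 * stats block is followed by one bfi_ll_stats_rxf per requested RxF
 * and one bfi_ll_stats_txf per requested TxF, in ascending id order.
 * Walking the ids from highest to lowest and copying each block to
 * its final slot expands the packed data in place without a scratch
 * buffer; e.g. with TxF ids 0 and 5 requested the packed order is
 * [txf0][txf5], and the descending walk moves txf5 out first so
 * nothing is overwritten before it has been read.
 */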
2020
2021static void
2022bna_fw_stats_get(struct bna *bna)
2023{
2024 struct bfi_ll_stats_req ll_req;
2025
2026 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
2027 ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
2028
2029 ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
2030 ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]);
2031 ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]);
2032 ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]);
2033
2034 ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
2035 ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;
2036
2037 bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
2038 bna_fw_cb_stats_get, bna);
2039 bna_mbox_send(bna, &bna->mbox_qe);
2040
2041 bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
2042 bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
2043 bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
2044 bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
2045}
2046
2047static void
2048bna_fw_cb_stats_clr(void *arg, int status)
2049{
2050 struct bna *bna = (struct bna *)arg;
2051
2052 bfa_q_qe_init(&bna->mbox_qe.qe);
2053
2054 memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
2055 memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));
2056
2057 bnad_cb_stats_clr(bna->bnad);
2058}
2059
2060static void
2061bna_fw_stats_clr(struct bna *bna)
2062{
2063 struct bfi_ll_stats_req ll_req;
2064
2065 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
2066 ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
2067 ll_req.rxf_id_mask[0] = htonl(0xffffffff);
2068 ll_req.rxf_id_mask[1] = htonl(0xffffffff);
2069 ll_req.txf_id_mask[0] = htonl(0xffffffff);
2070 ll_req.txf_id_mask[1] = htonl(0xffffffff);
2071
2072 bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
2073 bna_fw_cb_stats_clr, bna);
2074 bna_mbox_send(bna, &bna->mbox_qe);
2075}
2076
2077void
2078bna_stats_get(struct bna *bna)
2079{
2080 if (bna_device_status_get(&bna->device))
2081 bna_fw_stats_get(bna);
2082 else
2083 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2084}
2085
2086void
2087bna_stats_clr(struct bna *bna)
2088{
2089 if (bna_device_status_get(&bna->device))
2090 bna_fw_stats_clr(bna);
2091 else {
2092 memset(&bna->stats.sw_stats, 0,
2093 sizeof(struct bna_sw_stats));
2094 memset(bna->stats.hw_stats, 0,
2095 sizeof(struct bfi_ll_stats));
2096 bnad_cb_stats_clr(bna->bnad);
2097 }
2098}
2099
2100/* IB */
2101void
2102bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
2103{
2104 ib->ib_config.coalescing_timeo = coalescing_timeo;
2105
2106 if (ib->start_count)
2107 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
2108 (u32)ib->ib_config.coalescing_timeo, 0);
2109}
2110
2111/* RxF */
2112void
2113bna_rxf_adv_init(struct bna_rxf *rxf,
2114 struct bna_rx *rx,
2115 struct bna_rx_config *q_config)
2116{
2117 switch (q_config->rxp_type) {
2118 case BNA_RXP_SINGLE:
2119 /* No-op */
2120 break;
2121 case BNA_RXP_SLR:
2122 rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
2123 break;
2124 case BNA_RXP_HDS:
2125 rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
2126 rxf->hds_cfg.header_size =
2127 q_config->hds_config.header_size;
2128 rxf->forced_offset = 0;
2129 break;
2130 default:
2131 break;
2132 }
2133
2134 if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
2135 rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
2136 rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
2137 rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
2138 memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
2139 &q_config->rss_config.toeplitz_hash_key[0],
2140 sizeof(rxf->rss_cfg.toeplitz_hash_key));
2141 }
2142}
2143
2144static void
2145rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
2146{
2147 struct bfi_ll_rxf_req req;
2148
2149 bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
2150
2151 req.rxf_id = rxf->rxf_id;
2152 req.enable = status;
2153
2154 bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
2155 rxf_cb_cam_fltr_mbox_cmd, rxf);
2156
2157 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
2158}
2159
2160void
2161__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
2162{
2163 struct bna_rx_fndb_ram *rx_fndb_ram;
2164 u32 ctrl_flags;
2165 int i;
2166
2167 rx_fndb_ram = (struct bna_rx_fndb_ram *)
2168 BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
2169 RX_FNDB_RAM_BASE_OFFSET);
2170
2171 for (i = 0; i < BFI_MAX_RXF; i++) {
2172 if (status == BNA_STATUS_T_ENABLED) {
2173 if (i == rxf->rxf_id)
2174 continue;
2175
2176 ctrl_flags =
2177 readl(&rx_fndb_ram[i].control_flags);
2178 ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
2179 writel(ctrl_flags,
2180 &rx_fndb_ram[i].control_flags);
2181 } else {
2182 ctrl_flags =
2183 readl(&rx_fndb_ram[i].control_flags);
2184 ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
2185 writel(ctrl_flags,
2186 &rx_fndb_ram[i].control_flags);
2187 }
2188 }
2189}
2190
2191int
2192rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
2193{
2194 struct bna_mac *mac = NULL;
2195 struct list_head *qe;
2196
2197 /* Add additional MAC entries */
2198 if (!list_empty(&rxf->ucast_pending_add_q)) {
2199 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
2200 bfa_q_qe_init(qe);
2201 mac = (struct bna_mac *)qe;
2202 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
2203 list_add_tail(&mac->qe, &rxf->ucast_active_q);
2204 return 1;
2205 }
2206
2207	/* Delete MAC addresses previously added */
2208 if (!list_empty(&rxf->ucast_pending_del_q)) {
2209 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
2210 bfa_q_qe_init(qe);
2211 mac = (struct bna_mac *)qe;
2212 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
2213 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
2214 return 1;
2215 }
2216
2217 return 0;
2218}
2219
2220int
2221rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
2222{
2223 struct bna *bna = rxf->rx->bna;
2224
2225 /* Enable/disable promiscuous mode */
2226 if (is_promisc_enable(rxf->rxmode_pending,
2227 rxf->rxmode_pending_bitmask)) {
2228 /* move promisc configuration from pending -> active */
2229 promisc_inactive(rxf->rxmode_pending,
2230 rxf->rxmode_pending_bitmask);
2231 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
2232
2233 /* Disable VLAN filter to allow all VLANs */
2234 __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
2235 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2236 BNA_STATUS_T_ENABLED);
2237 return 1;
2238 } else if (is_promisc_disable(rxf->rxmode_pending,
2239 rxf->rxmode_pending_bitmask)) {
2240 /* move promisc configuration from pending -> active */
2241 promisc_inactive(rxf->rxmode_pending,
2242 rxf->rxmode_pending_bitmask);
2243 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2244 bna->rxf_promisc_id = BFI_MAX_RXF;
2245
2246 /* Revert VLAN filter */
2247 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2248 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2249 BNA_STATUS_T_DISABLED);
2250 return 1;
2251 }
2252
2253 return 0;
2254}
2255
2256int
2257rxf_process_packet_filter_default(struct bna_rxf *rxf)
2258{
2259 struct bna *bna = rxf->rx->bna;
2260
2261 /* Enable/disable default mode */
2262 if (is_default_enable(rxf->rxmode_pending,
2263 rxf->rxmode_pending_bitmask)) {
2264 /* move default configuration from pending -> active */
2265 default_inactive(rxf->rxmode_pending,
2266 rxf->rxmode_pending_bitmask);
2267 rxf->rxmode_active |= BNA_RXMODE_DEFAULT;
2268
2269 /* Disable VLAN filter to allow all VLANs */
2270 __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
2271 /* Redirect all other RxF vlan filtering to this one */
2272 __rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
2273 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
2274 BNA_STATUS_T_ENABLED);
2275 return 1;
2276 } else if (is_default_disable(rxf->rxmode_pending,
2277 rxf->rxmode_pending_bitmask)) {
2278 /* move default configuration from pending -> active */
2279 default_inactive(rxf->rxmode_pending,
2280 rxf->rxmode_pending_bitmask);
2281 rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
2282 bna->rxf_default_id = BFI_MAX_RXF;
2283
2284 /* Revert VLAN filter */
2285 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2286 /* Stop RxF vlan filter table redirection */
2287 __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
2288 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
2289 BNA_STATUS_T_DISABLED);
2290 return 1;
2291 }
2292
2293 return 0;
2294}
2295
2296int
2297rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
2298{
2299 /* Enable/disable allmulti mode */
2300 if (is_allmulti_enable(rxf->rxmode_pending,
2301 rxf->rxmode_pending_bitmask)) {
2302 /* move allmulti configuration from pending -> active */
2303 allmulti_inactive(rxf->rxmode_pending,
2304 rxf->rxmode_pending_bitmask);
2305 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
2306
2307 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2308 BNA_STATUS_T_ENABLED);
2309 return 1;
2310 } else if (is_allmulti_disable(rxf->rxmode_pending,
2311 rxf->rxmode_pending_bitmask)) {
2312 /* move allmulti configuration from pending -> active */
2313 allmulti_inactive(rxf->rxmode_pending,
2314 rxf->rxmode_pending_bitmask);
2315 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2316
2317 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2318 BNA_STATUS_T_DISABLED);
2319 return 1;
2320 }
2321
2322 return 0;
2323}
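
The four rxf_process_packet_filter_*() helpers above share one encoding: rxmode_pending holds the requested on/off value of each mode bit, while rxmode_pending_bitmask marks which of those bits carry an outstanding request. The is_*_enable()/is_*_disable() tests and the *_inactive() consumers are macros defined elsewhere (bna.h); the following is a plausible reconstruction of that scheme for one mode bit, under that assumption, not the driver's actual macros:

	#include <stdint.h>

	#define RXMODE_PROMISC	(1u << 0)

	/* enable request pending: bit marked pending AND value bit set */
	static int sketch_is_promisc_enable(uint32_t pending, uint32_t bitmask)
	{
		return (bitmask & RXMODE_PROMISC) &&
		       (pending & RXMODE_PROMISC);
	}

	/* disable request pending: bit marked pending AND value bit clear */
	static int sketch_is_promisc_disable(uint32_t pending, uint32_t bitmask)
	{
		return (bitmask & RXMODE_PROMISC) &&
		       !(pending & RXMODE_PROMISC);
	}

	/* consume the request: drop both the mark and the value bit */
	static void sketch_promisc_inactive(uint32_t *pending,
					    uint32_t *bitmask)
	{
		*pending &= ~RXMODE_PROMISC;
		*bitmask &= ~RXMODE_PROMISC;
	}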
2324
2325int
2326rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
2327{
2328 struct bna_mac *mac = NULL;
2329 struct list_head *qe;
2330
2331 /* 1. delete pending ucast entries */
2332 if (!list_empty(&rxf->ucast_pending_del_q)) {
2333 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
2334 bfa_q_qe_init(qe);
2335 mac = (struct bna_mac *)qe;
2336 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
2337 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
2338 return 1;
2339 }
2340
2341 /* 2. clear active ucast entries; move them to pending_add_q */
2342 if (!list_empty(&rxf->ucast_active_q)) {
2343 bfa_q_deq(&rxf->ucast_active_q, &qe);
2344 bfa_q_qe_init(qe);
2345 mac = (struct bna_mac *)qe;
2346 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
2347 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
2348 return 1;
2349 }
2350
2351 return 0;
2352}
2353
2354int
2355rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
2356{
2357 struct bna *bna = rxf->rx->bna;
2358
2359 /* 6. Execute pending promisc mode disable command */
2360 if (is_promisc_disable(rxf->rxmode_pending,
2361 rxf->rxmode_pending_bitmask)) {
2362 /* move promisc configuration from pending -> active */
2363 promisc_inactive(rxf->rxmode_pending,
2364 rxf->rxmode_pending_bitmask);
2365 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2366 bna->rxf_promisc_id = BFI_MAX_RXF;
2367
2368 /* Revert VLAN filter */
2369 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2370 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2371 BNA_STATUS_T_DISABLED);
2372 return 1;
2373 }
2374
2375 /* 7. Clear active promisc mode; move it to pending enable */
2376 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
2377 /* move promisc configuration from active -> pending */
2378 promisc_enable(rxf->rxmode_pending,
2379 rxf->rxmode_pending_bitmask);
2380 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2381
2382 /* Revert VLAN filter */
2383 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2384 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
2385 BNA_STATUS_T_DISABLED);
2386 return 1;
2387 }
2388
2389 return 0;
2390}
2391
2392int
2393rxf_clear_packet_filter_default(struct bna_rxf *rxf)
2394{
2395 struct bna *bna = rxf->rx->bna;
2396
2397 /* 8. Execute pending default mode disable command */
2398 if (is_default_disable(rxf->rxmode_pending,
2399 rxf->rxmode_pending_bitmask)) {
2400 /* move default configuration from pending -> active */
2401 default_inactive(rxf->rxmode_pending,
2402 rxf->rxmode_pending_bitmask);
2403 rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
2404 bna->rxf_default_id = BFI_MAX_RXF;
2405
2406 /* Revert VLAN filter */
2407 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2408 /* Stop RxF vlan filter table redirection */
2409 __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
2410 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
2411 BNA_STATUS_T_DISABLED);
2412 return 1;
2413 }
2414
2415 /* 9. Clear active default mode; move it to pending enable */
2416 if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
2417 /* move default configuration from active -> pending */
2418 default_enable(rxf->rxmode_pending,
2419 rxf->rxmode_pending_bitmask);
2420 rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
2421
2422 /* Revert VLAN filter */
2423 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2424 /* Stop RxF vlan filter table redirection */
2425 __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
2426 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
2427 BNA_STATUS_T_DISABLED);
2428 return 1;
2429 }
2430
2431 return 0;
2432}
2433
2434int
2435rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
2436{
2437 /* 10. Execute pending allmulti mode disable command */
2438 if (is_allmulti_disable(rxf->rxmode_pending,
2439 rxf->rxmode_pending_bitmask)) {
2440 /* move allmulti configuration from pending -> active */
2441 allmulti_inactive(rxf->rxmode_pending,
2442 rxf->rxmode_pending_bitmask);
2443 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2444 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2445 BNA_STATUS_T_DISABLED);
2446 return 1;
2447 }
2448
2449 /* 11. Clear active allmulti mode; move it to pending enable */
2450 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
2451 /* move allmulti configuration from active -> pending */
2452 allmulti_enable(rxf->rxmode_pending,
2453 rxf->rxmode_pending_bitmask);
2454 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2455 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
2456 BNA_STATUS_T_DISABLED);
2457 return 1;
2458 }
2459
2460 return 0;
2461}
2462
2463void
2464rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
2465{
2466 struct list_head *qe;
2467 struct bna_mac *mac;
2468
2469 /* 1. Move active ucast entries to pending_add_q */
2470 while (!list_empty(&rxf->ucast_active_q)) {
2471 bfa_q_deq(&rxf->ucast_active_q, &qe);
2472 bfa_q_qe_init(qe);
2473 list_add_tail(qe, &rxf->ucast_pending_add_q);
2474 }
2475
2476 /* 2. Throw away delete pending ucast entries */
2477 while (!list_empty(&rxf->ucast_pending_del_q)) {
2478 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
2479 bfa_q_qe_init(qe);
2480 mac = (struct bna_mac *)qe;
2481 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
2482 }
2483}
2484
2485void
2486rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
2487{
2488 struct bna *bna = rxf->rx->bna;
2489
2490 /* 6. Clear pending promisc mode disable */
2491 if (is_promisc_disable(rxf->rxmode_pending,
2492 rxf->rxmode_pending_bitmask)) {
2493 promisc_inactive(rxf->rxmode_pending,
2494 rxf->rxmode_pending_bitmask);
2495 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2496 bna->rxf_promisc_id = BFI_MAX_RXF;
2497 }
2498
2499 /* 7. Move promisc mode config from active -> pending */
2500 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
2501 promisc_enable(rxf->rxmode_pending,
2502 rxf->rxmode_pending_bitmask);
2503 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
2504 }
2505
2506}
2507
2508void
2509rxf_reset_packet_filter_default(struct bna_rxf *rxf)
2510{
2511 struct bna *bna = rxf->rx->bna;
2512
2513 /* 8. Clear pending default mode disable */
2514 if (is_default_disable(rxf->rxmode_pending,
2515 rxf->rxmode_pending_bitmask)) {
2516 default_inactive(rxf->rxmode_pending,
2517 rxf->rxmode_pending_bitmask);
2518 rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
2519 bna->rxf_default_id = BFI_MAX_RXF;
2520 }
2521
2522 /* 9. Move default mode config from active -> pending */
2523 if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
2524 default_enable(rxf->rxmode_pending,
2525 rxf->rxmode_pending_bitmask);
2526 rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
2527 }
2528}
2529
2530void
2531rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
2532{
2533 /* 10. Clear pending allmulti mode disable */
2534 if (is_allmulti_disable(rxf->rxmode_pending,
2535 rxf->rxmode_pending_bitmask)) {
2536 allmulti_inactive(rxf->rxmode_pending,
2537 rxf->rxmode_pending_bitmask);
2538 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2539 }
2540
2541 /* 11. Move allmulti mode config from active -> pending */
2542 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
2543 allmulti_enable(rxf->rxmode_pending,
2544 rxf->rxmode_pending_bitmask);
2545 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
2546 }
2547}
2548
2549/**
2550 * Should only be called by bna_rxf_mode_set.
2551 * Helps decide whether h/w configuration is needed.
2552 * Returns:
2553 * 0 = no h/w change
2554 * 1 = need h/w change
2555 */
2556int
2557rxf_promisc_enable(struct bna_rxf *rxf)
2558{
2559 struct bna *bna = rxf->rx->bna;
2560 int ret = 0;
2561
2562	/* There cannot be any pending disable command */
2563
2564 /* Do nothing if pending enable or already enabled */
2565 if (is_promisc_enable(rxf->rxmode_pending,
2566 rxf->rxmode_pending_bitmask) ||
2567 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
2568 /* Schedule enable */
2569 } else {
2570 /* Promisc mode should not be active in the system */
2571 promisc_enable(rxf->rxmode_pending,
2572 rxf->rxmode_pending_bitmask);
2573 bna->rxf_promisc_id = rxf->rxf_id;
2574 ret = 1;
2575 }
2576
2577 return ret;
2578}
2579
2580/**
2581 * Should only be called by bna_rxf_mode_set.
2582 * Helps decide whether h/w configuration is needed.
2583 * Returns:
2584 * 0 = no h/w change
2585 * 1 = need h/w change
2586 */
2587int
2588rxf_promisc_disable(struct bna_rxf *rxf)
2589{
2590 struct bna *bna = rxf->rx->bna;
2591 int ret = 0;
2592
2593	/* There cannot be any pending disable */
2594
2595	/* Turn off pending enable command, if any */
2596 if (is_promisc_enable(rxf->rxmode_pending,
2597 rxf->rxmode_pending_bitmask)) {
2598 /* Promisc mode should not be active */
2599 /* system promisc state should be pending */
2600 promisc_inactive(rxf->rxmode_pending,
2601 rxf->rxmode_pending_bitmask);
2602 /* Remove the promisc state from the system */
2603 bna->rxf_promisc_id = BFI_MAX_RXF;
2604
2605 /* Schedule disable */
2606 } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
2607 /* Promisc mode should be active in the system */
2608 promisc_disable(rxf->rxmode_pending,
2609 rxf->rxmode_pending_bitmask);
2610 ret = 1;
2611
2612 /* Do nothing if already disabled */
2613 } else {
2614 }
2615
2616 return ret;
2617}
2618
2619/**
2620 * Should only be called by bna_rxf_mode_set.
2621 * Helps decide whether h/w configuration is needed.
2622 * Returns:
2623 * 0 = no h/w change
2624 * 1 = need h/w change
2625 */
2626int
2627rxf_default_enable(struct bna_rxf *rxf)
2628{
2629 struct bna *bna = rxf->rx->bna;
2630 int ret = 0;
2631
2632	/* There cannot be any pending disable command */
2633
2634 /* Do nothing if pending enable or already enabled */
2635 if (is_default_enable(rxf->rxmode_pending,
2636 rxf->rxmode_pending_bitmask) ||
2637 (rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
2638 /* Schedule enable */
2639 } else {
2640 /* Default mode should not be active in the system */
2641 default_enable(rxf->rxmode_pending,
2642 rxf->rxmode_pending_bitmask);
2643 bna->rxf_default_id = rxf->rxf_id;
2644 ret = 1;
2645 }
2646
2647 return ret;
2648}
2649
2650/**
2651 * Should only be called by bna_rxf_mode_set.
2652 * Helps decide whether h/w configuration is needed.
2653 * Returns:
2654 * 0 = no h/w change
2655 * 1 = need h/w change
2656 */
2657int
2658rxf_default_disable(struct bna_rxf *rxf)
2659{
2660 struct bna *bna = rxf->rx->bna;
2661 int ret = 0;
2662
2663	/* There cannot be any pending disable */
2664
2665	/* Turn off pending enable command, if any */
2666 if (is_default_enable(rxf->rxmode_pending,
2667 rxf->rxmode_pending_bitmask)) {
2668		/* Default mode should not be active */
2669 /* system default state should be pending */
2670 default_inactive(rxf->rxmode_pending,
2671 rxf->rxmode_pending_bitmask);
2672 /* Remove the default state from the system */
2673 bna->rxf_default_id = BFI_MAX_RXF;
2674
2675 /* Schedule disable */
2676 } else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
2677 /* Default mode should be active in the system */
2678 default_disable(rxf->rxmode_pending,
2679 rxf->rxmode_pending_bitmask);
2680 ret = 1;
2681
2682 /* Do nothing if already disabled */
2683 } else {
2684 }
2685
2686 return ret;
2687}
2688
2689/**
2690 * Should only be called by bna_rxf_mode_set.
2691 * Helps decide whether h/w configuration is needed.
2692 * Returns:
2693 * 0 = no h/w change
2694 * 1 = need h/w change
2695 */
2696int
2697rxf_allmulti_enable(struct bna_rxf *rxf)
2698{
2699 int ret = 0;
2700
2701	/* There cannot be any pending disable command */
2702
2703 /* Do nothing if pending enable or already enabled */
2704 if (is_allmulti_enable(rxf->rxmode_pending,
2705 rxf->rxmode_pending_bitmask) ||
2706 (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
2707 /* Schedule enable */
2708 } else {
2709 allmulti_enable(rxf->rxmode_pending,
2710 rxf->rxmode_pending_bitmask);
2711 ret = 1;
2712 }
2713
2714 return ret;
2715}
2716
2717/**
2718 * Should only be called by bna_rxf_mode_set.
2719 * Helps decide whether h/w configuration is needed.
2720 * Returns:
2721 * 0 = no h/w change
2722 * 1 = need h/w change
2723 */
2724int
2725rxf_allmulti_disable(struct bna_rxf *rxf)
2726{
2727 int ret = 0;
2728
2729	/* There cannot be any pending disable */
2730
2731	/* Turn off pending enable command, if any */
2732 if (is_allmulti_enable(rxf->rxmode_pending,
2733 rxf->rxmode_pending_bitmask)) {
2734 /* Allmulti mode should not be active */
2735 allmulti_inactive(rxf->rxmode_pending,
2736 rxf->rxmode_pending_bitmask);
2737
2738 /* Schedule disable */
2739 } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
2740 allmulti_disable(rxf->rxmode_pending,
2741 rxf->rxmode_pending_bitmask);
2742 ret = 1;
2743 }
2744
2745 return ret;
2746}
2747
2748/* RxF <- bnad */
2749void
2750bna_rx_mcast_delall(struct bna_rx *rx,
2751 void (*cbfn)(struct bnad *, struct bna_rx *,
2752 enum bna_cb_status))
2753{
2754 struct bna_rxf *rxf = &rx->rxf;
2755 struct list_head *qe;
2756 struct bna_mac *mac;
2757 int need_hw_config = 0;
2758
2759 /* Purge all entries from pending_add_q */
2760 while (!list_empty(&rxf->mcast_pending_add_q)) {
2761 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
2762 mac = (struct bna_mac *)qe;
2763 bfa_q_qe_init(&mac->qe);
2764 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
2765 }
2766
2767 /* Schedule all entries in active_q for deletion */
2768 while (!list_empty(&rxf->mcast_active_q)) {
2769 bfa_q_deq(&rxf->mcast_active_q, &qe);
2770 mac = (struct bna_mac *)qe;
2771 bfa_q_qe_init(&mac->qe);
2772 list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
2773 need_hw_config = 1;
2774 }
2775
2776 if (need_hw_config) {
2777 rxf->cam_fltr_cbfn = cbfn;
2778 rxf->cam_fltr_cbarg = rx->bna->bnad;
2779 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2780 return;
2781 }
2782
2783 if (cbfn)
2784 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2785}
2786
2787/* RxF <- Rx */
2788void
2789bna_rx_receive_resume(struct bna_rx *rx,
2790 void (*cbfn)(struct bnad *, struct bna_rx *,
2791 enum bna_cb_status))
2792{
2793 struct bna_rxf *rxf = &rx->rxf;
2794
2795 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
2796 rxf->oper_state_cbfn = cbfn;
2797 rxf->oper_state_cbarg = rx->bna->bnad;
2798 bfa_fsm_send_event(rxf, RXF_E_RESUME);
2799 } else if (cbfn)
2800 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2801}
2802
2803void
2804bna_rx_receive_pause(struct bna_rx *rx,
2805 void (*cbfn)(struct bnad *, struct bna_rx *,
2806 enum bna_cb_status))
2807{
2808 struct bna_rxf *rxf = &rx->rxf;
2809
2810 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
2811 rxf->oper_state_cbfn = cbfn;
2812 rxf->oper_state_cbarg = rx->bna->bnad;
2813 bfa_fsm_send_event(rxf, RXF_E_PAUSE);
2814 } else if (cbfn)
2815 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2816}
2817
2818/* RxF <- bnad */
2819enum bna_cb_status
2820bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
2821 void (*cbfn)(struct bnad *, struct bna_rx *,
2822 enum bna_cb_status))
2823{
2824 struct bna_rxf *rxf = &rx->rxf;
2825 struct list_head *qe;
2826 struct bna_mac *mac;
2827
2828 /* Check if already added */
2829 list_for_each(qe, &rxf->ucast_active_q) {
2830 mac = (struct bna_mac *)qe;
2831 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
2832 if (cbfn)
2833 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2834 return BNA_CB_SUCCESS;
2835 }
2836 }
2837
2838 /* Check if pending addition */
2839 list_for_each(qe, &rxf->ucast_pending_add_q) {
2840 mac = (struct bna_mac *)qe;
2841 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
2842 if (cbfn)
2843 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2844 return BNA_CB_SUCCESS;
2845 }
2846 }
2847
2848 mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
2849 if (mac == NULL)
2850 return BNA_CB_UCAST_CAM_FULL;
2851 bfa_q_qe_init(&mac->qe);
2852 memcpy(mac->addr, addr, ETH_ALEN);
2853 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
2854
2855 rxf->cam_fltr_cbfn = cbfn;
2856 rxf->cam_fltr_cbarg = rx->bna->bnad;
2857
2858 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2859
2860 return BNA_CB_SUCCESS;
2861}
2862
2863/* RxF <- bnad */
2864enum bna_cb_status
2865bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
2866 void (*cbfn)(struct bnad *, struct bna_rx *,
2867 enum bna_cb_status))
2868{
2869 struct bna_rxf *rxf = &rx->rxf;
2870 struct list_head *qe;
2871 struct bna_mac *mac;
2872
2873 list_for_each(qe, &rxf->ucast_pending_add_q) {
2874 mac = (struct bna_mac *)qe;
2875 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
2876 list_del(qe);
2877 bfa_q_qe_init(qe);
2878 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
2879 if (cbfn)
2880 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2881 return BNA_CB_SUCCESS;
2882 }
2883 }
2884
2885 list_for_each(qe, &rxf->ucast_active_q) {
2886 mac = (struct bna_mac *)qe;
2887 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
2888 list_del(qe);
2889 bfa_q_qe_init(qe);
2890 list_add_tail(qe, &rxf->ucast_pending_del_q);
2891 rxf->cam_fltr_cbfn = cbfn;
2892 rxf->cam_fltr_cbarg = rx->bna->bnad;
2893 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2894 return BNA_CB_SUCCESS;
2895 }
2896 }
2897
2898 return BNA_CB_INVALID_MAC;
2899}
2900
2901/* RxF <- bnad */
2902enum bna_cb_status
2903bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2904 enum bna_rxmode bitmask,
2905 void (*cbfn)(struct bnad *, struct bna_rx *,
2906 enum bna_cb_status))
2907{
2908 struct bna_rxf *rxf = &rx->rxf;
2909 int need_hw_config = 0;
2910
2911 /* Error checks */
2912
2913 if (is_promisc_enable(new_mode, bitmask)) {
2914 /* If promisc mode is already enabled elsewhere in the system */
2915 if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
2916 (rx->bna->rxf_promisc_id != rxf->rxf_id))
2917 goto err_return;
2918
2919 /* If default mode is already enabled in the system */
2920 if (rx->bna->rxf_default_id != BFI_MAX_RXF)
2921 goto err_return;
2922
2923 /* Trying to enable promiscuous and default mode together */
2924 if (is_default_enable(new_mode, bitmask))
2925 goto err_return;
2926 }
2927
2928 if (is_default_enable(new_mode, bitmask)) {
2929 /* If default mode is already enabled elsewhere in the system */
2930 if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
2931 (rx->bna->rxf_default_id != rxf->rxf_id)) {
2932 goto err_return;
2933 }
2934
2935 /* If promiscuous mode is already enabled in the system */
2936 if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
2937 goto err_return;
2938 }
2939
2940 /* Process the commands */
2941
2942 if (is_promisc_enable(new_mode, bitmask)) {
2943 if (rxf_promisc_enable(rxf))
2944 need_hw_config = 1;
2945 } else if (is_promisc_disable(new_mode, bitmask)) {
2946 if (rxf_promisc_disable(rxf))
2947 need_hw_config = 1;
2948 }
2949
2950 if (is_default_enable(new_mode, bitmask)) {
2951 if (rxf_default_enable(rxf))
2952 need_hw_config = 1;
2953 } else if (is_default_disable(new_mode, bitmask)) {
2954 if (rxf_default_disable(rxf))
2955 need_hw_config = 1;
2956 }
2957
2958 if (is_allmulti_enable(new_mode, bitmask)) {
2959 if (rxf_allmulti_enable(rxf))
2960 need_hw_config = 1;
2961 } else if (is_allmulti_disable(new_mode, bitmask)) {
2962 if (rxf_allmulti_disable(rxf))
2963 need_hw_config = 1;
2964 }
2965
2966 /* Trigger h/w if needed */
2967
2968 if (need_hw_config) {
2969 rxf->cam_fltr_cbfn = cbfn;
2970 rxf->cam_fltr_cbarg = rx->bna->bnad;
2971 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2972 } else if (cbfn)
2973 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2974
2975 return BNA_CB_SUCCESS;
2976
2977err_return:
2978 return BNA_CB_FAIL;
2979}
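
A hedged usage sketch of the API above, as a bnad-layer caller might invoke it to request promiscuous mode. Only bna_rx_mode_set(), BNA_RXMODE_PROMISC and the callback signature come from this file; the function names here are hypothetical:

	static void example_mode_cb(struct bnad *bnad, struct bna_rx *rx,
				    enum bna_cb_status status)
	{
		/* invoked once the CAM/filter state machine applies the change */
	}

	static void example_enable_promisc(struct bna_rx *rx)
	{
		/* new_mode carries the desired values; bitmask selects which
		 * mode bits of new_mode are meaningful for this call */
		if (bna_rx_mode_set(rx, BNA_RXMODE_PROMISC, BNA_RXMODE_PROMISC,
				    example_mode_cb) != BNA_CB_SUCCESS)
			; /* promisc/default already owned elsewhere */
	}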
2980
2981/* RxF <- bnad */
2982void
2983bna_rx_rss_enable(struct bna_rx *rx)
2984{
2985 struct bna_rxf *rxf = &rx->rxf;
2986
2987 rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
2988 rxf->rss_status = BNA_STATUS_T_ENABLED;
2989 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2990}
2991
2992/* RxF <- bnad */
2993void
2994bna_rx_rss_disable(struct bna_rx *rx)
2995{
2996 struct bna_rxf *rxf = &rx->rxf;
2997
2998 rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
2999 rxf->rss_status = BNA_STATUS_T_DISABLED;
3000 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
3001}
3002
3003/* RxF <- bnad */
3004void
3005bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
3006{
3007 struct bna_rxf *rxf = &rx->rxf;
3008 rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
3009 rxf->rss_status = BNA_STATUS_T_ENABLED;
3010 rxf->rss_cfg = *rss_config;
3011 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
3012}
3013
3014/* RxF <- bnad */
3015void
3016bna_rx_vlanfilter_enable(struct bna_rx *rx)
3017{
3018 struct bna_rxf *rxf = &rx->rxf;
3019
3020 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
3021 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
3022 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
3023 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
3024 }
3025}
3026
3027/* RxF <- bnad */
3028void
3029bna_rx_vlanfilter_disable(struct bna_rx *rx)
3030{
3031 struct bna_rxf *rxf = &rx->rxf;
3032
3033 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
3034 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
3035 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
3036 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
3037 }
3038}
3039
3040/* Rx */
3041
3042struct bna_rxp *
3043bna_rx_get_rxp(struct bna_rx *rx, int vector)
3044{
3045 struct bna_rxp *rxp;
3046 struct list_head *qe;
3047
3048 list_for_each(qe, &rx->rxp_q) {
3049 rxp = (struct bna_rxp *)qe;
3050 if (rxp->vector == vector)
3051 return rxp;
3052 }
3053 return NULL;
3054}
3055
3056/*
3057 * bna_rx_rss_rit_set()
3058 * Sets the RxQ ids for the specified MSI-X vectors in the RIT.
3059 * The maximum supported RIT size is 64, which also bounds the size of
3060 * the vectors array.
3061 */
3062
3063void
3064bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
3065{
3066 int i;
3067 struct bna_rxp *rxp;
3068 struct bna_rxq *q0 = NULL, *q1 = NULL;
3069 struct bna *bna;
3070 struct bna_rxf *rxf;
3071
3072 /* Build the RIT contents for this RX */
3073 bna = rx->bna;
3074
3075 rxf = &rx->rxf;
3076 for (i = 0; i < nvectors; i++) {
3077 rxp = bna_rx_get_rxp(rx, vectors[i]);
3078
3079 GET_RXQS(rxp, q0, q1);
3080 rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
3081 rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
3082 }
3083
3084 rxf->rit_segment->rit_size = nvectors;
3085
3086 /* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
3087}
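
A small assumed-caller sketch for the routine above: programming an identity RIT for the first nvec MSI-X vectors of an Rx. Only bna_rx_rss_rit_set() comes from this file; the 64-entry clamp mirrors the maximum RIT size stated in the comment:

	static void example_rit_identity(struct bna_rx *rx, int nvec)
	{
		unsigned int vectors[64];
		int i;

		if (nvec > 64)
			nvec = 64;	/* max RIT size per the comment above */
		for (i = 0; i < nvec; i++)
			vectors[i] = i;	/* RIT slot i <- RxQ pair of vector i */
		bna_rx_rss_rit_set(rx, vectors, nvec);
		/* takes effect on the next RSS enable/reconfig */
	}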
3088
3089/* Rx <- bnad */
3090void
3091bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
3092{
3093 struct bna_rxp *rxp;
3094 struct list_head *qe;
3095
3096 list_for_each(qe, &rx->rxp_q) {
3097 rxp = (struct bna_rxp *)qe;
3098 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
3099 bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
3100 }
3101}
3102
3103/* Rx <- bnad */
3104void
3105bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
3106{
3107 int i, j;
3108
3109 for (i = 0; i < BNA_LOAD_T_MAX; i++)
3110 for (j = 0; j < BNA_BIAS_T_MAX; j++)
3111 bna->rx_mod.dim_vector[i][j] = vector[i][j];
3112}
3113
3114/* Rx <- bnad */
3115void
3116bna_rx_dim_update(struct bna_ccb *ccb)
3117{
3118 struct bna *bna = ccb->cq->rx->bna;
3119 u32 load, bias;
3120 u32 pkt_rt, small_rt, large_rt;
3121 u8 coalescing_timeo;
3122
3123 if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
3124 (ccb->pkt_rate.large_pkt_cnt == 0))
3125 return;
3126
3127 /* Arrive at preconfigured coalescing timeo value based on pkt rate */
3128
3129 small_rt = ccb->pkt_rate.small_pkt_cnt;
3130 large_rt = ccb->pkt_rate.large_pkt_cnt;
3131
3132 pkt_rt = small_rt + large_rt;
3133
3134 if (pkt_rt < BNA_PKT_RATE_10K)
3135 load = BNA_LOAD_T_LOW_4;
3136 else if (pkt_rt < BNA_PKT_RATE_20K)
3137 load = BNA_LOAD_T_LOW_3;
3138 else if (pkt_rt < BNA_PKT_RATE_30K)
3139 load = BNA_LOAD_T_LOW_2;
3140 else if (pkt_rt < BNA_PKT_RATE_40K)
3141 load = BNA_LOAD_T_LOW_1;
3142 else if (pkt_rt < BNA_PKT_RATE_50K)
3143 load = BNA_LOAD_T_HIGH_1;
3144 else if (pkt_rt < BNA_PKT_RATE_60K)
3145 load = BNA_LOAD_T_HIGH_2;
3146 else if (pkt_rt < BNA_PKT_RATE_80K)
3147 load = BNA_LOAD_T_HIGH_3;
3148 else
3149 load = BNA_LOAD_T_HIGH_4;
3150
3151 if (small_rt > (large_rt << 1))
3152 bias = 0;
3153 else
3154 bias = 1;
3155
3156 ccb->pkt_rate.small_pkt_cnt = 0;
3157 ccb->pkt_rate.large_pkt_cnt = 0;
3158
3159 coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
3160 ccb->rx_coalescing_timeo = coalescing_timeo;
3161
3162 /* Set it to IB */
3163 bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
3164}
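
As a worked example of the banding above: with small_pkt_cnt = 25000 and large_pkt_cnt = 10000 in one sample period, pkt_rt = 35000 falls below BNA_PKT_RATE_40K, so load = BNA_LOAD_T_LOW_1; small_rt (25000) exceeds large_rt << 1 (20000), so bias = 0; the new timeout becomes dim_vector[BNA_LOAD_T_LOW_1][0], i.e. whatever bna_rx_dim_reconfig() programmed for that load/bias cell.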
3165
3166/* Tx */
3167/* TX <- bnad */
3168enum bna_cb_status
3169bna_tx_prio_set(struct bna_tx *tx, int prio,
3170 void (*cbfn)(struct bnad *, struct bna_tx *,
3171 enum bna_cb_status))
3172{
3173 if (tx->flags & BNA_TX_F_PRIO_LOCK)
3174 return BNA_CB_FAIL;
3175 else {
3176 tx->prio_change_cbfn = cbfn;
3177 bna_tx_prio_changed(tx, prio);
3178 }
3179
3180 return BNA_CB_SUCCESS;
3181}
3182
3183/* TX <- bnad */
3184void
3185bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3186{
3187 struct bna_txq *txq;
3188 struct list_head *qe;
3189
3190 list_for_each(qe, &tx->txq_q) {
3191 txq = (struct bna_txq *)qe;
3192 bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
3193 }
3194}
3195
3196/*
3197 * Private data
3198 */
3199
3200struct bna_ritseg_pool_cfg {
3201 u32 pool_size;
3202 u32 pool_entry_size;
3203};
3204init_ritseg_pool(ritseg_pool_cfg);
3205
3206/*
3207 * Private functions
3208 */
3209static void
3210bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
3211 struct bna_res_info *res_info)
3212{
3213 int i;
3214
3215 ucam_mod->ucmac = (struct bna_mac *)
3216 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
3217
3218 INIT_LIST_HEAD(&ucam_mod->free_q);
3219 for (i = 0; i < BFI_MAX_UCMAC; i++) {
3220 bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
3221 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
3222 }
3223
3224 ucam_mod->bna = bna;
3225}
3226
3227static void
3228bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
3229{
3230 struct list_head *qe;
3231 int i = 0;
3232
3233 list_for_each(qe, &ucam_mod->free_q)
3234 i++;
3235
3236 ucam_mod->bna = NULL;
3237}
3238
3239static void
3240bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
3241 struct bna_res_info *res_info)
3242{
3243 int i;
3244
3245 mcam_mod->mcmac = (struct bna_mac *)
3246 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
3247
3248 INIT_LIST_HEAD(&mcam_mod->free_q);
3249 for (i = 0; i < BFI_MAX_MCMAC; i++) {
3250 bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
3251 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
3252 }
3253
3254 mcam_mod->bna = bna;
3255}
3256
3257static void
3258bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
3259{
3260 struct list_head *qe;
3261 int i = 0;
3262
3263 list_for_each(qe, &mcam_mod->free_q)
3264 i++;
3265
3266 mcam_mod->bna = NULL;
3267}
3268
3269static void
3270bna_rit_mod_init(struct bna_rit_mod *rit_mod,
3271 struct bna_res_info *res_info)
3272{
3273 int i;
3274 int j;
3275 int count;
3276 int offset;
3277
3278 rit_mod->rit = (struct bna_rit_entry *)
3279 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
3280 rit_mod->rit_segment = (struct bna_rit_segment *)
3281 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;
3282
3283 count = 0;
3284 offset = 0;
3285 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3286 INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
3287 for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
3288 bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
3289 rit_mod->rit_segment[count].max_rit_size =
3290 ritseg_pool_cfg[i].pool_entry_size;
3291 rit_mod->rit_segment[count].rit_offset = offset;
3292 rit_mod->rit_segment[count].rit =
3293 &rit_mod->rit[offset];
3294 list_add_tail(&rit_mod->rit_segment[count].qe,
3295 &rit_mod->rit_seg_pool[i]);
3296 count++;
3297 offset += ritseg_pool_cfg[i].pool_entry_size;
3298 }
3299 }
3300}
3301
3302static void
3303bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
3304{
3305 struct bna_rit_segment *rit_segment;
3306 struct list_head *qe;
3307 int i;
3308 int j;
3309
3310 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3311 j = 0;
3312 list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
3313 rit_segment = (struct bna_rit_segment *)qe;
3314 j++;
3315 }
3316 }
3317}
3318
3319/*
3320 * Public functions
3321 */
3322
3323/* Called during probe(), before calling bna_init() */
3324void
3325bna_res_req(struct bna_res_info *res_info)
3326{
3327 bna_adv_res_req(res_info);
3328
3329 /* DMA memory for retrieving IOC attributes */
3330 res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
3331 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
3332 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
3333 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
3334 ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);
3335
3336 /* DMA memory for index segment of an IB */
3337 res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3338 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
3339 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
3340 BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
3341 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;
3342
3343 /* Virtual memory for IB objects - stored by IB module */
3344 res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
3345 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
3346 BNA_MEM_T_KVA;
3347 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
3348 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
3349 BFI_MAX_IB * sizeof(struct bna_ib);
3350
3351 /* Virtual memory for intr objects - stored by IB module */
3352 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
3353 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
3354 BNA_MEM_T_KVA;
3355 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
3356 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
3357 BFI_MAX_IB * sizeof(struct bna_intr);
3358
3359 /* Virtual memory for idx_seg objects - stored by IB module */
3360 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
3361 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
3362 BNA_MEM_T_KVA;
3363 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
3364 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
3365 BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);
3366
3367 /* Virtual memory for Tx objects - stored by Tx module */
3368 res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
3369 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
3370 BNA_MEM_T_KVA;
3371 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
3372 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
3373 BFI_MAX_TXQ * sizeof(struct bna_tx);
3374
3375 /* Virtual memory for TxQ - stored by Tx module */
3376 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
3377 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
3378 BNA_MEM_T_KVA;
3379 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
3380 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
3381 BFI_MAX_TXQ * sizeof(struct bna_txq);
3382
3383 /* Virtual memory for Rx objects - stored by Rx module */
3384 res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
3385 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
3386 BNA_MEM_T_KVA;
3387 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
3388 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
3389 BFI_MAX_RXQ * sizeof(struct bna_rx);
3390
3391 /* Virtual memory for RxPath - stored by Rx module */
3392 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
3393 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
3394 BNA_MEM_T_KVA;
3395 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
3396 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
3397 BFI_MAX_RXQ * sizeof(struct bna_rxp);
3398
3399 /* Virtual memory for RxQ - stored by Rx module */
3400 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
3401 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
3402 BNA_MEM_T_KVA;
3403 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
3404 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
3405 BFI_MAX_RXQ * sizeof(struct bna_rxq);
3406
3407 /* Virtual memory for Unicast MAC address - stored by ucam module */
3408 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
3409 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
3410 BNA_MEM_T_KVA;
3411 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
3412 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
3413 BFI_MAX_UCMAC * sizeof(struct bna_mac);
3414
3415 /* Virtual memory for Multicast MAC address - stored by mcam module */
3416 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
3417 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
3418 BNA_MEM_T_KVA;
3419 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
3420 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
3421 BFI_MAX_MCMAC * sizeof(struct bna_mac);
3422
3423 /* Virtual memory for RIT entries */
3424 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
3425 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
3426 BNA_MEM_T_KVA;
3427 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
3428 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
3429 BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);
3430
3431 /* Virtual memory for RIT segment table */
3432 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
3433 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
3434 BNA_MEM_T_KVA;
3435 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
3436 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
3437 BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);
3438
3439 /* Interrupt resource for mailbox interrupt */
3440 res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
3441 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
3442 BNA_INTR_T_MSIX;
3443 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
3444}
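
A hedged sketch of the probe-time contract implied by the comment above: the caller fills the bna_res_info table via bna_res_req(), allocates every requested resource itself, then hands the same table to bna_init(). example_alloc_resources() is a hypothetical helper; only bna_res_req() and bna_init() come from this file:

	/* hypothetical: walks res_info[] and allocates each KVA/DMA request */
	static int example_alloc_resources(struct bna_res_info *res_info);

	static int example_probe_setup(struct bna *bna, struct bnad *bnad,
				       struct bfa_pcidev *pcidev,
				       struct bna_res_info *res_info)
	{
		bna_res_req(res_info);	/* fills sizes/types; allocates nothing */
		if (example_alloc_resources(res_info))
			return -1;	/* allocation failed */
		bna_init(bna, bnad, pcidev, res_info);
		return 0;
	}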
3445
3446/* Called during probe() */
3447void
3448bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
3449 struct bna_res_info *res_info)
3450{
3451 bna->bnad = bnad;
3452 bna->pcidev = *pcidev;
3453
3454 bna->stats.hw_stats = (struct bfi_ll_stats *)
3455 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
3456 bna->hw_stats_dma.msb =
3457 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
3458 bna->hw_stats_dma.lsb =
3459 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
3460 bna->stats.sw_stats = (struct bna_sw_stats *)
3461 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;
3462
3463 bna->regs.page_addr = bna->pcidev.pci_bar_kva +
3464 reg_offset[bna->pcidev.pci_func].page_addr;
3465 bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
3466 reg_offset[bna->pcidev.pci_func].fn_int_status;
3467 bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
3468 reg_offset[bna->pcidev.pci_func].fn_int_mask;
3469
3470 if (bna->pcidev.pci_func < 3)
3471 bna->port_num = 0;
3472 else
3473 bna->port_num = 1;
3474
3475 /* Also initializes diag, cee, sfp, phy_port and mbox_mod */
3476 bna_device_init(&bna->device, bna, res_info);
3477
3478 bna_port_init(&bna->port, bna);
3479
3480 bna_tx_mod_init(&bna->tx_mod, bna, res_info);
3481
3482 bna_rx_mod_init(&bna->rx_mod, bna, res_info);
3483
3484 bna_ib_mod_init(&bna->ib_mod, bna, res_info);
3485
3486 bna_rit_mod_init(&bna->rit_mod, res_info);
3487
3488 bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);
3489
3490 bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
3491
3492 bna->rxf_default_id = BFI_MAX_RXF;
3493 bna->rxf_promisc_id = BFI_MAX_RXF;
3494
3495 /* Mbox q element for posting stat request to f/w */
3496 bfa_q_qe_init(&bna->mbox_qe.qe);
3497}
3498
3499void
3500bna_uninit(struct bna *bna)
3501{
3502 bna_mcam_mod_uninit(&bna->mcam_mod);
3503
3504 bna_ucam_mod_uninit(&bna->ucam_mod);
3505
3506 bna_rit_mod_uninit(&bna->rit_mod);
3507
3508 bna_ib_mod_uninit(&bna->ib_mod);
3509
3510 bna_rx_mod_uninit(&bna->rx_mod);
3511
3512 bna_tx_mod_uninit(&bna->tx_mod);
3513
3514 bna_port_uninit(&bna->port);
3515
3516 bna_device_uninit(&bna->device);
3517
3518 bna->bnad = NULL;
3519}
3520
3521struct bna_mac *
3522bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
3523{
3524 struct list_head *qe;
3525
3526 if (list_empty(&ucam_mod->free_q))
3527 return NULL;
3528
3529 bfa_q_deq(&ucam_mod->free_q, &qe);
3530
3531 return (struct bna_mac *)qe;
3532}
3533
3534void
3535bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
3536{
3537 list_add_tail(&mac->qe, &ucam_mod->free_q);
3538}
3539
3540struct bna_mac *
3541bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
3542{
3543 struct list_head *qe;
3544
3545 if (list_empty(&mcam_mod->free_q))
3546 return NULL;
3547
3548 bfa_q_deq(&mcam_mod->free_q, &qe);
3549
3550 return (struct bna_mac *)qe;
3551}
3552
3553void
3554bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
3555{
3556 list_add_tail(&mac->qe, &mcam_mod->free_q);
3557}
3558
3559/**
3560 * Note: This should be called in the same locking context as the call to
3561 * bna_rit_mod_seg_get()
3562 */
3563int
3564bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
3565{
3566 int i;
3567
3568 /* Select the pool for seg_size */
3569 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3570 if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
3571 break;
3572 }
3573
3574 if (i == BFI_RIT_SEG_TOTAL_POOLS)
3575 return 0;
3576
3577 if (list_empty(&rit_mod->rit_seg_pool[i]))
3578 return 0;
3579
3580 return 1;
3581}
3582
3583struct bna_rit_segment *
3584bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
3585{
3586 struct bna_rit_segment *seg;
3587 struct list_head *qe;
3588 int i;
3589
3590 /* Select the pool for seg_size */
3591 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3592 if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
3593 break;
3594 }
3595
3596 if (i == BFI_RIT_SEG_TOTAL_POOLS)
3597 return NULL;
3598
3599 if (list_empty(&rit_mod->rit_seg_pool[i]))
3600 return NULL;
3601
3602 bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
3603 seg = (struct bna_rit_segment *)qe;
3604 bfa_q_qe_init(&seg->qe);
3605 seg->rit_size = seg_size;
3606
3607 return seg;
3608}
3609
3610void
3611bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
3612 struct bna_rit_segment *seg)
3613{
3614 int i;
3615
3616 /* Select the pool for seg->max_rit_size */
3617 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
3618 if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
3619 break;
3620 }
3621
3622 seg->rit_size = 0;
3623 list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
3624}
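
A usage note on the pool helpers above, with an illustrative wrapper: bna_rit_mod_can_satisfy() and bna_rit_mod_seg_get() must run under the same lock (see the note before can_satisfy), since the answer is stale as soon as the pool changes, and every successful _seg_get() should be paired with a later _seg_put(). example_rit_seg_alloc() is a hypothetical name; the two calls it makes exist in this file:

	static struct bna_rit_segment *
	example_rit_seg_alloc(struct bna_rit_mod *rit_mod, int seg_size)
	{
		/* caller holds the lock covering both calls */
		if (!bna_rit_mod_can_satisfy(rit_mod, seg_size))
			return NULL;	/* no pool has a big-enough free segment */
		return bna_rit_mod_seg_get(rit_mod, seg_size);
	}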
diff --git a/drivers/net/bna/bna_hw.h b/drivers/net/bna/bna_hw.h
new file mode 100644
index 000000000000..67eb376c5c7e
--- /dev/null
+++ b/drivers/net/bna/bna_hw.h
@@ -0,0 +1,1491 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 *
18 * File for interrupt macros and functions
19 */
20
21#ifndef __BNA_HW_H__
22#define __BNA_HW_H__
23
24#include "bfi_ctreg.h"
25
26/**
27 *
28 * SW imposed limits
29 *
30 */
31
32#ifndef BNA_BIOS_BUILD
33
34#define BFI_MAX_TXQ 64
35#define BFI_MAX_RXQ 64
36#define BFI_MAX_RXF 64
37#define BFI_MAX_IB 128
38#define BFI_MAX_RIT_SIZE 256
39#define BFI_RSS_RIT_SIZE 64
40#define BFI_NONRSS_RIT_SIZE 1
41#define BFI_MAX_UCMAC 256
42#define BFI_MAX_MCMAC 512
43#define BFI_IBIDX_SIZE 4
44#define BFI_MAX_VLAN 4095
45
46/**
47 * There are 3 free IB index pools:
48 * pool1: 116 segments of 1 index each; pool2: 2 segments of 2 indexes each
49 * pool8: 1 segment of 8 indexes
50 */
51#define BFI_IBIDX_POOL1_SIZE 116
52#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
53#define BFI_IBIDX_POOL2_SIZE 2
54#define BFI_IBIDX_POOL2_ENTRY_SIZE 2
55#define BFI_IBIDX_POOL8_SIZE 1
56#define BFI_IBIDX_POOL8_ENTRY_SIZE 8
57#define BFI_IBIDX_TOTAL_POOLS 3
58#define BFI_IBIDX_TOTAL_SEGS 119 /* (POOL1 + POOL2 + POOL8)_SIZE */
59#define BFI_IBIDX_MAX_SEGSIZE 8
60#define init_ibidx_pool(name) \
61static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
62{ \
63 { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE }, \
64 { BFI_IBIDX_POOL2_SIZE, BFI_IBIDX_POOL2_ENTRY_SIZE }, \
65 { BFI_IBIDX_POOL8_SIZE, BFI_IBIDX_POOL8_ENTRY_SIZE } \
66}
67
68/**
69 * There are 2 free RIT segment pools:
70 * Pool1: 192 segments of 1 RIT entry each
71 * PoolRSS: 1 segment of 64 RIT entries
72 */
73#define BFI_RIT_SEG_POOL1_SIZE 192
74#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
75#define BFI_RIT_SEG_POOLRSS_SIZE 1
76#define BFI_RIT_SEG_POOLRSS_ENTRY_SIZE 64
77#define BFI_RIT_SEG_TOTAL_POOLS 2
78#define BFI_RIT_TOTAL_SEGS 193 /* POOL1_SIZE + POOLRSS_SIZE */
79#define init_ritseg_pool(name) \
80static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
81{ \
82 { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE }, \
83 { BFI_RIT_SEG_POOLRSS_SIZE, BFI_RIT_SEG_POOLRSS_ENTRY_SIZE } \
84}
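
For reference, the invocation init_ritseg_pool(ritseg_pool_cfg); in bna_ctrl.c expands, with these non-BIOS values, to the equivalent of:

	static struct bna_ritseg_pool_cfg ritseg_pool_cfg[2] = {
		{ 192, 1 },	/* pool1: 192 segments of 1 entry each */
		{ 1, 64 }	/* poolRSS: 1 segment of 64 entries */
	};

with pool_size first and pool_entry_size second, matching the struct layout in bna_ctrl.c.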
85
86#else /* BNA_BIOS_BUILD */
87
88#define BFI_MAX_TXQ 1
89#define BFI_MAX_RXQ 1
90#define BFI_MAX_RXF 1
91#define BFI_MAX_IB 2
92#define BFI_MAX_RIT_SIZE 2
93#define BFI_RSS_RIT_SIZE 64
94#define BFI_NONRSS_RIT_SIZE 1
95#define BFI_MAX_UCMAC 1
96#define BFI_MAX_MCMAC 8
97#define BFI_IBIDX_SIZE 4
98#define BFI_MAX_VLAN 4095
99/* There is one free pool: 2 segments of 1 index each */
100#define BFI_IBIDX_POOL1_SIZE 2
101#define BFI_IBIDX_POOL1_ENTRY_SIZE 1
102#define BFI_IBIDX_TOTAL_POOLS 1
103#define BFI_IBIDX_TOTAL_SEGS 2 /* POOL1_SIZE */
104#define BFI_IBIDX_MAX_SEGSIZE 1
105#define init_ibidx_pool(name) \
106static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
107{ \
108 { BFI_IBIDX_POOL1_SIZE, BFI_IBIDX_POOL1_ENTRY_SIZE } \
109}
110
111#define BFI_RIT_SEG_POOL1_SIZE 1
112#define BFI_RIT_SEG_POOL1_ENTRY_SIZE 1
113#define BFI_RIT_SEG_TOTAL_POOLS 1
114#define BFI_RIT_TOTAL_SEGS 1 /* POOL1_SIZE */
115#define init_ritseg_pool(name) \
116static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
117{ \
118 { BFI_RIT_SEG_POOL1_SIZE, BFI_RIT_SEG_POOL1_ENTRY_SIZE } \
119}
120
121#endif /* BNA_BIOS_BUILD */
122
123#define BFI_RSS_HASH_KEY_LEN 10
124
125#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */
126#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */
127#define BFI_MAX_INTERPKT_COUNT 0xFF
128#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */
129#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */
130#define BFI_TX_INTERPKT_COUNT 32
131#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */
132#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */
133#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */
134
135#define BFI_TXQ_WI_SIZE 64 /* bytes */
136#define BFI_RXQ_WI_SIZE 8 /* bytes */
137#define BFI_CQ_WI_SIZE 16 /* bytes */
138#define BFI_TX_MAX_WRR_QUOTA 0xFFF
139
140#define BFI_TX_MAX_VECTORS_PER_WI 4
141#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF
142#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF
143#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF
144
145/* Small Q buffer size */
146#define BFI_SMALL_RXBUF_SIZE 128
147
148/* Defined separately since BFA_FLASH_DMA_BUF_SZ is in bfa_flash.c */
149#define BFI_FLASH_DMA_BUF_SZ 0x010000 /* 64K DMA */
150#define BFI_HW_STATS_SIZE 0x4000 /* 16K DMA */
151
152/**
153 *
154 * HW register offsets, macros
155 *
156 */
157
158/* DMA Block Register Host Window Start Address */
159#define DMA_BLK_REG_ADDR 0x00013000
160
161/* DMA Block Internal Registers */
162#define DMA_CTRL_REG0 (DMA_BLK_REG_ADDR + 0x000)
163#define DMA_CTRL_REG1 (DMA_BLK_REG_ADDR + 0x004)
164#define DMA_ERR_INT_STATUS (DMA_BLK_REG_ADDR + 0x008)
165#define DMA_ERR_INT_ENABLE (DMA_BLK_REG_ADDR + 0x00c)
166#define DMA_ERR_INT_STATUS_SET (DMA_BLK_REG_ADDR + 0x010)
167
168/* APP Block Register Address Offset from BAR0 */
169#define APP_BLK_REG_ADDR 0x00014000
170
171/* Host Function Interrupt Mask Registers */
172#define HOSTFN0_INT_MASK (APP_BLK_REG_ADDR + 0x004)
173#define HOSTFN1_INT_MASK (APP_BLK_REG_ADDR + 0x104)
174#define HOSTFN2_INT_MASK (APP_BLK_REG_ADDR + 0x304)
175#define HOSTFN3_INT_MASK (APP_BLK_REG_ADDR + 0x404)
176
177/**
178 * Host Function PCIe Error Registers
179 * Duplicates "Correctable" & "Uncorrectable"
180 * registers in PCIe Config space.
181 */
182#define FN0_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x014)
183#define FN1_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x114)
184#define FN2_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x314)
185#define FN3_PCIE_ERR_REG (APP_BLK_REG_ADDR + 0x414)
186
187/* Host Function Error Type Status Registers */
188#define FN0_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x018)
189#define FN1_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x118)
190#define FN2_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x318)
191#define FN3_ERR_TYPE_STATUS_REG (APP_BLK_REG_ADDR + 0x418)
192
193/* Host Function Error Type Mask Registers */
194#define FN0_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x01c)
195#define FN1_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x11c)
196#define FN2_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x31c)
197#define FN3_ERR_TYPE_MSK_STATUS_REG (APP_BLK_REG_ADDR + 0x41c)
198
199/* Catapult Host Semaphore Status Registers (App block) */
200#define HOST_SEM_STS0_REG (APP_BLK_REG_ADDR + 0x630)
201#define HOST_SEM_STS1_REG (APP_BLK_REG_ADDR + 0x634)
202#define HOST_SEM_STS2_REG (APP_BLK_REG_ADDR + 0x638)
203#define HOST_SEM_STS3_REG (APP_BLK_REG_ADDR + 0x63c)
204#define HOST_SEM_STS4_REG (APP_BLK_REG_ADDR + 0x640)
205#define HOST_SEM_STS5_REG (APP_BLK_REG_ADDR + 0x644)
206#define HOST_SEM_STS6_REG (APP_BLK_REG_ADDR + 0x648)
207#define HOST_SEM_STS7_REG (APP_BLK_REG_ADDR + 0x64c)
208
209/* PCIe Misc Register */
210#define PCIE_MISC_REG (APP_BLK_REG_ADDR + 0x200)
211
212/* Temp Sensor Control Registers */
213#define TEMPSENSE_CNTL_REG (APP_BLK_REG_ADDR + 0x250)
214#define TEMPSENSE_STAT_REG (APP_BLK_REG_ADDR + 0x254)
215
216/* APP Block local error registers */
217#define APP_LOCAL_ERR_STAT (APP_BLK_REG_ADDR + 0x258)
218#define APP_LOCAL_ERR_MSK (APP_BLK_REG_ADDR + 0x25c)
219
220/* PCIe Link Error registers */
221#define PCIE_LNK_ERR_STAT (APP_BLK_REG_ADDR + 0x260)
222#define PCIE_LNK_ERR_MSK (APP_BLK_REG_ADDR + 0x264)
223
224/**
225 * FCoE/FIP Ethertype Register
226 * 31:16 -- Chip wide value for FIP type
227 * 15:0 -- Chip wide value for FCoE type
228 */
229#define FCOE_FIP_ETH_TYPE (APP_BLK_REG_ADDR + 0x280)
230
231/**
232 * Reserved Ethertype Register
233 * 31:16 -- Reserved
234 * 15:0 -- Other ethertype
235 */
236#define RESV_ETH_TYPE (APP_BLK_REG_ADDR + 0x284)
237
238/**
239 * Host Command Status Registers
240 * Each set consists of 3 registers :
241 * clear, set, cmd
242 * 16 such register sets in all
243 * See catapult_spec.pdf for detailed functionality
244 * Put each type in a single macro accessed by _num ?
245 */
246#define HOST_CMDSTS0_CLR_REG (APP_BLK_REG_ADDR + 0x500)
247#define HOST_CMDSTS0_SET_REG (APP_BLK_REG_ADDR + 0x504)
248#define HOST_CMDSTS0_REG (APP_BLK_REG_ADDR + 0x508)
249#define HOST_CMDSTS1_CLR_REG (APP_BLK_REG_ADDR + 0x510)
250#define HOST_CMDSTS1_SET_REG (APP_BLK_REG_ADDR + 0x514)
251#define HOST_CMDSTS1_REG (APP_BLK_REG_ADDR + 0x518)
252#define HOST_CMDSTS2_CLR_REG (APP_BLK_REG_ADDR + 0x520)
253#define HOST_CMDSTS2_SET_REG (APP_BLK_REG_ADDR + 0x524)
254#define HOST_CMDSTS2_REG (APP_BLK_REG_ADDR + 0x528)
255#define HOST_CMDSTS3_CLR_REG (APP_BLK_REG_ADDR + 0x530)
256#define HOST_CMDSTS3_SET_REG (APP_BLK_REG_ADDR + 0x534)
257#define HOST_CMDSTS3_REG (APP_BLK_REG_ADDR + 0x538)
258#define HOST_CMDSTS4_CLR_REG (APP_BLK_REG_ADDR + 0x540)
259#define HOST_CMDSTS4_SET_REG (APP_BLK_REG_ADDR + 0x544)
260#define HOST_CMDSTS4_REG (APP_BLK_REG_ADDR + 0x548)
261#define HOST_CMDSTS5_CLR_REG (APP_BLK_REG_ADDR + 0x550)
262#define HOST_CMDSTS5_SET_REG (APP_BLK_REG_ADDR + 0x554)
263#define HOST_CMDSTS5_REG (APP_BLK_REG_ADDR + 0x558)
264#define HOST_CMDSTS6_CLR_REG (APP_BLK_REG_ADDR + 0x560)
265#define HOST_CMDSTS6_SET_REG (APP_BLK_REG_ADDR + 0x564)
266#define HOST_CMDSTS6_REG (APP_BLK_REG_ADDR + 0x568)
267#define HOST_CMDSTS7_CLR_REG (APP_BLK_REG_ADDR + 0x570)
268#define HOST_CMDSTS7_SET_REG (APP_BLK_REG_ADDR + 0x574)
269#define HOST_CMDSTS7_REG (APP_BLK_REG_ADDR + 0x578)
270#define HOST_CMDSTS8_CLR_REG (APP_BLK_REG_ADDR + 0x580)
271#define HOST_CMDSTS8_SET_REG (APP_BLK_REG_ADDR + 0x584)
272#define HOST_CMDSTS8_REG (APP_BLK_REG_ADDR + 0x588)
273#define HOST_CMDSTS9_CLR_REG (APP_BLK_REG_ADDR + 0x590)
274#define HOST_CMDSTS9_SET_REG (APP_BLK_REG_ADDR + 0x594)
275#define HOST_CMDSTS9_REG (APP_BLK_REG_ADDR + 0x598)
276#define HOST_CMDSTS10_CLR_REG (APP_BLK_REG_ADDR + 0x5A0)
277#define HOST_CMDSTS10_SET_REG (APP_BLK_REG_ADDR + 0x5A4)
278#define HOST_CMDSTS10_REG (APP_BLK_REG_ADDR + 0x5A8)
279#define HOST_CMDSTS11_CLR_REG (APP_BLK_REG_ADDR + 0x5B0)
280#define HOST_CMDSTS11_SET_REG (APP_BLK_REG_ADDR + 0x5B4)
281#define HOST_CMDSTS11_REG (APP_BLK_REG_ADDR + 0x5B8)
282#define HOST_CMDSTS12_CLR_REG (APP_BLK_REG_ADDR + 0x5C0)
283#define HOST_CMDSTS12_SET_REG (APP_BLK_REG_ADDR + 0x5C4)
284#define HOST_CMDSTS12_REG (APP_BLK_REG_ADDR + 0x5C8)
285#define HOST_CMDSTS13_CLR_REG (APP_BLK_REG_ADDR + 0x5D0)
286#define HOST_CMDSTS13_SET_REG (APP_BLK_REG_ADDR + 0x5D4)
287#define HOST_CMDSTS13_REG (APP_BLK_REG_ADDR + 0x5D8)
288#define HOST_CMDSTS14_CLR_REG (APP_BLK_REG_ADDR + 0x5E0)
289#define HOST_CMDSTS14_SET_REG (APP_BLK_REG_ADDR + 0x5E4)
290#define HOST_CMDSTS14_REG (APP_BLK_REG_ADDR + 0x5E8)
291#define HOST_CMDSTS15_CLR_REG (APP_BLK_REG_ADDR + 0x5F0)
292#define HOST_CMDSTS15_SET_REG (APP_BLK_REG_ADDR + 0x5F4)
293#define HOST_CMDSTS15_REG (APP_BLK_REG_ADDR + 0x5F8)
294
295/**
296 * LPU0 Block Register Address Offset from BAR0
297 * Range 0x18000 - 0x18033
298 */
299#define LPU0_BLK_REG_ADDR 0x00018000
300
301/**
302 * LPU0 Registers
303 * Should these be used directly from the host,
304 * except for diagnostics ?
305 * CTL_REG : Control register
306 * CMD_REG : Triggers execution of the command in
307 * Mailbox memory
308 */
309#define LPU0_MBOX_CTL_REG (LPU0_BLK_REG_ADDR + 0x000)
310#define LPU0_MBOX_CMD_REG (LPU0_BLK_REG_ADDR + 0x004)
311#define LPU0_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x008)
312#define LPU1_MBOX_LINK_0REG (LPU0_BLK_REG_ADDR + 0x00c)
313#define LPU0_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x010)
314#define LPU1_MBOX_STATUS_0REG (LPU0_BLK_REG_ADDR + 0x014)
315#define LPU0_ERR_STATUS_REG (LPU0_BLK_REG_ADDR + 0x018)
316#define LPU0_ERR_SET_REG (LPU0_BLK_REG_ADDR + 0x020)
317
318/**
319 * LPU1 Block Register Address Offset from BAR0
320 * Range 0x18400 - 0x18433
321 */
322#define LPU1_BLK_REG_ADDR 0x00018400
323
324/**
325 * LPU1 Registers
326 * Same as LPU0 registers above
327 */
328#define LPU1_MBOX_CTL_REG (LPU1_BLK_REG_ADDR + 0x000)
329#define LPU1_MBOX_CMD_REG (LPU1_BLK_REG_ADDR + 0x004)
330#define LPU0_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x008)
331#define LPU1_MBOX_LINK_1REG (LPU1_BLK_REG_ADDR + 0x00c)
332#define LPU0_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x010)
333#define LPU1_MBOX_STATUS_1REG (LPU1_BLK_REG_ADDR + 0x014)
334#define LPU1_ERR_STATUS_REG (LPU1_BLK_REG_ADDR + 0x018)
335#define LPU1_ERR_SET_REG (LPU1_BLK_REG_ADDR + 0x020)
336
337/**
338 * PSS Block Register Address Offset from BAR0
339 * Range 0x18800 - 0x188DB
340 */
341#define PSS_BLK_REG_ADDR 0x00018800
342
343/**
344 * PSS Registers
345 * For details, see catapult_spec.pdf
346 * ERR_STATUS_REG : Indicates error in PSS module
347 * RAM_ERR_STATUS_REG : Indicates RAM module that detected error
348 */
349#define ERR_STATUS_SET (PSS_BLK_REG_ADDR + 0x018)
350#define PSS_RAM_ERR_STATUS_REG (PSS_BLK_REG_ADDR + 0x01C)
351
352/**
353 * PSS Semaphore Lock Registers, total 16
354 * The first read when unlocked returns 0
355 * and atomically sets the lock to 1.
356 * Subsequent reads return 1.
357 * To clear the lock, write 0 to the register.
358 * Range : 0x20 to 0x5c
359 */
360#define PSS_SEM_LOCK_REG(_num) \
361 (PSS_BLK_REG_ADDR + 0x020 + ((_num) << 2))
362
363/**
364 * PSS Semaphore Status Registers,
365 * corresponding to the lock registers above
366 */
367#define PSS_SEM_STATUS_REG(_num) \
368 (PSS_BLK_REG_ADDR + 0x060 + ((_num) << 2))
369
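/*
 * Example (editorial sketch, not part of the patch): taking and releasing
 * one of the 16 PSS hardware semaphores using the read-to-lock semantics
 * described above. The helper names and the `bar0` parameter (mapped BAR0
 * kernel virtual address) are assumptions for illustration.
 */
static inline int ex_pss_sem_trylock(void __iomem *bar0, int num)
{
	/* A read returns 0 and atomically takes the lock if it was free */
	return readl(bar0 + PSS_SEM_LOCK_REG(num)) == 0;
}

static inline void ex_pss_sem_unlock(void __iomem *bar0, int num)
{
	/* Writing 0 releases the lock */
	writel(0, bar0 + PSS_SEM_LOCK_REG(num));
}
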
370/**
371 * Catapult CPQ Registers
372 * Defines for Mailbox Registers
373 * Used to send mailbox commands to firmware from
374 * the host. The data part is written to the MBox
375 * memory; the registers are used to indicate that
376 * a command is resident in memory.
377 *
378 * Note : LPU0<->LPU1 mailboxes are not listed here
379 */
380#define CPQ_BLK_REG_ADDR 0x00019000
381
382#define HOSTFN0_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x130)
383#define HOSTFN0_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x134)
384#define LPU0_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x138)
385#define LPU1_HOSTFN0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x13C)
386
387#define HOSTFN1_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x140)
388#define HOSTFN1_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x144)
389#define LPU0_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x148)
390#define LPU1_HOSTFN1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x14C)
391
392#define HOSTFN2_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x170)
393#define HOSTFN2_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x174)
394#define LPU0_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x178)
395#define LPU1_HOSTFN2_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x17C)
396
397#define HOSTFN3_LPU0_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x180)
398#define HOSTFN3_LPU1_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x184)
399#define LPU0_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x188)
400#define LPU1_HOSTFN3_MBOX1_CMD_STAT (CPQ_BLK_REG_ADDR + 0x18C)
401
402/* Host Function Force Parity Error Registers */
403#define HOSTFN0_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x120)
404#define HOSTFN1_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x124)
405#define HOSTFN2_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x128)
406#define HOSTFN3_LPU_FORCE_PERR (CPQ_BLK_REG_ADDR + 0x12C)
407
408/* LL Port[0|1] Halt Mask Registers */
409#define LL_HALT_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1A0)
410#define LL_HALT_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1B0)
411
412/* LL Port[0|1] Error Mask Registers */
413#define LL_ERR_MSK_P0 (CPQ_BLK_REG_ADDR + 0x1D0)
414#define LL_ERR_MSK_P1 (CPQ_BLK_REG_ADDR + 0x1D4)
415
416/* EMC FLI (Flash Controller) Block Register Address Offset from BAR0 */
417#define FLI_BLK_REG_ADDR 0x0001D000
418
419/* EMC FLI Registers */
420#define FLI_CMD_REG (FLI_BLK_REG_ADDR + 0x000)
421#define FLI_ADDR_REG (FLI_BLK_REG_ADDR + 0x004)
422#define FLI_CTL_REG (FLI_BLK_REG_ADDR + 0x008)
423#define FLI_WRDATA_REG (FLI_BLK_REG_ADDR + 0x00C)
424#define FLI_RDDATA_REG (FLI_BLK_REG_ADDR + 0x010)
425#define FLI_DEV_STATUS_REG (FLI_BLK_REG_ADDR + 0x014)
426#define FLI_SIG_WD_REG (FLI_BLK_REG_ADDR + 0x018)
427
428/**
429 * RO register
430 * 31:16 -- Vendor Id
431 * 15:0 -- Device Id
432 */
433#define FLI_DEV_VENDOR_REG (FLI_BLK_REG_ADDR + 0x01C)
434#define FLI_ERR_STATUS_REG (FLI_BLK_REG_ADDR + 0x020)
435
436/**
437 * RAD (RxAdm) Block Register Address Offset from BAR0
438 * RAD0 Range : 0x20000 - 0x203FF
439 * RAD1 Range : 0x20400 - 0x207FF
440 */
441#define RAD0_BLK_REG_ADDR 0x00020000
442#define RAD1_BLK_REG_ADDR 0x00020400
443
444/* RAD0 Registers */
445#define RAD0_CTL_REG (RAD0_BLK_REG_ADDR + 0x000)
446#define RAD0_PE_PARM_REG (RAD0_BLK_REG_ADDR + 0x004)
447#define RAD0_BCN_REG (RAD0_BLK_REG_ADDR + 0x008)
448
449/* Default function ID register */
450#define RAD0_DEFAULT_REG (RAD0_BLK_REG_ADDR + 0x00C)
451
452/* Default promiscuous ID register */
453#define RAD0_PROMISC_REG (RAD0_BLK_REG_ADDR + 0x010)
454
455#define RAD0_BCNQ_REG (RAD0_BLK_REG_ADDR + 0x014)
456
457/*
458 * This register selects 1 of 8 PM Q's using
459 * VLAN pri, for non-BCN packets without a VLAN tag
460 */
461#define RAD0_DEFAULTQ_REG (RAD0_BLK_REG_ADDR + 0x018)
462
463#define RAD0_ERR_STS (RAD0_BLK_REG_ADDR + 0x01C)
464#define RAD0_SET_ERR_STS (RAD0_BLK_REG_ADDR + 0x020)
465#define RAD0_ERR_INT_EN (RAD0_BLK_REG_ADDR + 0x024)
466#define RAD0_FIRST_ERR (RAD0_BLK_REG_ADDR + 0x028)
467#define RAD0_FORCE_ERR (RAD0_BLK_REG_ADDR + 0x02C)
468
469#define RAD0_IF_RCVD (RAD0_BLK_REG_ADDR + 0x030)
470#define RAD0_IF_RCVD_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x034)
471#define RAD0_IF_RCVD_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x038)
472#define RAD0_IF_RCVD_VLAN (RAD0_BLK_REG_ADDR + 0x03C)
473#define RAD0_IF_RCVD_UCAST (RAD0_BLK_REG_ADDR + 0x040)
474#define RAD0_IF_RCVD_UCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x044)
475#define RAD0_IF_RCVD_UCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x048)
476#define RAD0_IF_RCVD_UCAST_VLAN (RAD0_BLK_REG_ADDR + 0x04C)
477#define RAD0_IF_RCVD_MCAST (RAD0_BLK_REG_ADDR + 0x050)
478#define RAD0_IF_RCVD_MCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x054)
479#define RAD0_IF_RCVD_MCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x058)
480#define RAD0_IF_RCVD_MCAST_VLAN (RAD0_BLK_REG_ADDR + 0x05C)
481#define RAD0_IF_RCVD_BCAST (RAD0_BLK_REG_ADDR + 0x060)
482#define RAD0_IF_RCVD_BCAST_OCTETS_HIGH (RAD0_BLK_REG_ADDR + 0x064)
483#define RAD0_IF_RCVD_BCAST_OCTETS_LOW (RAD0_BLK_REG_ADDR + 0x068)
484#define RAD0_IF_RCVD_BCAST_VLAN (RAD0_BLK_REG_ADDR + 0x06C)
485#define RAD0_DROPPED_FRAMES (RAD0_BLK_REG_ADDR + 0x070)
486
487#define RAD0_MAC_MAN_1H (RAD0_BLK_REG_ADDR + 0x080)
488#define RAD0_MAC_MAN_1L (RAD0_BLK_REG_ADDR + 0x084)
489#define RAD0_MAC_MAN_2H (RAD0_BLK_REG_ADDR + 0x088)
490#define RAD0_MAC_MAN_2L (RAD0_BLK_REG_ADDR + 0x08C)
491#define RAD0_MAC_MAN_3H (RAD0_BLK_REG_ADDR + 0x090)
492#define RAD0_MAC_MAN_3L (RAD0_BLK_REG_ADDR + 0x094)
493#define RAD0_MAC_MAN_4H (RAD0_BLK_REG_ADDR + 0x098)
494#define RAD0_MAC_MAN_4L (RAD0_BLK_REG_ADDR + 0x09C)
495
496#define RAD0_LAST4_IP (RAD0_BLK_REG_ADDR + 0x100)
497
498/* RAD1 Registers */
499#define RAD1_CTL_REG (RAD1_BLK_REG_ADDR + 0x000)
500#define RAD1_PE_PARM_REG (RAD1_BLK_REG_ADDR + 0x004)
501#define RAD1_BCN_REG (RAD1_BLK_REG_ADDR + 0x008)
502
503/* Default function ID register */
504#define RAD1_DEFAULT_REG (RAD1_BLK_REG_ADDR + 0x00C)
505
506/* Promiscuous function ID register */
507#define RAD1_PROMISC_REG (RAD1_BLK_REG_ADDR + 0x010)
508
509#define RAD1_BCNQ_REG (RAD1_BLK_REG_ADDR + 0x014)
510
511/*
512 * This register selects 1 of 8 PM Q's using
513 * VLAN pri, for non-BCN packets without a VLAN tag
514 */
515#define RAD1_DEFAULTQ_REG (RAD1_BLK_REG_ADDR + 0x018)
516
517#define RAD1_ERR_STS (RAD1_BLK_REG_ADDR + 0x01C)
518#define RAD1_SET_ERR_STS (RAD1_BLK_REG_ADDR + 0x020)
519#define RAD1_ERR_INT_EN (RAD1_BLK_REG_ADDR + 0x024)
520
521/**
522 * TXA Block Register Address Offset from BAR0
523 * TXA0 Range : 0x21000 - 0x213FF
524 * TXA1 Range : 0x21400 - 0x217FF
525 */
526#define TXA0_BLK_REG_ADDR 0x00021000
527#define TXA1_BLK_REG_ADDR 0x00021400
528
529/* TXA Registers */
530#define TXA0_CTRL_REG (TXA0_BLK_REG_ADDR + 0x000)
531#define TXA1_CTRL_REG (TXA1_BLK_REG_ADDR + 0x000)
532
533/**
534 * TSO Sequence # Registers (RO)
535 * Total 8 (for 8 queues)
536 * Holds the last seq.# for TSO frames
537 * See catapult_spec.pdf for more details
538 */
539#define TXA0_TSO_TCP_SEQ_REG(_num) \
540 (TXA0_BLK_REG_ADDR + 0x020 + ((_num) << 2))
541
542#define TXA1_TSO_TCP_SEQ_REG(_num) \
543 (TXA1_BLK_REG_ADDR + 0x020 + ((_num) << 2))
544
545/**
546 * TSO IP ID # Registers (RO)
547 * Total 8 (for 8 queues)
548 * Holds the last IP ID for TSO frames
549 * See catapult_spec.pdf for more details
550 */
551#define TXA0_TSO_IP_INFO_REG(_num) \
552 (TXA0_BLK_REG_ADDR + 0x040 + ((_num) << 2))
553
554#define TXA1_TSO_IP_INFO_REG(_num) \
555 (TXA1_BLK_REG_ADDR + 0x040 + ((_num) << 2))
556
557/**
558 * RXA Block Register Address Offset from BAR0
559 * RXA0 Range : 0x21800 - 0x21BFF
560 * RXA1 Range : 0x21C00 - 0x21FFF
561 */
562#define RXA0_BLK_REG_ADDR 0x00021800
563#define RXA1_BLK_REG_ADDR 0x00021C00
564
565/* RXA Registers */
566#define RXA0_CTL_REG (RXA0_BLK_REG_ADDR + 0x040)
567#define RXA1_CTL_REG (RXA1_BLK_REG_ADDR + 0x040)
568
569/**
570 * PPLB Block Register Address Offset from BAR0
571 * PPLB0 Range : 0x22000 - 0x223FF
572 * PPLB1 Range : 0x22400 - 0x227FF
573 */
574#define PLB0_BLK_REG_ADDR 0x00022000
575#define PLB1_BLK_REG_ADDR 0x00022400
576
577/**
578 * PLB Registers
579 * Holds the RL timer time stamps used in RLT tagged frames
580 */
581#define PLB0_ECM_TIMER_REG (PLB0_BLK_REG_ADDR + 0x05C)
582#define PLB1_ECM_TIMER_REG (PLB1_BLK_REG_ADDR + 0x05C)
583
584/* Controls the rate-limiter on each of the priority classes */
585#define PLB0_RL_CTL (PLB0_BLK_REG_ADDR + 0x060)
586#define PLB1_RL_CTL (PLB1_BLK_REG_ADDR + 0x060)
587
588/**
589 * Max byte register, total 8, 0-7
590 * see catapult_spec.pdf for details
591 */
592#define PLB0_RL_MAX_BC(_num) \
593 (PLB0_BLK_REG_ADDR + 0x064 + ((_num) << 2))
594#define PLB1_RL_MAX_BC(_num) \
595 (PLB1_BLK_REG_ADDR + 0x064 + ((_num) << 2))
596
597/**
598 * RL Time Unit Register for priorities 0-7
599 * 4 bits per priority;
600 * (2^rl_unit)*1us is the actual time period
601 */
602#define PLB0_RL_TU_PRIO (PLB0_BLK_REG_ADDR + 0x084)
603#define PLB1_RL_TU_PRIO (PLB1_BLK_REG_ADDR + 0x084)
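
/*
 * Worked example (editorial): with 4 bits per priority, a priority whose
 * rl_unit field reads 3 gets a period of (2^3)*1us = 8us; the largest
 * encodable value, 15, gives (2^15)*1us, roughly 32.8ms.
 */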
604
605/**
606 * RL byte count register:
607 * bytes transmitted in the (2^rl_unit)*1us time period
608 * 1 per priority, 8 in all, 0-7.
609 */
610#define PLB0_RL_BYTE_CNT(_num) \
611 (PLB0_BLK_REG_ADDR + 0x088 + ((_num) << 2))
612#define PLB1_RL_BYTE_CNT(_num) \
613 (PLB1_BLK_REG_ADDR + 0x088 + ((_num) << 2))
614
615/**
616 * RL Min factor register
617 * 2 bits per priority,
618 * 4 factors possible: 1, 0.5, 0.25, 0
619 * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
620 */
621#define PLB0_RL_MIN_REG (PLB0_BLK_REG_ADDR + 0x0A8)
622#define PLB1_RL_MIN_REG (PLB1_BLK_REG_ADDR + 0x0A8)
623
624/**
625 * RL Max factor register
626 * 2 bits per priority,
627 * 4 factors possible: 1, 0.5, 0.25, 0
628 * 2'b00 - 0; 2'b01 - 0.25; 2'b10 - 0.5; 2'b11 - 1
629 */
630#define PLB0_RL_MAX_REG (PLB0_BLK_REG_ADDR + 0x0AC)
631#define PLB1_RL_MAX_REG (PLB1_BLK_REG_ADDR + 0x0AC)
632
633/* MAC SERDES Address Paging register */
634#define PLB0_EMS_ADD_REG (PLB0_BLK_REG_ADDR + 0xD0)
635#define PLB1_EMS_ADD_REG (PLB1_BLK_REG_ADDR + 0xD0)
636
637/* LL EMS Registers */
638#define LL_EMS0_BLK_REG_ADDR 0x00026800
639#define LL_EMS1_BLK_REG_ADDR 0x00026C00
640
641/**
642 * BPC Block Register Address Offset from BAR0
643 * BPC0 Range : 0x23000 - 0x233FF
644 * BPC1 Range : 0x23400 - 0x237FF
645 */
646#define BPC0_BLK_REG_ADDR 0x00023000
647#define BPC1_BLK_REG_ADDR 0x00023400
648
649/**
650 * PMM Block Register Address Offset from BAR0
651 * PMM0 Range : 0x23800 - 0x23BFF
652 * PMM1 Range : 0x23C00 - 0x23FFF
653 */
654#define PMM0_BLK_REG_ADDR 0x00023800
655#define PMM1_BLK_REG_ADDR 0x00023C00
656
657/**
658 * HQM Block Register Address Offset from BAR0
659 * HQM0 Range : 0x24000 - 0x243FF
660 * HQM1 Range : 0x24400 - 0x247FF
661 */
662#define HQM0_BLK_REG_ADDR 0x00024000
663#define HQM1_BLK_REG_ADDR 0x00024400
664
665/**
666 * HQM Control Register
667 * Controls some aspects of IB
668 * See catapult_spec.pdf for details
669 */
670#define HQM0_CTL_REG (HQM0_BLK_REG_ADDR + 0x000)
671#define HQM1_CTL_REG (HQM1_BLK_REG_ADDR + 0x000)
672
673/**
674 * HQM Stop Q Semaphore Registers.
675 * Only one Queue resource can be stopped at
676 * any given time. This register controls access
677 * to the single stop Q resource.
678 * See catapult_spec.pdf for details
679 */
680#define HQM0_RXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x028)
681#define HQM0_TXQ_STOP_SEM (HQM0_BLK_REG_ADDR + 0x02C)
682#define HQM1_RXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x028)
683#define HQM1_TXQ_STOP_SEM (HQM1_BLK_REG_ADDR + 0x02C)
684
685/**
686 * LUT Block Register Address Offset from BAR0
687 * LUT0 Range : 0x25800 - 0x25BFF
688 * LUT1 Range : 0x25C00 - 0x25FFF
689 */
690#define LUT0_BLK_REG_ADDR 0x00025800
691#define LUT1_BLK_REG_ADDR 0x00025C00
692
693/**
694 * LUT Registers
695 * See catapult_spec.pdf for details
696 */
697#define LUT0_ERR_STS (LUT0_BLK_REG_ADDR + 0x000)
698#define LUT1_ERR_STS (LUT1_BLK_REG_ADDR + 0x000)
699#define LUT0_SET_ERR_STS (LUT0_BLK_REG_ADDR + 0x004)
700#define LUT1_SET_ERR_STS (LUT1_BLK_REG_ADDR + 0x004)
701
702/**
703 * TRC (Debug/Trace) Register Offset from BAR0
704 * Range : 0x26000 - 0x263FF
705 */
706#define TRC_BLK_REG_ADDR 0x00026000
707
708/**
709 * TRC Registers
710 * See catapult_spec.pdf for details of each
711 */
712#define TRC_CTL_REG (TRC_BLK_REG_ADDR + 0x000)
713#define TRC_MODS_REG (TRC_BLK_REG_ADDR + 0x004)
714#define TRC_TRGC_REG (TRC_BLK_REG_ADDR + 0x008)
715#define TRC_CNT1_REG (TRC_BLK_REG_ADDR + 0x010)
716#define TRC_CNT2_REG (TRC_BLK_REG_ADDR + 0x014)
717#define TRC_NXTS_REG (TRC_BLK_REG_ADDR + 0x018)
718#define TRC_DIRR_REG (TRC_BLK_REG_ADDR + 0x01C)
719
720/**
721 * TRC Trigger match filters, total 10
722 * Determines the trigger condition
723 */
724#define TRC_TRGM_REG(_num) \
725 (TRC_BLK_REG_ADDR + 0x040 + ((_num) << 2))
726
727/**
728 * TRC Next State filters, total 10
729 * Determines the next state conditions
730 */
731#define TRC_NXTM_REG(_num) \
732 (TRC_BLK_REG_ADDR + 0x080 + ((_num) << 2))
733
734/**
735 * TRC Store Match filters, total 10
736 * Determines the store conditions
737 */
738#define TRC_STRM_REG(_num) \
739 (TRC_BLK_REG_ADDR + 0x0C0 + ((_num) << 2))
740
741/* DOORBELLS ACCESS */
742
743/**
744 * Catapult doorbells
745 * Each doorbell-queue set has
746 * 1 RxQ, 1 TxQ, 2 IBs in that order
747 * Each entry is 32 bytes in size, even though only 1 word
748 * is used. For the non-VM case each doorbell-q set is
749 * separated by 128 bytes; for the VM case it is separated
750 * by 4K bytes
751 * Non VM case Range : 0x38000 - 0x39FFF
752 * VM case Range : 0x100000 - 0x11FFFF
753 * The range applies to both HQMs
754 */
755#define HQM_DOORBELL_BLK_BASE_ADDR 0x00038000
756#define HQM_DOORBELL_VM_BLK_BASE_ADDR 0x00100000
757
758/* MEMORY ACCESS */
759
760/**
761 * Catapult H/W Block Memory Access Address
762 * To the host a memory space of 32K (page) is visible
763 * at a time. The address range is from 0x08000 to 0x0FFFF
764 */
765#define HW_BLK_HOST_MEM_ADDR 0x08000
766
767/**
768 * Catapult LUT Memory Access Page Numbers
769 * Range : LUT0 0xa0-0xa1
770 * LUT1 0xa2-0xa3
771 */
772#define LUT0_MEM_BLK_BASE_PG_NUM 0x000000A0
773#define LUT1_MEM_BLK_BASE_PG_NUM 0x000000A2
774
775/**
776 * Catapult RxFn Database Memory Block Base Offset
777 *
778 * The Rx function database exists in LUT block.
779 * In PCIe space this is accessible as a 256x32
780 * bit block. Each entry in this database is 4
781 * (4 byte) words. Max. entries is 64.
782 * Address of an entry corresponding to a function
783 * = base_addr + (function_no. * 16)
784 */
785#define RX_FNDB_RAM_BASE_OFFSET 0x0000B400
786
787/**
788 * Catapult TxFn Database Memory Block Base Offset Address
789 *
790 * The Tx function database exists in LUT block.
791 * In PCIe space this is accessible as a 64x32
792 * bit block. Each entry in this database is 1
793 * (4 byte) word. Max. entries is 64.
794 * Address of an entry corresponding to a function
795 * = base_addr + (function_no. * 4)
796 */
797#define TX_FNDB_RAM_BASE_OFFSET 0x0000B800
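
/*
 * Example (editorial): entry-address arithmetic for the two function
 * databases above, straight from the formulas in their comments. The
 * EX_* macro names are hypothetical, not part of the driver.
 */
#define EX_RX_FNDB_ENTRY_OFFSET(_fn) \
	(RX_FNDB_RAM_BASE_OFFSET + ((_fn) * 16))	/* 4 words per entry */
#define EX_TX_FNDB_ENTRY_OFFSET(_fn) \
	(TX_FNDB_RAM_BASE_OFFSET + ((_fn) * 4))	/* 1 word per entry */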
798
799/**
800 * Catapult Unicast CAM Base Offset Address
801 *
802 * Exists in LUT memory space.
803 * Shared by both the LL & FCoE driver.
804 * Size is 256x48 bits; mapped to PCIe space
805 * 512x32 bit blocks. For each address, bits
806 * are written in the order : [47:32] and then
807 * [31:0].
808 */
809#define UCAST_CAM_BASE_OFFSET 0x0000A800
810
811/**
812 * Catapult Unicast RAM Base Offset Address
813 *
814 * Exists in LUT memory space.
815 * Shared by both the LL & FCoE driver.
816 * Size is 256x9 bits.
817 */
818#define UCAST_RAM_BASE_OFFSET 0x0000B000
819
820/**
821 * Catapult Multicast CAM Base Offset Address
822 *
823 * Exists in LUT memory space.
824 * Shared by both the LL & FCoE driver.
825 * Size is 256x48 bits; mapped to PCIe space
826 * 512x32 bit blocks. For each address, bits
827 * are written in the order : [47:32] and then
828 * [31:0].
829 */
830#define MCAST_CAM_BASE_OFFSET 0x0000A000
831
832/**
833 * Catapult VLAN RAM Base Offset Address
834 *
835 * Exists in LUT memory space.
836 * Size is 4096x66 bits; mapped to PCIe space as
837 * 8192x32 bit blocks.
838 * All the 4K entries are within the address range
839 * 0x0000 to 0x8000, so in the first LUT page.
840 */
841#define VLAN_RAM_BASE_OFFSET 0x00000000
842
843/**
844 * Catapult Tx Stats RAM Base Offset Address
845 *
846 * Exists in LUT memory space.
847 * Size is 1024x33 bits;
848 * Each Tx function has 64 bytes of space
849 */
850#define TX_STATS_RAM_BASE_OFFSET 0x00009000
851
852/**
853 * Catapult Rx Stats RAM Base Offset Address
854 *
855 * Exists in LUT memory space.
856 * Size is 1024x33 bits;
857 * Each Rx function has 64 bytes of space
858 */
859#define RX_STATS_RAM_BASE_OFFSET 0x00008000
860
861/* Catapult RXA Memory Access Page Numbers */
862#define RXA0_MEM_BLK_BASE_PG_NUM 0x0000008C
863#define RXA1_MEM_BLK_BASE_PG_NUM 0x0000008D
864
865/**
866 * Catapult Multicast Vector Table Base Offset Address
867 *
868 * Exists in RxA memory space.
869 * Organized as 512x65 bit block.
870 * However, 16 bytes (a power of 2) are allocated per entry,
871 * for a total size of 512*16 bytes.
872 * There are two logical divisions, 256 entries each :
873 * a) Entries 0x00 to 0xff (256) -- Approx. MVT
874 * Offset 0x000 to 0xFFF
875 * b) Entries 0x100 to 0x1ff (256) -- Exact MVT
876 * Offsets 0x1000 to 0x1FFF
877 */
878#define MCAST_APPROX_MVT_BASE_OFFSET 0x00000000
879#define MCAST_EXACT_MVT_BASE_OFFSET 0x00001000
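
/*
 * Example (editorial): each MVT entry occupies 16 bytes, so entry _n of
 * either division sits at base + (_n * 16); e.g. exact-MVT entry 5 is at
 * 0x1000 + 0x50. The macro name is hypothetical.
 */
#define EX_MCAST_MVT_ENTRY_OFFSET(_base, _n)	((_base) + ((_n) << 4))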
880
881/**
882 * Catapult RxQ Translate Table (RIT) Base Offset Address
883 *
884 * Exists in RxA memory space
885 * Total no. of entries 64
886 * Each entry is 1 (4 byte) word.
887 * 31:12 -- Reserved
888 * 11:0 -- Two 6 bit RxQ Ids
889 */
890#define FUNCTION_TO_RXQ_TRANSLATE 0x00002000
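
/*
 * Example (editorial): unpacking the two 6 bit RxQ ids packed into bits
 * 11:0 of a RIT entry. Which id is the small vs. large RxQ is an
 * assumption here; catapult_spec.pdf is the authority.
 */
#define EX_RIT_RXQ_ID0(_entry)	((_entry) & 0x3f)
#define EX_RIT_RXQ_ID1(_entry)	(((_entry) >> 6) & 0x3f)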
891
892/* Catapult RxAdm (RAD) Memory Access Page Numbers */
893#define RAD0_MEM_BLK_BASE_PG_NUM 0x00000086
894#define RAD1_MEM_BLK_BASE_PG_NUM 0x00000087
895
896/**
897 * Catapult RSS Table Base Offset Address
898 *
899 * Exists in RAD memory space.
900 * Each entry is 352 bits, but aligned on a
901 * 64 byte (512 bit) boundary. Accessed in
902 * 4 byte words, the whole entry can be
903 * broken into 11 word accesses.
904 */
905#define RSS_TABLE_BASE_OFFSET 0x00000800
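
/*
 * Example (editorial): reading one RSS entry as the 11 word accesses
 * described above (352 bits on a 64 byte boundary). Assumes the caller
 * has already selected the proper RAD memory page; the helper name is
 * hypothetical.
 */
static inline void ex_rss_entry_read(void __iomem *base_addr, int idx,
				     u32 *buf)
{
	/* base_addr = BNA_GET_MEM_BASE_ADDR(bar0, RSS_TABLE_BASE_OFFSET) */
	int w;

	for (w = 0; w < 11; w++)
		buf[w] = readl(base_addr + (idx << 6) + (w << 2));
}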
906
907/**
908 * Catapult CPQ Block Page Number
909 * This value is written to the page number registers
910 * to access the memory associated with the mailboxes.
911 */
912#define CPQ_BLK_PG_NUM 0x00000005
913
914/**
915 * Clarification :
916 * LL functions are 2 & 3; can HostFn0/HostFn1
917 * <-> LPU0/LPU1 memories be used ?
918 */
919/**
920 * Catapult HostFn0/HostFn1 to LPU0/LPU1 Mbox memory
921 * Per catapult_spec.pdf, the mbox memory resides
922 * in the register space at an offset of 0x200
923 */
924#define CPQ_BLK_REG_MBOX_ADDR (CPQ_BLK_REG_ADDR + 0x200)
925
926#define HOSTFN_LPU_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x000)
927
928/* Catapult LPU0/LPU1 to HostFn0/HostFn1 Mbox memory */
929#define LPU_HOSTFN_MBOX (CPQ_BLK_REG_MBOX_ADDR + 0x080)
930
931/**
932 * Catapult HQM Block Page Number
933 * This is written to the page number register for
934 * the appropriate function to access the memory
935 * associated with HQM
936 */
937#define HQM0_BLK_PG_NUM 0x00000096
938#define HQM1_BLK_PG_NUM 0x00000097
939
940/**
941 * Note that TxQ and RxQ entries are interleaved in
942 * the HQM memory, i.e. RXQ0, TXQ0, RXQ1, TXQ1, etc.
943 */
944
945#define HQM_RXTX_Q_RAM_BASE_OFFSET 0x00004000
946
947/**
948 * CQ Memory
949 * Exists in HQM Memory space
950 * Each entry is 16 (4 byte) words of which
951 * only 12 words are used for configuration
952 * Total 64 entries per HQM memory space
953 */
954#define HQM_CQ_RAM_BASE_OFFSET 0x00006000
955
956/**
957 * Interrupt Block (IB) Memory
958 * Exists in HQM Memory space
959 * Each entry is 8 (4 byte) words of which
960 * only 5 words are used for configuration
961 * Total 128 entries per HQM memory space
962 */
963#define HQM_IB_RAM_BASE_OFFSET 0x00001000
964
965/**
966 * Index Table (IT) Memory
967 * Exists in HQM Memory space
968 * Each entry is 1 (4 byte) word which
969 * is used for configuration
970 * Total 128 entries per HQM memory space
971 */
972#define HQM_INDX_TBL_RAM_BASE_OFFSET 0x00002000
973
974/**
975 * PSS Block Memory Page Number
976 * This is written to the appropriate page number
977 * register to access the CPU memory.
978 * Also known as the PSS secondary memory (SMEM).
979 * Range : 0x180 to 0x1CF
980 * See catapult_spec.pdf for details
981 */
982#define PSS_BLK_PG_NUM 0x00000180
983
984/**
985 * Offsets of different instances of PSS SMEM
986 * 2.5M of contiguous 1T memory space : 2 blocks
987 * of 1M each (32 pages each, page=32KB) and 4 smaller
988 * blocks of 128K each (4 pages each, page=32KB)
989 * PSS_LMEM_INST0 is used for firmware download
990 */
991#define PSS_LMEM_INST0 0x00000000
992#define PSS_LMEM_INST1 0x00100000
993#define PSS_LMEM_INST2 0x00200000
994#define PSS_LMEM_INST3 0x00220000
995#define PSS_LMEM_INST4 0x00240000
996#define PSS_LMEM_INST5 0x00260000
997
998#define BNA_PCI_REG_CT_ADDRSZ (0x40000)
999
1000#define BNA_GET_PAGE_NUM(_base_page, _offset) \
1001 ((_base_page) + ((_offset) >> 15))
1002
1003#define BNA_GET_PAGE_OFFSET(_offset) \
1004 ((_offset) & 0x7fff)
1005
1006#define BNA_GET_MEM_BASE_ADDR(_bar0, _base_offset) \
1007 ((_bar0) + HW_BLK_HOST_MEM_ADDR \
1008 + BNA_GET_PAGE_OFFSET((_base_offset)))
1009
1010#define BNA_GET_VLAN_MEM_ENTRY_ADDR(_bar0, _fn_id, _vlan_id)\
1011 (_bar0 + (HW_BLK_HOST_MEM_ADDR) \
1012 + (BNA_GET_PAGE_OFFSET(VLAN_RAM_BASE_OFFSET)) \
1013 + (((_fn_id) & 0x3f) << 9) \
1014 + (((_vlan_id) & 0xfe0) >> 3))
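
/*
 * Example (editorial): one paged access through the 32KB host window --
 * program the page register, then read within the window. A sketch only;
 * it mirrors how the driver uses these macros elsewhere in this patch.
 */
static inline u32 ex_paged_readl(struct bna *bna, u32 base_pg, u32 offset)
{
	writel(BNA_GET_PAGE_NUM(base_pg, offset), bna->regs.page_addr);
	return readl(BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva, offset));
}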
1015
1016/**
1017 *
1018 * Interrupt related bits, flags and macros
1019 *
1020 */
1021
1022#define __LPU02HOST_MBOX0_STATUS_BITS 0x00100000
1023#define __LPU12HOST_MBOX0_STATUS_BITS 0x00200000
1024#define __LPU02HOST_MBOX1_STATUS_BITS 0x00400000
1025#define __LPU12HOST_MBOX1_STATUS_BITS 0x00800000
1026
1027#define __LPU02HOST_MBOX0_MASK_BITS 0x00100000
1028#define __LPU12HOST_MBOX0_MASK_BITS 0x00200000
1029#define __LPU02HOST_MBOX1_MASK_BITS 0x00400000
1030#define __LPU12HOST_MBOX1_MASK_BITS 0x00800000
1031
1032#define __LPU2HOST_MBOX_MASK_BITS \
1033 (__LPU02HOST_MBOX0_MASK_BITS | __LPU02HOST_MBOX1_MASK_BITS | \
1034 __LPU12HOST_MBOX0_MASK_BITS | __LPU12HOST_MBOX1_MASK_BITS)
1035
1036#define __LPU2HOST_IB_STATUS_BITS 0x0000ffff
1037
1038#define BNA_IS_LPU0_MBOX_INTR(_intr_status) \
1039 ((_intr_status) & (__LPU02HOST_MBOX0_STATUS_BITS | \
1040 __LPU02HOST_MBOX1_STATUS_BITS))
1041
1042#define BNA_IS_LPU1_MBOX_INTR(_intr_status) \
1043 ((_intr_status) & (__LPU12HOST_MBOX0_STATUS_BITS | \
1044 __LPU12HOST_MBOX1_STATUS_BITS))
1045
1046#define BNA_IS_MBOX_INTR(_intr_status) \
1047 ((_intr_status) & \
1048 (__LPU02HOST_MBOX0_STATUS_BITS | \
1049 __LPU02HOST_MBOX1_STATUS_BITS | \
1050 __LPU12HOST_MBOX0_STATUS_BITS | \
1051 __LPU12HOST_MBOX1_STATUS_BITS))
1052
1053#define __EMC_ERROR_STATUS_BITS 0x00010000
1054#define __LPU0_ERROR_STATUS_BITS 0x00020000
1055#define __LPU1_ERROR_STATUS_BITS 0x00040000
1056#define __PSS_ERROR_STATUS_BITS 0x00080000
1057
1058#define __HALT_STATUS_BITS 0x01000000
1059
1060#define __EMC_ERROR_MASK_BITS 0x00010000
1061#define __LPU0_ERROR_MASK_BITS 0x00020000
1062#define __LPU1_ERROR_MASK_BITS 0x00040000
1063#define __PSS_ERROR_MASK_BITS 0x00080000
1064
1065#define __HALT_MASK_BITS 0x01000000
1066
1067#define __ERROR_MASK_BITS \
1068 (__EMC_ERROR_MASK_BITS | __LPU0_ERROR_MASK_BITS | \
1069 __LPU1_ERROR_MASK_BITS | __PSS_ERROR_MASK_BITS | \
1070 __HALT_MASK_BITS)
1071
1072#define BNA_IS_ERR_INTR(_intr_status) \
1073 ((_intr_status) & \
1074 (__EMC_ERROR_STATUS_BITS | \
1075 __LPU0_ERROR_STATUS_BITS | \
1076 __LPU1_ERROR_STATUS_BITS | \
1077 __PSS_ERROR_STATUS_BITS | \
1078 __HALT_STATUS_BITS))
1079
1080#define BNA_IS_MBOX_ERR_INTR(_intr_status) \
1081 (BNA_IS_MBOX_INTR((_intr_status)) | \
1082 BNA_IS_ERR_INTR((_intr_status)))
1083
1084#define BNA_IS_INTX_DATA_INTR(_intr_status) \
1085 ((_intr_status) & __LPU2HOST_IB_STATUS_BITS)
1086
1087#define BNA_INTR_STATUS_MBOX_CLR(_intr_status) \
1088do { \
1089 (_intr_status) &= ~(__LPU02HOST_MBOX0_STATUS_BITS | \
1090 __LPU02HOST_MBOX1_STATUS_BITS | \
1091 __LPU12HOST_MBOX0_STATUS_BITS | \
1092 __LPU12HOST_MBOX1_STATUS_BITS); \
1093} while (0)
1094
1095#define BNA_INTR_STATUS_ERR_CLR(_intr_status) \
1096do { \
1097 (_intr_status) &= ~(__EMC_ERROR_STATUS_BITS | \
1098 __LPU0_ERROR_STATUS_BITS | \
1099 __LPU1_ERROR_STATUS_BITS | \
1100 __PSS_ERROR_STATUS_BITS | \
1101 __HALT_STATUS_BITS); \
1102} while (0)
1103
1104#define bna_intx_disable(_bna, _cur_mask) \
1105{ \
1106 (_cur_mask) = readl((_bna)->regs.fn_int_mask);\
1107 writel(0xffffffff, (_bna)->regs.fn_int_mask);\
1108}
1109
1110#define bna_intx_enable(bna, new_mask) \
1111 writel((new_mask), (bna)->regs.fn_int_mask)
1112
1113#define bna_mbox_intr_disable(bna) \
1114 writel((readl((bna)->regs.fn_int_mask) | \
1115 (__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
1116 (bna)->regs.fn_int_mask)
1117
1118#define bna_mbox_intr_enable(bna) \
1119 writel((readl((bna)->regs.fn_int_mask) & \
1120 ~(__LPU2HOST_MBOX_MASK_BITS | __ERROR_MASK_BITS)), \
1121 (bna)->regs.fn_int_mask)
1122
1123#define bna_intr_status_get(_bna, _status) \
1124{ \
1125 (_status) = readl((_bna)->regs.fn_int_status); \
1126 if ((_status)) { \
1127 writel((_status) & ~(__LPU02HOST_MBOX0_STATUS_BITS |\
1128 __LPU02HOST_MBOX1_STATUS_BITS |\
1129 __LPU12HOST_MBOX0_STATUS_BITS |\
1130 __LPU12HOST_MBOX1_STATUS_BITS), \
1131 (_bna)->regs.fn_int_status);\
1132 } \
1133}
1134
1135#define bna_intr_status_get_no_clr(_bna, _status) \
1136 (_status) = readl((_bna)->regs.fn_int_status)
1137
1138#define bna_intr_mask_get(bna, mask) \
1139 (*mask) = readl((bna)->regs.fn_int_mask)
1140
1141#define bna_intr_ack(bna, intr_bmap) \
1142 writel((intr_bmap), (bna)->regs.fn_int_status)
1143
1144#define bna_ib_intx_disable(bna, ib_id) \
1145 writel(readl((bna)->regs.fn_int_mask) | \
1146 (1 << (ib_id)), \
1147 (bna)->regs.fn_int_mask)
1148
1149#define bna_ib_intx_enable(bna, ib_id) \
1150 writel(readl((bna)->regs.fn_int_mask) & \
1151 ~(1 << (ib_id)), \
1152 (bna)->regs.fn_int_mask)
1153
1154#define bna_mbox_msix_idx_set(_device) \
1155do {\
1156 writel(((_device)->vector & 0x000001FF), \
1157 (_device)->bna->pcidev.pci_bar_kva + \
1158 reg_offset[(_device)->bna->pcidev.pci_func].msix_idx);\
1159} while (0)
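
/*
 * Example (editorial): how an INTx handler might classify an interrupt
 * using the macros above -- mailbox/error bits first, then the IB (data)
 * bits in 15:0. A sketch only, not the driver's actual ISR.
 */
static inline void ex_isr_classify(struct bna *bna)
{
	u32 status;

	bna_intr_status_get(bna, status);	/* reads and acks mbox bits */
	if (BNA_IS_MBOX_ERR_INTR(status))
		/* service the mailbox / error path */;
	if (BNA_IS_INTX_DATA_INTR(status))
		/* schedule polling for the IBs set in 15:0 */;
}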
1160
1161/**
1162 *
1163 * TxQ, RxQ, CQ related bits, offsets, macros
1164 *
1165 */
1166
1167#define BNA_Q_IDLE_STATE 0x00008001
1168
1169#define BNA_GET_DOORBELL_BASE_ADDR(_bar0) \
1170 ((_bar0) + HQM_DOORBELL_BLK_BASE_ADDR)
1171
1172#define BNA_GET_DOORBELL_ENTRY_OFFSET(_entry) \
1173 ((HQM_DOORBELL_BLK_BASE_ADDR) \
1174 + (_entry << 7))
1175
1176#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
1177 (0x80000000 | ((_timeout) << 16) | (_events))
1178
1179#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
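
/*
 * Example (editorial): acknowledging 4 events on an IB doorbell with a
 * coalescing timeout of 5 units, then disabling interrupts on the same
 * doorbell. `db` (an __iomem doorbell address) is an assumption.
 */
static inline void ex_ib_ack_then_disable(void __iomem *db)
{
	writel(BNA_DOORBELL_IB_INT_ACK(5, 4), db);
	writel(BNA_DOORBELL_IB_INT_DISABLE, db);
}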
1180
1181/* TxQ Entry Opcodes */
1182#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
1183#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
1184#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
1185
1186/* TxQ Entry Control Flags */
1187#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
1188#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
1189#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
1190#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
1191#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
1192#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
1193#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
1194
1195#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
1196 (((_hdr_size) << 10) | ((_offset) & 0x3FF))
1197
1198/*
1199 * Completion Q defines
1200 */
1201/* CQ Entry Flags */
1202#define BNA_CQ_EF_MAC_ERROR (1 << 0)
1203#define BNA_CQ_EF_FCS_ERROR (1 << 1)
1204#define BNA_CQ_EF_TOO_LONG (1 << 2)
1205#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
1206
1207#define BNA_CQ_EF_RSVD1 (1 << 4)
1208#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
1209#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
1210#define BNA_CQ_EF_HDS_HEADER (1 << 7)
1211
1212#define BNA_CQ_EF_UDP (1 << 8)
1213#define BNA_CQ_EF_TCP (1 << 9)
1214#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
1215#define BNA_CQ_EF_IPV6 (1 << 11)
1216
1217#define BNA_CQ_EF_IPV4 (1 << 12)
1218#define BNA_CQ_EF_VLAN (1 << 13)
1219#define BNA_CQ_EF_RSS (1 << 14)
1220#define BNA_CQ_EF_RSVD2 (1 << 15)
1221
1222#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
1223#define BNA_CQ_EF_MCAST (1 << 17)
1224#define BNA_CQ_EF_BCAST (1 << 18)
1225#define BNA_CQ_EF_REMOTE (1 << 19)
1226
1227#define BNA_CQ_EF_LOCAL (1 << 20)
1228
1229/**
1230 *
1231 * Data structures
1232 *
1233 */
1234
1235enum txf_flags {
1236 BFI_TXF_CF_ENABLE = 1 << 0,
1237 BFI_TXF_CF_VLAN_FILTER = 1 << 8,
1238 BFI_TXF_CF_VLAN_ADMIT = 1 << 9,
1239 BFI_TXF_CF_VLAN_INSERT = 1 << 10,
1240 BFI_TXF_CF_RSVD1 = 1 << 11,
1241 BFI_TXF_CF_MAC_SA_CHECK = 1 << 12,
1242 BFI_TXF_CF_VLAN_WI_BASED = 1 << 13,
1243 BFI_TXF_CF_VSWITCH_MCAST = 1 << 14,
1244 BFI_TXF_CF_VSWITCH_UCAST = 1 << 15,
1245 BFI_TXF_CF_RSVD2 = 0x7F << 1
1246};
1247
1248enum ib_flags {
1249 BFI_IB_CF_MASTER_ENABLE = (1 << 0),
1250 BFI_IB_CF_MSIX_MODE = (1 << 1),
1251 BFI_IB_CF_COALESCING_MODE = (1 << 2),
1252 BFI_IB_CF_INTER_PKT_ENABLE = (1 << 3),
1253 BFI_IB_CF_INT_ENABLE = (1 << 4),
1254 BFI_IB_CF_INTER_PKT_DMA = (1 << 5),
1255 BFI_IB_CF_ACK_PENDING = (1 << 6),
1256 BFI_IB_CF_RESERVED1 = (1 << 7)
1257};
1258
1259enum rss_hash_type {
1260 BFI_RSS_T_V4_TCP = (1 << 11),
1261 BFI_RSS_T_V4_IP = (1 << 10),
1262 BFI_RSS_T_V6_TCP = (1 << 9),
1263 BFI_RSS_T_V6_IP = (1 << 8)
1264};
1265enum hds_header_type {
1266 BNA_HDS_T_V4_TCP = (1 << 11),
1267 BNA_HDS_T_V4_UDP = (1 << 10),
1268 BNA_HDS_T_V6_TCP = (1 << 9),
1269 BNA_HDS_T_V6_UDP = (1 << 8),
1270 BNA_HDS_FORCED = (1 << 7),
1271};
1272enum rxf_flags {
1273 BNA_RXF_CF_SM_LG_RXQ = (1 << 15),
1274 BNA_RXF_CF_DEFAULT_VLAN = (1 << 14),
1275 BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE = (1 << 13),
1276 BNA_RXF_CF_VLAN_STRIP = (1 << 12),
1277 BNA_RXF_CF_RSS_ENABLE = (1 << 8)
1278};
1279struct bna_chip_regs_offset {
1280 u32 page_addr;
1281 u32 fn_int_status;
1282 u32 fn_int_mask;
1283 u32 msix_idx;
1284};
1285extern const struct bna_chip_regs_offset reg_offset[];
1286
1287struct bna_chip_regs {
1288 void __iomem *page_addr;
1289 void __iomem *fn_int_status;
1290 void __iomem *fn_int_mask;
1291};
1292
1293struct bna_txq_mem {
1294 u32 pg_tbl_addr_lo;
1295 u32 pg_tbl_addr_hi;
1296 u32 cur_q_entry_lo;
1297 u32 cur_q_entry_hi;
1298 u32 reserved1;
1299 u32 reserved2;
1300 u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
1301 /* 15:0 ->producer pointer (index?) */
1302 u32 entry_n_pg_size; /* 31:16->entry size */
1303 /* 15:0 ->page size */
1304 u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
1305 /* 23:16->Int Blk Offset */
1306 /* 15:0 ->consumer pointer(index?) */
1307 u32 cns_ptr2_n_q_state; /* 31:16->cons. ptr 2; 15:0-> Q state */
1308 u32 nxt_qid_n_fid_n_pri; /* 17:10->next QId; */
1309 /* 9:3->FID; 2:0->Priority */
1310 u32 wvc_n_cquota_n_rquota; /* 31:24->WI Vector Count; */
1311 /* 23:12->Cfg Quota; */
1312 /* 11:0 ->Run Quota */
1313 u32 reserved3[4];
1314};
1315
1316struct bna_rxq_mem {
1317 u32 pg_tbl_addr_lo;
1318 u32 pg_tbl_addr_hi;
1319 u32 cur_q_entry_lo;
1320 u32 cur_q_entry_hi;
1321 u32 reserved1;
1322 u32 reserved2;
1323 u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
1324 /* 15:0 ->producer pointer (index?) */
1325 u32 entry_n_pg_size; /* 31:16->entry size */
1326 /* 15:0 ->page size */
1327 u32 sg_n_cq_n_cns_ptr; /* 31:28->reserved; 27:24->sg count */
1328 /* 23:16->CQ; */
1329 /* 15:0->consumer pointer(index?) */
1330 u32 buf_sz_n_q_state; /* 31:16->buffer size; 15:0-> Q state */
1331 u32 next_qid; /* 17:10->next QId */
1332 u32 reserved3;
1333 u32 reserved4[4];
1334};
1335
1336struct bna_rxtx_q_mem {
1337 struct bna_rxq_mem rxq;
1338 struct bna_txq_mem txq;
1339};
1340
1341struct bna_cq_mem {
1342 u32 pg_tbl_addr_lo;
1343 u32 pg_tbl_addr_hi;
1344 u32 cur_q_entry_lo;
1345 u32 cur_q_entry_hi;
1346
1347 u32 reserved1;
1348 u32 reserved2;
1349 u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
1350 /* 15:0 ->producer pointer (index?) */
1351 u32 entry_n_pg_size; /* 31:16->entry size */
1352 /* 15:0 ->page size */
1353 u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
1354 /* 23:16->Int Blk Offset */
1355 /* 15:0 ->consumer pointer(index?) */
1356 u32 q_state; /* 31:16->reserved; 15:0-> Q state */
1357 u32 reserved3[2];
1358 u32 reserved4[4];
1359};
1360
1361struct bna_ib_blk_mem {
1362 u32 host_addr_lo;
1363 u32 host_addr_hi;
1364 u32 clsc_n_ctrl_n_msix; /* 31:24->coalescing; */
1365 /* 23:16->coalescing cfg; */
1366 /* 15:8 ->control; */
1367 /* 7:0 ->msix; */
1368 u32 ipkt_n_ent_n_idxof;
1369 u32 ipkt_cnt_cfg_n_unacked;
1370
1371 u32 reserved[3];
1372};
1373
1374struct bna_idx_tbl_mem {
1375 u32 idx; /* !< 31:16->res;15:0->idx; */
1376};
1377
1378struct bna_doorbell_qset {
1379 u32 rxq[0x20 >> 2];
1380 u32 txq[0x20 >> 2];
1381 u32 ib0[0x20 >> 2];
1382 u32 ib1[0x20 >> 2];
1383};
1384
1385struct bna_rx_fndb_ram {
1386 u32 rss_prop;
1387 u32 size_routing_props;
1388 u32 rit_hds_mcastq;
1389 u32 control_flags;
1390};
1391
1392struct bna_tx_fndb_ram {
1393 u32 vlan_n_ctrl_flags;
1394};
1395
1396/**
1397 * @brief
1398 * Structure which maps to RxFn Indirection Table (RIT)
1399 * Size : 1 word
1400 * See catapult_spec.pdf, RxA for details
1401 */
1402struct bna_rit_mem {
1403 u32 rxq_ids; /* !< 31:12->res;11:0->two 6 bit RxQ Ids */
1404};
1405
1406/**
1407 * @brief
1408 * Structure which maps to RSS Table entry
1409 * Size : 16 words
1410 * See catapult_spec.pdf, RAD for details
1411 */
1412struct bna_rss_mem {
1413 /*
1414 * 31:12-> res
1415 * 11:8 -> protocol type
1416 * 7:0 -> hash index
1417 */
1418 u32 type_n_hash;
1419 u32 hash_key[10]; /* !< 40 byte Toeplitz hash key */
1420 u32 reserved[5];
1421};
1422
1423/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
1424struct bna_dma_addr {
1425 u32 msb;
1426 u32 lsb;
1427};
1428
1429struct bna_txq_wi_vector {
1430 u16 reserved;
1431 u16 length; /* Only 14 LSB are valid */
1432 struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
1433};
1434
1435typedef u16 bna_txq_wi_opcode_t;
1436
1437typedef u16 bna_txq_wi_ctrl_flag_t;
1438
1439/**
1440 * TxQ Entry Structure
1441 *
1442 * BEWARE: Load values into this structure with correct endianness.
1443 */
1444struct bna_txq_entry {
1445 union {
1446 struct {
1447 u8 reserved;
1448 u8 num_vectors; /* number of vectors present */
1449 bna_txq_wi_opcode_t opcode; /* Either */
1450 /* BNA_TXQ_WI_SEND or */
1451 /* BNA_TXQ_WI_SEND_LSO */
1452 bna_txq_wi_ctrl_flag_t flags; /* OR of all the flags */
1453 u16 l4_hdr_size_n_offset;
1454 u16 vlan_tag;
1455 u16 lso_mss; /* Only 14 LSB are valid */
1456 u32 frame_length; /* Only 24 LSB are valid */
1457 } wi;
1458
1459 struct {
1460 u16 reserved;
1461 bna_txq_wi_opcode_t opcode; /* Must be */
1462 /* BNA_TXQ_WI_EXTENSION */
1463 u32 reserved2[3]; /* Place holder for */
1464 /* removed vector (12 bytes) */
1465 } wi_ext;
1466 } hdr;
1467 struct bna_txq_wi_vector vector[4];
1468};
1469#define wi_hdr hdr.wi
1470#define wi_ext_hdr hdr.wi_ext
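
/*
 * Example (editorial): filling the header of a single-frame send WI for a
 * TCP/IPv4 frame with checksum offload. Per the BEWARE note above the
 * fields must be loaded with the correct endianness; the htons()/htonl()
 * convention and the header sizes here are assumptions for illustration.
 */
static inline void ex_txq_wi_fill(struct bna_txq_entry *txqent, u32 len)
{
	txqent->hdr.wi.reserved = 0;
	txqent->hdr.wi.num_vectors = 1;
	txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
	txqent->hdr.wi.flags = htons(BNA_TXQ_WI_CF_IP_CKSUM |
				     BNA_TXQ_WI_CF_TCP_CKSUM);
	/* 20 byte TCP header (5 words), L4 starting at byte offset 34 */
	txqent->hdr.wi.l4_hdr_size_n_offset =
		htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(5, 34));
	txqent->hdr.wi.vlan_tag = 0;
	txqent->hdr.wi.lso_mss = 0;
	txqent->hdr.wi.frame_length = htonl(len);	/* 24 LSB valid */
}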
1471
1472/* RxQ Entry Structure */
1473struct bna_rxq_entry { /* Rx-Buffer */
1474 struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */
1475};
1476
1477typedef u32 bna_cq_e_flag_t;
1478
1479/* CQ Entry Structure */
1480struct bna_cq_entry {
1481 bna_cq_e_flag_t flags;
1482 u16 vlan_tag;
1483 u16 length;
1484 u32 rss_hash;
1485 u8 valid;
1486 u8 reserved1;
1487 u8 reserved2;
1488 u8 rxq_id;
1489};
1490
1491#endif /* __BNA_HW_H__ */
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
new file mode 100644
index 000000000000..890846d55502
--- /dev/null
+++ b/drivers/net/bna/bna_txrx.c
@@ -0,0 +1,4209 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include "bna.h"
19#include "bfa_sm.h"
20#include "bfi.h"
21
22/**
23 * IB
24 */
25#define bna_ib_find_free_ibidx(_mask, _pos)\
26do {\
27 (_pos) = 0;\
28 while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
29 ((1 << (_pos)) & (_mask)))\
30 (_pos)++;\
31} while (0)
32
33#define bna_ib_count_ibidx(_mask, _count)\
34do {\
35 int pos = 0;\
36 (_count) = 0;\
37 while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
38 if ((1 << pos) & (_mask))\
39 (_count) = pos + 1;\
40 pos++;\
41 } \
42} while (0)
43
44#define bna_ib_select_segpool(_count, _q_idx)\
45do {\
46 int i;\
47 (_q_idx) = -1;\
48 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
49 if ((_count <= ibidx_pool[i].pool_entry_size)) {\
50 (_q_idx) = i;\
51 break;\
52 } \
53 } \
54} while (0)
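
/*
 * Example (editorial): the three statement macros above in use. With
 * mask 0x7 (indexes 0..2 taken), the first free position is 3; after
 * reserving it the index count becomes 4, and the smallest pool whose
 * entries hold at least 4 indexes is selected.
 */
static inline void ex_ibidx_macros(void)
{
	int pos, count, q_idx;
	u32 mask = 0x7;

	bna_ib_find_free_ibidx(mask, pos);		/* pos == 3 */
	bna_ib_count_ibidx(mask | (1 << pos), count);	/* count == 4 */
	bna_ib_select_segpool(count, q_idx);
}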
55
56struct bna_ibidx_pool {
57 int pool_size;
58 int pool_entry_size;
59};
60init_ibidx_pool(ibidx_pool);
61
62static struct bna_intr *
63bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
64 int vector)
65{
66 struct bna_intr *intr;
67 struct list_head *qe;
68
69 list_for_each(qe, &ib_mod->intr_active_q) {
70 intr = (struct bna_intr *)qe;
71
72 if ((intr->intr_type == intr_type) &&
73 (intr->vector == vector)) {
74 intr->ref_count++;
75 return intr;
76 }
77 }
78
79 if (list_empty(&ib_mod->intr_free_q))
80 return NULL;
81
82 bfa_q_deq(&ib_mod->intr_free_q, &intr);
83 bfa_q_qe_init(&intr->qe);
84
85 intr->ref_count = 1;
86 intr->intr_type = intr_type;
87 intr->vector = vector;
88
89 list_add_tail(&intr->qe, &ib_mod->intr_active_q);
90
91 return intr;
92}
93
94static void
95bna_intr_put(struct bna_ib_mod *ib_mod,
96 struct bna_intr *intr)
97{
98 intr->ref_count--;
99
100 if (intr->ref_count == 0) {
101 intr->ib = NULL;
102 list_del(&intr->qe);
103 bfa_q_qe_init(&intr->qe);
104 list_add_tail(&intr->qe, &ib_mod->intr_free_q);
105 }
106}
107
108void
109bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
110 struct bna_res_info *res_info)
111{
112 int i;
113 int j;
114 int count;
115 u8 offset;
116 struct bna_doorbell_qset *qset;
117 unsigned long off;
118
119 ib_mod->bna = bna;
120
121 ib_mod->ib = (struct bna_ib *)
122 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
123 ib_mod->intr = (struct bna_intr *)
124 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
125 ib_mod->idx_seg = (struct bna_ibidx_seg *)
126 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
127
128 INIT_LIST_HEAD(&ib_mod->ib_free_q);
129 INIT_LIST_HEAD(&ib_mod->intr_free_q);
130 INIT_LIST_HEAD(&ib_mod->intr_active_q);
131
132 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
133 INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
134
135 for (i = 0; i < BFI_MAX_IB; i++) {
136 ib_mod->ib[i].ib_id = i;
137
138 ib_mod->ib[i].ib_seg_host_addr_kva =
139 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
140 ib_mod->ib[i].ib_seg_host_addr.lsb =
141 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
142 ib_mod->ib[i].ib_seg_host_addr.msb =
143 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
144
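		/*
		 * offsetof-style trick: cast 0 to a qset pointer and take
		 * a member address to get this IB's doorbell byte offset
		 * within the doorbell block.
		 */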
145 qset = (struct bna_doorbell_qset *)0;
146 off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
147 * (0x20 >> 2)]);
148 ib_mod->ib[i].door_bell.doorbell_addr = off +
149 BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
150
151 bfa_q_qe_init(&ib_mod->ib[i].qe);
152 list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
153
154 bfa_q_qe_init(&ib_mod->intr[i].qe);
155 list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
156 }
157
158 count = 0;
159 offset = 0;
160 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
161 for (j = 0; j < ibidx_pool[i].pool_size; j++) {
162 bfa_q_qe_init(&ib_mod->idx_seg[count]);
163 ib_mod->idx_seg[count].ib_seg_size =
164 ibidx_pool[i].pool_entry_size;
165 ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
166 list_add_tail(&ib_mod->idx_seg[count].qe,
167 &ib_mod->ibidx_seg_pool[i]);
168 count++;
169 offset += ibidx_pool[i].pool_entry_size;
170 }
171 }
172}
173
174void
175bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
176{
177 int i;
178 int j;
179 struct list_head *qe;
180
181 i = 0;
182 list_for_each(qe, &ib_mod->ib_free_q)
183 i++;
184
185 i = 0;
186 list_for_each(qe, &ib_mod->intr_free_q)
187 i++;
188
189 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
190 j = 0;
191 list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
192 j++;
193 }
194
195 ib_mod->bna = NULL;
196}
197
198struct bna_ib *
199bna_ib_get(struct bna_ib_mod *ib_mod,
200 enum bna_intr_type intr_type,
201 int vector)
202{
203 struct bna_ib *ib;
204 struct bna_intr *intr;
205
206 if (intr_type == BNA_INTR_T_INTX)
207 vector = (1 << vector);
208
209 intr = bna_intr_get(ib_mod, intr_type, vector);
210 if (intr == NULL)
211 return NULL;
212
213 if (intr->ib) {
214 if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
215 bna_intr_put(ib_mod, intr);
216 return NULL;
217 }
218 intr->ib->ref_count++;
219 return intr->ib;
220 }
221
222 if (list_empty(&ib_mod->ib_free_q)) {
223 bna_intr_put(ib_mod, intr);
224 return NULL;
225 }
226
227 bfa_q_deq(&ib_mod->ib_free_q, &ib);
228 bfa_q_qe_init(&ib->qe);
229
230 ib->ref_count = 1;
231 ib->start_count = 0;
232 ib->idx_mask = 0;
233
234 ib->intr = intr;
235 ib->idx_seg = NULL;
236 intr->ib = ib;
237
238 ib->bna = ib_mod->bna;
239
240 return ib;
241}
242
243void
244bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
245{
246 bna_intr_put(ib_mod, ib->intr);
247
248 ib->ref_count--;
249
250 if (ib->ref_count == 0) {
251 ib->intr = NULL;
252 ib->bna = NULL;
253 list_add_tail(&ib->qe, &ib_mod->ib_free_q);
254 }
255}
256
257/* Returns index offset - starting from 0 */
258int
259bna_ib_reserve_idx(struct bna_ib *ib)
260{
261 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
262 struct bna_ibidx_seg *idx_seg;
263 int idx;
264 int num_idx;
265 int q_idx;
266
267 /* Find the first free index position */
268 bna_ib_find_free_ibidx(ib->idx_mask, idx);
269 if (idx == BFI_IBIDX_MAX_SEGSIZE)
270 return -1;
271
272 /*
273 * Calculate the total number of indexes held by this IB,
274 * including the index newly reserved above.
275 */
276 bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
277
278 /* See if there is a free space in the index segment held by this IB */
279 if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
280 ib->idx_mask |= (1 << idx);
281 return idx;
282 }
283
284 if (ib->start_count)
285 return -1;
286
287 /* Allocate a new segment */
288 bna_ib_select_segpool(num_idx, q_idx);
289 while (1) {
290 if (q_idx == BFI_IBIDX_TOTAL_POOLS)
291 return -1;
292 if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
293 break;
294 q_idx++;
295 }
296 bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
297 bfa_q_qe_init(&idx_seg->qe);
298
299 /* Free the old segment */
300 if (ib->idx_seg) {
301 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
302 list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
303 }
304
305 ib->idx_seg = idx_seg;
306
307 ib->idx_mask |= (1 << idx);
308
309 return idx;
310}
311
312void
313bna_ib_release_idx(struct bna_ib *ib, int idx)
314{
315 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
316 struct bna_ibidx_seg *idx_seg;
317 int num_idx;
318 int cur_q_idx;
319 int new_q_idx;
320
321 ib->idx_mask &= ~(1 << idx);
322
323 if (ib->start_count)
324 return;
325
326 bna_ib_count_ibidx(ib->idx_mask, num_idx);
327
328 /*
329 * Free the segment, if there are no more indexes in the segment
330 * held by this IB
331 */
332 if (!num_idx) {
333 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
334 list_add_tail(&ib->idx_seg->qe,
335 &ib_mod->ibidx_seg_pool[cur_q_idx]);
336 ib->idx_seg = NULL;
337 return;
338 }
339
340 /* See if we can move to a smaller segment */
341 bna_ib_select_segpool(num_idx, new_q_idx);
342 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
343 while (new_q_idx < cur_q_idx) {
344 if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
345 break;
346 new_q_idx++;
347 }
348 if (new_q_idx < cur_q_idx) {
349 /* Select the new smaller segment */
350 bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
351 bfa_q_qe_init(&idx_seg->qe);
352 /* Free the old segment */
353 list_add_tail(&ib->idx_seg->qe,
354 &ib_mod->ibidx_seg_pool[cur_q_idx]);
355 ib->idx_seg = idx_seg;
356 }
357}
358
359int
360bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
361{
362 if (ib->start_count)
363 return -1;
364
365 ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
366 ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
367 ib->ib_config.interpkt_count = ib_config->interpkt_count;
368 ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
369
370 ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
371 if (ib->intr->intr_type == BNA_INTR_T_MSIX)
372 ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
373
374 return 0;
375}
376
377void
378bna_ib_start(struct bna_ib *ib)
379{
380 struct bna_ib_blk_mem ib_cfg;
381 struct bna_ib_blk_mem *ib_mem;
382 u32 pg_num;
383 u32 intx_mask;
384 int i;
385 void __iomem *base_addr;
386 unsigned long off;
387
388 ib->start_count++;
389
390 if (ib->start_count > 1)
391 return;
392
393 ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
394 ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
395
396 ib_cfg.clsc_n_ctrl_n_msix = (((u32)
397 ib->ib_config.coalescing_timeo << 16) |
398 ((u32)ib->ib_config.ctrl_flags << 8) |
399 (ib->intr->vector));
400 ib_cfg.ipkt_n_ent_n_idxof =
401 ((u32)
402 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
403 ((u32)ib->idx_seg->ib_seg_size << 8) |
404 (ib->idx_seg->ib_idx_tbl_offset);
405 ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
406 ib->ib_config.interpkt_count << 24);
407
408 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
409 HQM_IB_RAM_BASE_OFFSET);
410 writel(pg_num, ib->bna->regs.page_addr);
411
412 base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
413 HQM_IB_RAM_BASE_OFFSET);
414
415 ib_mem = (struct bna_ib_blk_mem *)0;
416 off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
417 writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
418
419 off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
420 writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
421
422 off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
423 writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
424
425 off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
426 writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
427
428 off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
429 writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
430
431 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
432 (u32)ib->ib_config.coalescing_timeo, 0);
433
434 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
435 HQM_INDX_TBL_RAM_BASE_OFFSET);
436 writel(pg_num, ib->bna->regs.page_addr);
437
438 base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
439 HQM_INDX_TBL_RAM_BASE_OFFSET);
440 for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
441 off = (unsigned long)
442 ((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
443 writel(0, base_addr + off);
444 }
445
446 if (ib->intr->intr_type == BNA_INTR_T_INTX) {
447 bna_intx_disable(ib->bna, intx_mask);
448 intx_mask &= ~(ib->intr->vector);
449 bna_intx_enable(ib->bna, intx_mask);
450 }
451}
452
453void
454bna_ib_stop(struct bna_ib *ib)
455{
456 u32 intx_mask;
457
458 ib->start_count--;
459
460 if (ib->start_count == 0) {
461 writel(BNA_DOORBELL_IB_INT_DISABLE,
462 ib->door_bell.doorbell_addr);
463 if (ib->intr->intr_type == BNA_INTR_T_INTX) {
464 bna_intx_disable(ib->bna, intx_mask);
465 intx_mask |= (ib->intr->vector);
466 bna_intx_enable(ib->bna, intx_mask);
467 }
468 }
469}
470
471void
472bna_ib_fail(struct bna_ib *ib)
473{
474 ib->start_count = 0;
475}
476
477/**
478 * RXF
479 */
480static void rxf_enable(struct bna_rxf *rxf);
481static void rxf_disable(struct bna_rxf *rxf);
482static void __rxf_config_set(struct bna_rxf *rxf);
483static void __rxf_rit_set(struct bna_rxf *rxf);
484static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
485static int rxf_process_packet_filter(struct bna_rxf *rxf);
486static int rxf_clear_packet_filter(struct bna_rxf *rxf);
487static void rxf_reset_packet_filter(struct bna_rxf *rxf);
488static void rxf_cb_enabled(void *arg, int status);
489static void rxf_cb_disabled(void *arg, int status);
490static void bna_rxf_cb_stats_cleared(void *arg, int status);
491static void __rxf_enable(struct bna_rxf *rxf);
492static void __rxf_disable(struct bna_rxf *rxf);
493
494bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
495 enum bna_rxf_event);
496bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
497 enum bna_rxf_event);
498bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
499 enum bna_rxf_event);
500bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
501 enum bna_rxf_event);
502bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
503 enum bna_rxf_event);
504bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
505 enum bna_rxf_event);
506bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
507 enum bna_rxf_event);
508bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
509 enum bna_rxf_event);
510bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
511 enum bna_rxf_event);
512
513static struct bfa_sm_table rxf_sm_table[] = {
514 {BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
515 {BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
516 {BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
517 {BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
518 {BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
519 {BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
520 {BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
521 {BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
522 {BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
523};
524
525static void
526bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
527{
528 call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
529}
530
531static void
532bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
533{
534 switch (event) {
535 case RXF_E_START:
536 bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
537 break;
538
539 case RXF_E_STOP:
540 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
541 break;
542
543 case RXF_E_FAIL:
544 /* No-op */
545 break;
546
547 case RXF_E_CAM_FLTR_MOD:
548 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
549 break;
550
551 case RXF_E_STARTED:
552 case RXF_E_STOPPED:
553 case RXF_E_CAM_FLTR_RESP:
554 /**
555 * These events are received due to flushing of mbox
556 * when device fails
557 */
558 /* No-op */
559 break;
560
561 case RXF_E_PAUSE:
562 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
563 call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
564 break;
565
566 case RXF_E_RESUME:
567 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
568 call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
569 break;
570
571 default:
572 bfa_sm_fault(rxf->rx->bna, event);
573 }
574}
575
576static void
577bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
578{
579 __rxf_config_set(rxf);
580 __rxf_rit_set(rxf);
581 rxf_enable(rxf);
582}
583
584static void
585bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
586{
587 switch (event) {
588 case RXF_E_STOP:
589 /**
590 * STOP is originated from bnad. When this happens,
591 * it can not be waiting for filter update
592 */
593 call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
594 bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
595 break;
596
597 case RXF_E_FAIL:
598 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
599 call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
600 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
601 break;
602
603 case RXF_E_CAM_FLTR_MOD:
604 /* No-op */
605 break;
606
607 case RXF_E_STARTED:
608 /**
609 * Force rxf_process_packet_filter() to go through initial
610 * config
611 */
612 if ((rxf->ucast_active_mac != NULL) &&
613 (rxf->ucast_pending_set == 0))
614 rxf->ucast_pending_set = 1;
615
616 if (rxf->rss_status == BNA_STATUS_T_ENABLED)
617 rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
618
619 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
620
621 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
622 break;
623
624 case RXF_E_PAUSE:
625 case RXF_E_RESUME:
626 rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
627 break;
628
629 default:
630 bfa_sm_fault(rxf->rx->bna, event);
631 }
632}
633
634static void
635bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
636{
637 if (!rxf_process_packet_filter(rxf)) {
638 /* No more pending CAM entries to update */
639 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
640 }
641}
642
643static void
644bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
645{
646 switch (event) {
647 case RXF_E_STOP:
648 /**
649 * STOP originates from bnad. When this happens,
650 * the FSM cannot be waiting for a filter update
651 */
652 call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
653 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
654 break;
655
656 case RXF_E_FAIL:
657 rxf_reset_packet_filter(rxf);
658 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
659 call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
660 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
661 break;
662
663 case RXF_E_CAM_FLTR_MOD:
664 /* No-op */
665 break;
666
667 case RXF_E_CAM_FLTR_RESP:
668 if (!rxf_process_packet_filter(rxf)) {
669 /* No more pending CAM entries to update */
670 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
671 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
672 }
673 break;
674
675 case RXF_E_PAUSE:
676 case RXF_E_RESUME:
677 rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
678 break;
679
680 default:
681 bfa_sm_fault(rxf->rx->bna, event);
682 }
683}
684
685static void
686bna_rxf_sm_started_entry(struct bna_rxf *rxf)
687{
688 call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
689
690 if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
691 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
692 bfa_fsm_send_event(rxf, RXF_E_PAUSE);
693 else
694 bfa_fsm_send_event(rxf, RXF_E_RESUME);
695 }
}
698
699static void
700bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
701{
702 switch (event) {
703 case RXF_E_STOP:
704 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
		/* Hack to get the FSM to start clearing CAM entries */
706 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
707 break;
708
709 case RXF_E_FAIL:
710 rxf_reset_packet_filter(rxf);
711 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
712 break;
713
714 case RXF_E_CAM_FLTR_MOD:
715 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
716 break;
717
718 case RXF_E_PAUSE:
719 bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
720 break;
721
722 case RXF_E_RESUME:
723 bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
724 break;
725
726 default:
727 bfa_sm_fault(rxf->rx->bna, event);
728 }
729}
730
731static void
732bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
733{
	/**
	 * Note: Do not add rxf_clear_packet_filter here.
	 * It would overrun the mbox when this transition happens:
	 * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
	 */
739}
740
741static void
742bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
743{
744 switch (event) {
745 case RXF_E_FAIL:
746 /**
747 * FSM was in the process of stopping, initiated by
748 * bnad. When this happens, no one can be waiting for
749 * start or filter update
750 */
751 rxf_reset_packet_filter(rxf);
752 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
753 break;
754
755 case RXF_E_CAM_FLTR_RESP:
756 if (!rxf_clear_packet_filter(rxf)) {
757 /* No more pending CAM entries to clear */
758 bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
759 rxf_disable(rxf);
760 }
761 break;
762
763 default:
764 bfa_sm_fault(rxf->rx->bna, event);
765 }
766}
767
768static void
769bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
770{
	/**
	 * NOTE: Do not add rxf_disable here.
	 * It would overrun the mbox when this transition happens:
	 * start_wait -> stop_wait on RXF_E_STOP event
	 */
776}
777
778static void
779bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
780{
781 switch (event) {
782 case RXF_E_FAIL:
783 /**
784 * FSM was in the process of stopping, initiated by
785 * bnad. When this happens, no one can be waiting for
786 * start or filter update
787 */
788 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
789 break;
790
791 case RXF_E_STARTED:
792 /**
793 * This event is received due to abrupt transition from
794 * bna_rxf_sm_start_wait state on receiving
795 * RXF_E_STOP event
796 */
797 rxf_disable(rxf);
798 break;
799
800 case RXF_E_STOPPED:
801 /**
802 * FSM was in the process of stopping, initiated by
803 * bnad. When this happens, no one can be waiting for
804 * start or filter update
805 */
806 bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
807 break;
808
809 case RXF_E_PAUSE:
810 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
811 break;
812
813 case RXF_E_RESUME:
814 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
815 break;
816
817 default:
818 bfa_sm_fault(rxf->rx->bna, event);
819 }
820}
821
822static void
823bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
824{
825 rxf->rxf_flags &=
826 ~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
827 __rxf_disable(rxf);
828}
829
830static void
831bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
832{
833 switch (event) {
834 case RXF_E_FAIL:
835 /**
836 * FSM was in the process of disabling rxf, initiated by
837 * bnad.
838 */
839 call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
840 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
841 break;
842
843 case RXF_E_STOPPED:
844 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
845 call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
846 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
847 break;
848
849 /*
850 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
851 * any other event during these states
852 */
853 default:
854 bfa_sm_fault(rxf->rx->bna, event);
855 }
856}
857
858static void
859bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
860{
861 rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
862 rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
863 __rxf_enable(rxf);
864}
865
866static void
867bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
868{
869 switch (event) {
870 case RXF_E_FAIL:
871 /**
872 * FSM was in the process of disabling rxf, initiated by
873 * bnad.
874 */
875 call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
876 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
877 break;
878
879 case RXF_E_STARTED:
880 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
881 call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
882 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
883 break;
884
885 /*
886 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
887 * any other event during these states
888 */
889 default:
890 bfa_sm_fault(rxf->rx->bna, event);
891 }
892}
893
894static void
895bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
896{
897 __bna_rxf_stat_clr(rxf);
898}
899
900static void
901bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
902{
903 switch (event) {
904 case RXF_E_FAIL:
905 case RXF_E_STAT_CLEARED:
906 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
907 break;
908
909 default:
910 bfa_sm_fault(rxf->rx->bna, event);
911 }
912}
913
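/**
 * Build the 64-bit rxf-id bitmask as two 32-bit words and send an
 * enable request to f/w through the mailbox. Completion is reported
 * via rxf_cb_enabled().
 */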
914static void
915__rxf_enable(struct bna_rxf *rxf)
916{
917 struct bfi_ll_rxf_multi_req ll_req;
918 u32 bm[2] = {0, 0};
919
920 if (rxf->rxf_id < 32)
921 bm[0] = 1 << rxf->rxf_id;
922 else
923 bm[1] = 1 << (rxf->rxf_id - 32);
924
925 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
926 ll_req.rxf_id_mask[0] = htonl(bm[0]);
927 ll_req.rxf_id_mask[1] = htonl(bm[1]);
928 ll_req.enable = 1;
929
930 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
931 rxf_cb_enabled, rxf);
932
933 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
934}
935
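/**
 * Mirror of __rxf_enable(): the same mailbox request with enable = 0.
 * Completion is reported via rxf_cb_disabled().
 */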
936static void
937__rxf_disable(struct bna_rxf *rxf)
938{
939 struct bfi_ll_rxf_multi_req ll_req;
940 u32 bm[2] = {0, 0};
941
942 if (rxf->rxf_id < 32)
943 bm[0] = 1 << rxf->rxf_id;
944 else
945 bm[1] = 1 << (rxf->rxf_id - 32);
946
947 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
948 ll_req.rxf_id_mask[0] = htonl(bm[0]);
949 ll_req.rxf_id_mask[1] = htonl(bm[1]);
950 ll_req.enable = 0;
951
952 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
953 rxf_cb_disabled, rxf);
954
955 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
956}
957
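/**
 * Program the per-function receive configuration: the Toeplitz RSS key
 * and hash type (when RSS is enabled), followed by the Rx function
 * database entry - RSS properties, small/large buffer routing, RIT
 * offset, HDS settings and default-VLAN control flags.
 */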
958static void
959__rxf_config_set(struct bna_rxf *rxf)
960{
961 u32 i;
962 struct bna_rss_mem *rss_mem;
963 struct bna_rx_fndb_ram *rx_fndb_ram;
964 struct bna *bna = rxf->rx->bna;
965 void __iomem *base_addr;
966 unsigned long off;
967
968 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
969 RSS_TABLE_BASE_OFFSET);
970
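	/*
	 * NULL-based addressing idiom: with rss_mem pointing at 0,
	 * &rss_mem[0].field evaluates to the byte offset of that field
	 * within the RSS table, which is then added to base_addr for
	 * the writel() calls below.
	 */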
971 rss_mem = (struct bna_rss_mem *)0;
972
973 /* Configure RSS if required */
974 if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
975 /* configure RSS Table */
976 writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
977 bna->port_num, RSS_TABLE_BASE_OFFSET),
978 bna->regs.page_addr);
979
		/* temporarily disable RSS while the hash key is written */
981 off = (unsigned long)&rss_mem[0].type_n_hash;
982 writel(0, base_addr + off);
983
984 for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
985 off = (unsigned long)
986 &rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
987 writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
988 base_addr + off);
989 }
990
991 off = (unsigned long)&rss_mem[0].type_n_hash;
992 writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
993 base_addr + off);
994 }
995
996 /* Configure RxF */
997 writel(BNA_GET_PAGE_NUM(
998 LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
999 RX_FNDB_RAM_BASE_OFFSET),
1000 bna->regs.page_addr);
1001
1002 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1003 RX_FNDB_RAM_BASE_OFFSET);
1004
1005 rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
1006
1007 /* We always use RSS table 0 */
1008 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
1009 writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
1010 base_addr + off);
1011
	/* small/large buffer enable/disable */
1013 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
1014 writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
1015 base_addr + off);
1016
1017 /* RIT offset, HDS forced offset, multicast RxQ Id */
1018 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
1019 writel((rxf->rit_segment->rit_offset << 16) |
1020 (rxf->forced_offset << 8) |
1021 (rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
1022 base_addr + off);
1023
1024 /*
1025 * default vlan tag, default function enable, strip vlan bytes,
1026 * HDS type, header size
1027 */
1028
1029 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
1030 writel(((u32)rxf->default_vlan_tag << 16) |
1031 (rxf->ctrl_flags &
1032 (BNA_RXF_CF_DEFAULT_VLAN |
1033 BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
1034 BNA_RXF_CF_VLAN_STRIP)) |
1035 (rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
1036 rxf->hds_cfg.header_size,
1037 base_addr + off);
1038}
1039
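/**
 * Enable or disable VLAN filtering for this function: when enabled,
 * write the s/w VLAN bitmap to VLAN RAM; when disabled, set every bit
 * so that all VLAN ids pass.
 */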
1040void
1041__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
1042{
1043 struct bna *bna = rxf->rx->bna;
1044 int i;
1045
1046 writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
1047 (bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
1048 bna->regs.page_addr);
1049
1050 if (status == BNA_STATUS_T_ENABLED) {
1051 /* enable VLAN filtering on this function */
1052 for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1053 writel(rxf->vlan_filter_table[i],
1054 BNA_GET_VLAN_MEM_ENTRY_ADDR
1055 (bna->pcidev.pci_bar_kva, rxf->rxf_id,
1056 i * 32));
1057 }
1058 } else {
1059 /* disable VLAN filtering on this function */
1060 for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1061 writel(0xffffffff,
1062 BNA_GET_VLAN_MEM_ENTRY_ADDR
1063 (bna->pcidev.pci_bar_kva, rxf->rxf_id,
1064 i * 32));
1065 }
1066 }
1067}
1068
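/**
 * Program this function's RxQ Indirection Table segment: each entry
 * carries a (large, small) RxQ id pair.
 */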
1069static void
1070__rxf_rit_set(struct bna_rxf *rxf)
1071{
1072 struct bna *bna = rxf->rx->bna;
1073 struct bna_rit_mem *rit_mem;
1074 int i;
1075 void __iomem *base_addr;
1076 unsigned long off;
1077
1078 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1079 FUNCTION_TO_RXQ_TRANSLATE);
1080
1081 rit_mem = (struct bna_rit_mem *)0;
1082
1083 writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
1084 FUNCTION_TO_RXQ_TRANSLATE),
1085 bna->regs.page_addr);
1086
1087 for (i = 0; i < rxf->rit_segment->rit_size; i++) {
1088 off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
1089 writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
1090 rxf->rit_segment->rit[i].small_rxq_id,
1091 base_addr + off);
1092 }
1093}
1094
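/**
 * Ask f/w to clear the statistics of this Rx function only: the stats
 * and txf masks are zeroed and just the rxf-id bit is set. Completion
 * drives RXF_E_STAT_CLEARED via bna_rxf_cb_stats_cleared().
 */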
1095static void
1096__bna_rxf_stat_clr(struct bna_rxf *rxf)
1097{
1098 struct bfi_ll_stats_req ll_req;
1099 u32 bm[2] = {0, 0};
1100
1101 if (rxf->rxf_id < 32)
1102 bm[0] = 1 << rxf->rxf_id;
1103 else
1104 bm[1] = 1 << (rxf->rxf_id - 32);
1105
1106 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
1107 ll_req.stats_mask = 0;
1108 ll_req.txf_id_mask[0] = 0;
1109 ll_req.txf_id_mask[1] = 0;
1110
1111 ll_req.rxf_id_mask[0] = htonl(bm[0]);
1112 ll_req.rxf_id_mask[1] = htonl(bm[1]);
1113
1114 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
1115 bna_rxf_cb_stats_cleared, rxf);
1116 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1117}
1118
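/**
 * If the function is paused the h/w rxf stays disabled, so the FSM is
 * short-circuited with RXF_E_STARTED. Otherwise mark the rxf enabled
 * and issue the mailbox enable request.
 */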
1119static void
1120rxf_enable(struct bna_rxf *rxf)
1121{
1122 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1123 bfa_fsm_send_event(rxf, RXF_E_STARTED);
1124 else {
1125 rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
1126 __rxf_enable(rxf);
1127 }
1128}
1129
1130static void
1131rxf_cb_enabled(void *arg, int status)
1132{
1133 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1134
1135 bfa_q_qe_init(&rxf->mbox_qe.qe);
1136 bfa_fsm_send_event(rxf, RXF_E_STARTED);
1137}
1138
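/**
 * Counterpart of rxf_enable(): when paused the h/w rxf is already
 * disabled, so only RXF_E_STOPPED is sent. Otherwise clear the enabled
 * flag and issue the mailbox disable request.
 */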
1139static void
1140rxf_disable(struct bna_rxf *rxf)
1141{
1142 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1143 bfa_fsm_send_event(rxf, RXF_E_STOPPED);
	else {
		rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
		__rxf_disable(rxf);
	}
}
1148
1149static void
1150rxf_cb_disabled(void *arg, int status)
1151{
1152 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1153
1154 bfa_q_qe_init(&rxf->mbox_qe.qe);
1155 bfa_fsm_send_event(rxf, RXF_E_STOPPED);
1156}
1157
1158void
1159rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
1160{
1161 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1162
1163 bfa_q_qe_init(&rxf->mbox_qe.qe);
1164
1165 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
1166}
1167
1168static void
1169bna_rxf_cb_stats_cleared(void *arg, int status)
1170{
1171 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1172
1173 bfa_q_qe_init(&rxf->mbox_qe.qe);
1174 bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
1175}
1176
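/**
 * Send a CAM (MAC address) request to f/w: @cmd selects the operation
 * (ucast set/add/del, mcast add/del). The mailbox response comes back
 * as RXF_E_CAM_FLTR_RESP through rxf_cb_cam_fltr_mbox_cmd().
 */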
1177void
1178rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
1179 const struct bna_mac *mac_addr)
1180{
1181 struct bfi_ll_mac_addr_req req;
1182
1183 bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
1184
1185 req.rxf_id = rxf->rxf_id;
1186 memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
1187
1188 bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
1189 rxf_cb_cam_fltr_mbox_cmd, rxf);
1190
1191 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1192}
1193
1194static int
1195rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
1196{
1197 struct bna_mac *mac = NULL;
1198 struct list_head *qe;
1199
1200 /* Add multicast entries */
1201 if (!list_empty(&rxf->mcast_pending_add_q)) {
1202 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1203 bfa_q_qe_init(qe);
1204 mac = (struct bna_mac *)qe;
1205 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
1206 list_add_tail(&mac->qe, &rxf->mcast_active_q);
1207 return 1;
1208 }
1209
	/* Delete multicast entries previously added */
1211 if (!list_empty(&rxf->mcast_pending_del_q)) {
1212 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1213 bfa_q_qe_init(qe);
1214 mac = (struct bna_mac *)qe;
1215 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1216 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1217 return 1;
1218 }
1219
1220 return 0;
1221}
1222
1223static int
1224rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
1225{
1226 /* Apply the VLAN filter */
1227 if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
1228 rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
1229 if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
1230 !(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
1231 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
1232 }
1233
1234 /* Apply RSS configuration */
1235 if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
1236 rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
1237 if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
1238 /* RSS is being disabled */
1239 rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
1240 __rxf_rit_set(rxf);
1241 __rxf_config_set(rxf);
1242 } else {
1243 /* RSS is being enabled or reconfigured */
1244 rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
1245 __rxf_rit_set(rxf);
1246 __rxf_config_set(rxf);
1247 }
1248 }
1249
1250 return 0;
1251}
1252
/**
 * Processes pending ucast/mcast entry addition and deletion and issues
 * the mailbox command. Also processes pending filter configuration -
 * promiscuous mode, default mode, allmulti mode - and either issues the
 * mailbox command or applies the setting directly to h/w
 */
1259static int
1260rxf_process_packet_filter(struct bna_rxf *rxf)
1261{
1262 /* Set the default MAC first */
1263 if (rxf->ucast_pending_set > 0) {
1264 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
1265 rxf->ucast_active_mac);
1266 rxf->ucast_pending_set--;
1267 return 1;
1268 }
1269
1270 if (rxf_process_packet_filter_ucast(rxf))
1271 return 1;
1272
1273 if (rxf_process_packet_filter_mcast(rxf))
1274 return 1;
1275
1276 if (rxf_process_packet_filter_promisc(rxf))
1277 return 1;
1278
1279 if (rxf_process_packet_filter_default(rxf))
1280 return 1;
1281
1282 if (rxf_process_packet_filter_allmulti(rxf))
1283 return 1;
1284
1285 if (rxf_process_packet_filter_vlan(rxf))
1286 return 1;
1287
1288 return 0;
1289}
1290
1291static int
1292rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
1293{
1294 struct bna_mac *mac = NULL;
1295 struct list_head *qe;
1296
1297 /* 3. delete pending mcast entries */
1298 if (!list_empty(&rxf->mcast_pending_del_q)) {
1299 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1300 bfa_q_qe_init(qe);
1301 mac = (struct bna_mac *)qe;
1302 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1303 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1304 return 1;
1305 }
1306
1307 /* 4. clear active mcast entries; move them to pending_add_q */
1308 if (!list_empty(&rxf->mcast_active_q)) {
1309 bfa_q_deq(&rxf->mcast_active_q, &qe);
1310 bfa_q_qe_init(qe);
1311 mac = (struct bna_mac *)qe;
1312 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1313 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1314 return 1;
1315 }
1316
1317 return 0;
1318}
1319
/**
 * In the rxf stop path, processes the pending ucast/mcast delete queues
 * and issues the mailbox command. Moves the active ucast/mcast entries
 * to the pending add queues, so that they are added to the CAM again in
 * the rxf start path. Moves the current filter settings - promiscuous,
 * default, allmulti - to the pending filter configuration
 */
1327static int
1328rxf_clear_packet_filter(struct bna_rxf *rxf)
1329{
1330 if (rxf_clear_packet_filter_ucast(rxf))
1331 return 1;
1332
1333 if (rxf_clear_packet_filter_mcast(rxf))
1334 return 1;
1335
1336 /* 5. clear active default MAC in the CAM */
1337 if (rxf->ucast_pending_set > 0)
1338 rxf->ucast_pending_set = 0;
1339
1340 if (rxf_clear_packet_filter_promisc(rxf))
1341 return 1;
1342
1343 if (rxf_clear_packet_filter_default(rxf))
1344 return 1;
1345
1346 if (rxf_clear_packet_filter_allmulti(rxf))
1347 return 1;
1348
1349 return 0;
1350}
1351
1352static void
1353rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
1354{
1355 struct list_head *qe;
1356 struct bna_mac *mac;
1357
1358 /* 3. Move active mcast entries to pending_add_q */
1359 while (!list_empty(&rxf->mcast_active_q)) {
1360 bfa_q_deq(&rxf->mcast_active_q, &qe);
1361 bfa_q_qe_init(qe);
1362 list_add_tail(qe, &rxf->mcast_pending_add_q);
1363 }
1364
1365 /* 4. Throw away delete pending mcast entries */
1366 while (!list_empty(&rxf->mcast_pending_del_q)) {
1367 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1368 bfa_q_qe_init(qe);
1369 mac = (struct bna_mac *)qe;
1370 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1371 }
1372}
1373
/**
 * In the rxf fail path, throws away the ucast/mcast entries pending
 * deletion and moves all active ucast/mcast entries to the pending add
 * queues, so that they are added back to the CAM in the rxf start path.
 * Also moves the current filter configuration to the pending filter
 * configuration.
 */
1380static void
1381rxf_reset_packet_filter(struct bna_rxf *rxf)
1382{
1383 rxf_reset_packet_filter_ucast(rxf);
1384
1385 rxf_reset_packet_filter_mcast(rxf);
1386
1387 /* 5. Turn off ucast set flag */
1388 rxf->ucast_pending_set = 0;
1389
1390 rxf_reset_packet_filter_promisc(rxf);
1391
1392 rxf_reset_packet_filter_default(rxf);
1393
1394 rxf_reset_packet_filter_allmulti(rxf);
1395}
1396
1397void
1398bna_rxf_init(struct bna_rxf *rxf,
1399 struct bna_rx *rx,
1400 struct bna_rx_config *q_config)
1401{
1402 struct list_head *qe;
1403 struct bna_rxp *rxp;
1404
1405 /* rxf_id is initialized during rx_mod init */
1406 rxf->rx = rx;
1407
1408 INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
1409 INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
1410 rxf->ucast_pending_set = 0;
1411 INIT_LIST_HEAD(&rxf->ucast_active_q);
1412 rxf->ucast_active_mac = NULL;
1413
1414 INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
1415 INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
1416 INIT_LIST_HEAD(&rxf->mcast_active_q);
1417
1418 bfa_q_qe_init(&rxf->mbox_qe.qe);
1419
1420 if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
1421 rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
1422
1423 rxf->rxf_oper_state = (q_config->paused) ?
1424 BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
1425
1426 bna_rxf_adv_init(rxf, rx, q_config);
1427
1428 rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
1429 q_config->num_paths);
1430
1431 list_for_each(qe, &rx->rxp_q) {
1432 rxp = (struct bna_rxp *)qe;
1433 if (q_config->rxp_type == BNA_RXP_SINGLE)
1434 rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
1435 else
1436 rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
1437 break;
1438 }
1439
1440 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
1441 memset(rxf->vlan_filter_table, 0,
1442 (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
1443
1444 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
1445}
1446
1447void
1448bna_rxf_uninit(struct bna_rxf *rxf)
1449{
1450 struct bna_mac *mac;
1451
1452 bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
1453 rxf->rit_segment = NULL;
1454
1455 rxf->ucast_pending_set = 0;
1456
1457 while (!list_empty(&rxf->ucast_pending_add_q)) {
1458 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
1459 bfa_q_qe_init(&mac->qe);
1460 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1461 }
1462
1463 if (rxf->ucast_active_mac) {
1464 bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1465 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
1466 rxf->ucast_active_mac);
1467 rxf->ucast_active_mac = NULL;
1468 }
1469
1470 while (!list_empty(&rxf->mcast_pending_add_q)) {
1471 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
1472 bfa_q_qe_init(&mac->qe);
1473 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1474 }
1475
1476 rxf->rx = NULL;
1477}
1478
1479void
1480bna_rxf_start(struct bna_rxf *rxf)
1481{
1482 rxf->start_cbfn = bna_rx_cb_rxf_started;
1483 rxf->start_cbarg = rxf->rx;
1484 rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
1485 bfa_fsm_send_event(rxf, RXF_E_START);
1486}
1487
1488void
1489bna_rxf_stop(struct bna_rxf *rxf)
1490{
1491 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
1492 rxf->stop_cbarg = rxf->rx;
1493 bfa_fsm_send_event(rxf, RXF_E_STOP);
1494}
1495
1496void
1497bna_rxf_fail(struct bna_rxf *rxf)
1498{
1499 rxf->rxf_flags |= BNA_RXF_FL_FAILED;
1500 bfa_fsm_send_event(rxf, RXF_E_FAIL);
1501}
1502
1503int
1504bna_rxf_state_get(struct bna_rxf *rxf)
1505{
1506 return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
1507}
1508
1509enum bna_cb_status
1510bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
1511 void (*cbfn)(struct bnad *, struct bna_rx *,
1512 enum bna_cb_status))
1513{
1514 struct bna_rxf *rxf = &rx->rxf;
1515
1516 if (rxf->ucast_active_mac == NULL) {
1517 rxf->ucast_active_mac =
1518 bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
1519 if (rxf->ucast_active_mac == NULL)
1520 return BNA_CB_UCAST_CAM_FULL;
1521 bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1522 }
1523
1524 memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
1525 rxf->ucast_pending_set++;
1526 rxf->cam_fltr_cbfn = cbfn;
1527 rxf->cam_fltr_cbarg = rx->bna->bnad;
1528
1529 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1530
1531 return BNA_CB_SUCCESS;
1532}
1533
1534enum bna_cb_status
1535bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
1536 void (*cbfn)(struct bnad *, struct bna_rx *,
1537 enum bna_cb_status))
1538{
1539 struct bna_rxf *rxf = &rx->rxf;
1540 struct list_head *qe;
1541 struct bna_mac *mac;
1542
1543 /* Check if already added */
1544 list_for_each(qe, &rxf->mcast_active_q) {
1545 mac = (struct bna_mac *)qe;
1546 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1547 if (cbfn)
1548 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1549 return BNA_CB_SUCCESS;
1550 }
1551 }
1552
1553 /* Check if pending addition */
1554 list_for_each(qe, &rxf->mcast_pending_add_q) {
1555 mac = (struct bna_mac *)qe;
1556 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1557 if (cbfn)
1558 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1559 return BNA_CB_SUCCESS;
1560 }
1561 }
1562
1563 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1564 if (mac == NULL)
1565 return BNA_CB_MCAST_LIST_FULL;
1566 bfa_q_qe_init(&mac->qe);
1567 memcpy(mac->addr, addr, ETH_ALEN);
1568 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1569
1570 rxf->cam_fltr_cbfn = cbfn;
1571 rxf->cam_fltr_cbarg = rx->bna->bnad;
1572
1573 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1574
1575 return BNA_CB_SUCCESS;
1576}
1577
1578enum bna_cb_status
1579bna_rx_mcast_del(struct bna_rx *rx, u8 *addr,
1580 void (*cbfn)(struct bnad *, struct bna_rx *,
1581 enum bna_cb_status))
1582{
1583 struct bna_rxf *rxf = &rx->rxf;
1584 struct list_head *qe;
1585 struct bna_mac *mac;
1586
1587 list_for_each(qe, &rxf->mcast_pending_add_q) {
1588 mac = (struct bna_mac *)qe;
1589 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1590 list_del(qe);
1591 bfa_q_qe_init(qe);
1592 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1593 if (cbfn)
1594 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1595 return BNA_CB_SUCCESS;
1596 }
1597 }
1598
1599 list_for_each(qe, &rxf->mcast_active_q) {
1600 mac = (struct bna_mac *)qe;
1601 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1602 list_del(qe);
1603 bfa_q_qe_init(qe);
1604 list_add_tail(qe, &rxf->mcast_pending_del_q);
1605 rxf->cam_fltr_cbfn = cbfn;
1606 rxf->cam_fltr_cbarg = rx->bna->bnad;
1607 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1608 return BNA_CB_SUCCESS;
1609 }
1610 }
1611
1612 return BNA_CB_INVALID_MAC;
1613}
1614
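/**
 * Reconcile the multicast CAM with @mclist in three passes:
 * (1) schedule new addresses for addition, skipping those already
 * active or pending; (2) drop pending additions that are absent from
 * the new list; (3) schedule active entries absent from the new list
 * for deletion. The FSM is kicked only if something actually changed.
 */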
1615enum bna_cb_status
1616bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
1617 void (*cbfn)(struct bnad *, struct bna_rx *,
1618 enum bna_cb_status))
1619{
1620 struct bna_rxf *rxf = &rx->rxf;
1621 struct list_head list_head;
1622 struct list_head *qe;
1623 u8 *mcaddr;
1624 struct bna_mac *mac;
1625 struct bna_mac *mac1;
1626 int skip;
1627 int delete;
1628 int need_hw_config = 0;
1629 int i;
1630
1631 /* Allocate nodes */
1632 INIT_LIST_HEAD(&list_head);
1633 for (i = 0, mcaddr = mclist; i < count; i++) {
1634 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1635 if (mac == NULL)
1636 goto err_return;
1637 bfa_q_qe_init(&mac->qe);
1638 memcpy(mac->addr, mcaddr, ETH_ALEN);
1639 list_add_tail(&mac->qe, &list_head);
1640
1641 mcaddr += ETH_ALEN;
1642 }
1643
1644 /* Schedule for addition */
1645 while (!list_empty(&list_head)) {
1646 bfa_q_deq(&list_head, &qe);
1647 mac = (struct bna_mac *)qe;
1648 bfa_q_qe_init(&mac->qe);
1649
1650 skip = 0;
1651
1652 /* Skip if already added */
1653 list_for_each(qe, &rxf->mcast_active_q) {
1654 mac1 = (struct bna_mac *)qe;
1655 if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1656 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1657 mac);
1658 skip = 1;
1659 break;
1660 }
1661 }
1662
1663 if (skip)
1664 continue;
1665
1666 /* Skip if pending addition */
1667 list_for_each(qe, &rxf->mcast_pending_add_q) {
1668 mac1 = (struct bna_mac *)qe;
1669 if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1670 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1671 mac);
1672 skip = 1;
1673 break;
1674 }
1675 }
1676
1677 if (skip)
1678 continue;
1679
1680 need_hw_config = 1;
1681 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1682 }
1683
1684 /**
1685 * Delete the entries that are in the pending_add_q but not
1686 * in the new list
1687 */
1688 while (!list_empty(&rxf->mcast_pending_add_q)) {
1689 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1690 mac = (struct bna_mac *)qe;
1691 bfa_q_qe_init(&mac->qe);
1692 for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1693 if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1694 delete = 0;
1695 break;
1696 }
1697 mcaddr += ETH_ALEN;
1698 }
1699 if (delete)
1700 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1701 else
1702 list_add_tail(&mac->qe, &list_head);
1703 }
1704 while (!list_empty(&list_head)) {
1705 bfa_q_deq(&list_head, &qe);
1706 mac = (struct bna_mac *)qe;
1707 bfa_q_qe_init(&mac->qe);
1708 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1709 }
1710
1711 /**
1712 * Schedule entries for deletion that are in the active_q but not
1713 * in the new list
1714 */
1715 while (!list_empty(&rxf->mcast_active_q)) {
1716 bfa_q_deq(&rxf->mcast_active_q, &qe);
1717 mac = (struct bna_mac *)qe;
1718 bfa_q_qe_init(&mac->qe);
1719 for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1720 if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1721 delete = 0;
1722 break;
1723 }
1724 mcaddr += ETH_ALEN;
1725 }
1726 if (delete) {
1727 list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
1728 need_hw_config = 1;
1729 } else {
1730 list_add_tail(&mac->qe, &list_head);
1731 }
1732 }
1733 while (!list_empty(&list_head)) {
1734 bfa_q_deq(&list_head, &qe);
1735 mac = (struct bna_mac *)qe;
1736 bfa_q_qe_init(&mac->qe);
1737 list_add_tail(&mac->qe, &rxf->mcast_active_q);
1738 }
1739
1740 if (need_hw_config) {
1741 rxf->cam_fltr_cbfn = cbfn;
1742 rxf->cam_fltr_cbarg = rx->bna->bnad;
1743 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1744 } else if (cbfn)
1745 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1746
1747 return BNA_CB_SUCCESS;
1748
1749err_return:
1750 while (!list_empty(&list_head)) {
1751 bfa_q_deq(&list_head, &qe);
1752 mac = (struct bna_mac *)qe;
1753 bfa_q_qe_init(&mac->qe);
1754 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1755 }
1756
1757 return BNA_CB_MCAST_LIST_FULL;
1758}
1759
1760void
1761bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1762{
1763 struct bna_rxf *rxf = &rx->rxf;
1764 int index = (vlan_id >> 5);
1765 int bit = (1 << (vlan_id & 0x1F));
1766
1767 rxf->vlan_filter_table[index] |= bit;
1768 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1769 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1770 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1771 }
1772}
1773
1774void
1775bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1776{
1777 struct bna_rxf *rxf = &rx->rxf;
1778 int index = (vlan_id >> 5);
1779 int bit = (1 << (vlan_id & 0x1F));
1780
1781 rxf->vlan_filter_table[index] &= ~bit;
1782 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1783 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1784 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1785 }
1786}
1787
1788/**
1789 * RX
1790 */
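/*
 * Initialize the receive control block of an RxQ. The doorbell address
 * uses the NULL-based qset array: &_qset[id] yields the byte offset of
 * this queue's doorbell page, which is added to the doorbell base of
 * the PCI BAR.
 */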
1791#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
1792 struct bna_doorbell_qset *_qset; \
1793 unsigned long off; \
1794 (q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
1795 (q)->rcb->q_depth = (qdepth); \
1796 (q)->rcb->unmap_q = unmapq_mem; \
1797 (q)->rcb->rxq = (q); \
1798 (q)->rcb->cq = &(rxp)->cq; \
1799 (q)->rcb->bnad = (bna)->bnad; \
1800 _qset = (struct bna_doorbell_qset *)0; \
1801 off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
1802 (q)->rcb->q_dbell = off + \
1803 BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
1804 (q)->rcb->id = _id; \
1805} while (0)
1806
1807#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1808 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1809
1810#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1811 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1812
#define call_rx_stop_callback(rx, status)				\
do {									\
	if ((rx)->stop_cbfn) {						\
		(*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status));	\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
	}								\
} while (0)
1819
/*
 * Since rx_enable is a synchronous callback, there is no start_cbfn
 * required. Instead, we'll call bnad_rx_post(rxp) so that bnad can post
 * the buffers for each rxpath.
 */
1825
#define call_rx_disable_cbfn(rx, status)				\
do {									\
	if ((rx)->disable_cbfn) {					\
		(*(rx)->disable_cbfn)((rx)->disable_cbarg,		\
				status);				\
		(rx)->disable_cbfn = NULL;				\
		(rx)->disable_cbarg = NULL;				\
	}								\
} while (0)
1833
1834#define rxqs_reqd(type, num_rxqs) \
1835 (((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
1836
1837#define rx_ib_fail(rx) \
1838do { \
1839 struct bna_rxp *rxp; \
1840 struct list_head *qe; \
1841 list_for_each(qe, &(rx)->rxp_q) { \
1842 rxp = (struct bna_rxp *)qe; \
1843 bna_ib_fail(rxp->cq.ib); \
1844 } \
1845} while (0)
1846
1847static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
1848static void __bna_rxq_start(struct bna_rxq *rxq);
1849static void __bna_cq_start(struct bna_cq *cq);
1850static void bna_rit_create(struct bna_rx *rx);
1851static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
1852static void bna_rx_cb_rxq_stopped_all(void *arg);
1853
1854bfa_fsm_state_decl(bna_rx, stopped,
1855 struct bna_rx, enum bna_rx_event);
1856bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1857 struct bna_rx, enum bna_rx_event);
1858bfa_fsm_state_decl(bna_rx, started,
1859 struct bna_rx, enum bna_rx_event);
1860bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1861 struct bna_rx, enum bna_rx_event);
1862bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
1863 struct bna_rx, enum bna_rx_event);
1864
1865static struct bfa_sm_table rx_sm_table[] = {
1866 {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
1867 {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
1868 {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
1869 {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
1870 {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
1871};
1872
1873static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1874{
1875 struct bna_rxp *rxp;
1876 struct list_head *qe_rxp;
1877
1878 list_for_each(qe_rxp, &rx->rxp_q) {
1879 rxp = (struct bna_rxp *)qe_rxp;
1880 rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
1881 }
1882
1883 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1884}
1885
1886static void bna_rx_sm_stopped(struct bna_rx *rx,
1887 enum bna_rx_event event)
1888{
1889 switch (event) {
1890 case RX_E_START:
1891 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1892 break;
1893 case RX_E_STOP:
1894 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1895 break;
1896 case RX_E_FAIL:
1897 /* no-op */
1898 break;
1899 default:
1900 bfa_sm_fault(rx->bna, event);
1901 break;
1902 }
}
1905
1906static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1907{
1908 struct bna_rxp *rxp;
1909 struct list_head *qe_rxp;
1910 struct bna_rxq *q0 = NULL, *q1 = NULL;
1911
1912 /* Setup the RIT */
1913 bna_rit_create(rx);
1914
1915 list_for_each(qe_rxp, &rx->rxp_q) {
1916 rxp = (struct bna_rxp *)qe_rxp;
1917 bna_ib_start(rxp->cq.ib);
1918 GET_RXQS(rxp, q0, q1);
1919 q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
1920 __bna_rxq_start(q0);
1921 rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
1922 if (q1) {
1923 __bna_rxq_start(q1);
1924 rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
1925 }
1926 __bna_cq_start(&rxp->cq);
1927 }
1928
1929 bna_rxf_start(&rx->rxf);
1930}
1931
1932static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1933 enum bna_rx_event event)
1934{
1935 switch (event) {
1936 case RX_E_STOP:
1937 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1938 break;
1939 case RX_E_FAIL:
1940 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1941 rx_ib_fail(rx);
1942 bna_rxf_fail(&rx->rxf);
1943 break;
1944 case RX_E_RXF_STARTED:
1945 bfa_fsm_set_state(rx, bna_rx_sm_started);
1946 break;
1947 default:
1948 bfa_sm_fault(rx->bna, event);
1949 break;
1950 }
1951}
1952
1953void
1954bna_rx_sm_started_entry(struct bna_rx *rx)
1955{
1956 struct bna_rxp *rxp;
1957 struct list_head *qe_rxp;
1958
1959 /* Start IB */
1960 list_for_each(qe_rxp, &rx->rxp_q) {
1961 rxp = (struct bna_rxp *)qe_rxp;
1962 bna_ib_ack(&rxp->cq.ib->door_bell, 0);
1963 }
1964
1965 bna_llport_admin_up(&rx->bna->port.llport);
1966}
1967
1968void
1969bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1970{
1971 switch (event) {
1972 case RX_E_FAIL:
1973 bna_llport_admin_down(&rx->bna->port.llport);
1974 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1975 rx_ib_fail(rx);
1976 bna_rxf_fail(&rx->rxf);
1977 break;
1978 case RX_E_STOP:
1979 bna_llport_admin_down(&rx->bna->port.llport);
1980 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1981 break;
1982 default:
1983 bfa_sm_fault(rx->bna, event);
1984 break;
1985 }
1986}
1987
1988void
1989bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1990{
1991 bna_rxf_stop(&rx->rxf);
1992}
1993
1994void
1995bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1996{
1997 switch (event) {
1998 case RX_E_RXF_STOPPED:
1999 bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
2000 break;
2001 case RX_E_RXF_STARTED:
2002 /**
2003 * RxF was in the process of starting up when
2004 * RXF_E_STOP was issued. Ignore this event
2005 */
2006 break;
2007 case RX_E_FAIL:
2008 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2009 rx_ib_fail(rx);
2010 bna_rxf_fail(&rx->rxf);
2011 break;
2012 default:
2013 bfa_sm_fault(rx->bna, event);
2014 break;
2015 }
}
2018
2019void
2020bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
2021{
2022 struct bna_rxp *rxp = NULL;
2023 struct bna_rxq *q0 = NULL;
2024 struct bna_rxq *q1 = NULL;
2025 struct list_head *qe;
2026 u32 rxq_mask[2] = {0, 0};
2027
2028 /* Only one call to multi-rxq-stop for all RXPs in this RX */
2029 bfa_wc_up(&rx->rxq_stop_wc);
2030 list_for_each(qe, &rx->rxp_q) {
2031 rxp = (struct bna_rxp *)qe;
2032 GET_RXQS(rxp, q0, q1);
2033 if (q0->rxq_id < 32)
2034 rxq_mask[0] |= ((u32)1 << q0->rxq_id);
2035 else
2036 rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
2037 if (q1) {
2038 if (q1->rxq_id < 32)
2039 rxq_mask[0] |= ((u32)1 << q1->rxq_id);
2040 else
2041 rxq_mask[1] |= ((u32)
2042 1 << (q1->rxq_id - 32));
2043 }
2044 }
2045
2046 __bna_multi_rxq_stop(rxp, rxq_mask);
2047}
2048
2049void
2050bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
2051{
2052 struct bna_rxp *rxp = NULL;
2053 struct list_head *qe;
2054
2055 switch (event) {
2056 case RX_E_RXQ_STOPPED:
2057 list_for_each(qe, &rx->rxp_q) {
2058 rxp = (struct bna_rxp *)qe;
2059 bna_ib_stop(rxp->cq.ib);
2060 }
2061 /* Fall through */
2062 case RX_E_FAIL:
2063 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2064 break;
2065 default:
2066 bfa_sm_fault(rx->bna, event);
2067 break;
2068 }
2069}
2070
2071void
__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
2073{
2074 struct bfi_ll_q_stop_req ll_req;
2075
2076 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
2077 ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
2078 ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
2079 bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
2080 bna_rx_cb_multi_rxq_stopped, rxp);
2081 bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
2082}
2083
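/**
 * Write the RxQ context into HQM RxTx queue RAM: page table and current
 * entry addresses, page count/size, the owning CQ, buffer size and
 * BNA_Q_IDLE_STATE; then reset the s/w producer/consumer indices.
 */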
2084void
2085__bna_rxq_start(struct bna_rxq *rxq)
2086{
2087 struct bna_rxtx_q_mem *q_mem;
2088 struct bna_rxq_mem rxq_cfg, *rxq_mem;
2089 struct bna_dma_addr cur_q_addr;
2091 struct bna_qpt *qpt;
2092 u32 pg_num;
2093 struct bna *bna = rxq->rx->bna;
2094 void __iomem *base_addr;
2095 unsigned long off;
2096
2097 qpt = &rxq->qpt;
2098 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2099
2100 rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2101 rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2102 rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2103 rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2104
2105 rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
2106 rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
2107 (qpt->page_size >> 2);
2108 rxq_cfg.sg_n_cq_n_cns_ptr =
2109 ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
2110 rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
2111 BNA_Q_IDLE_STATE;
2112 rxq_cfg.next_qid = 0x0 | (0x3 << 8);
2113
2114 /* Write the page number register */
2115 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2116 HQM_RXTX_Q_RAM_BASE_OFFSET);
2117 writel(pg_num, bna->regs.page_addr);
2118
2119 /* Write to h/w */
2120 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2121 HQM_RXTX_Q_RAM_BASE_OFFSET);
2122
2123 q_mem = (struct bna_rxtx_q_mem *)0;
2124 rxq_mem = &q_mem[rxq->rxq_id].rxq;
2125
2126 off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
2127 writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
2128
2129 off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
2130 writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
2131
2132 off = (unsigned long)&rxq_mem->cur_q_entry_lo;
2133 writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
2134
2135 off = (unsigned long)&rxq_mem->cur_q_entry_hi;
2136 writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
2137
2138 off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
2139 writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2140
2141 off = (unsigned long)&rxq_mem->entry_n_pg_size;
2142 writel(rxq_cfg.entry_n_pg_size, base_addr + off);
2143
2144 off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
2145 writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
2146
2147 off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
2148 writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
2149
2150 off = (unsigned long)&rxq_mem->next_qid;
2151 writel(rxq_cfg.next_qid, base_addr + off);
2152
2153 rxq->rcb->producer_index = 0;
2154 rxq->rcb->consumer_index = 0;
2155}
2156
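/**
 * Write the CQ context into HQM CQ RAM, bind the queue to its interrupt
 * block, and reset the s/w and h/w producer indices.
 */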
2157void
2158__bna_cq_start(struct bna_cq *cq)
2159{
2160 struct bna_cq_mem cq_cfg, *cq_mem;
2161 const struct bna_qpt *qpt;
2162 struct bna_dma_addr cur_q_addr;
2163 u32 pg_num;
2164 struct bna *bna = cq->rx->bna;
2165 void __iomem *base_addr;
2166 unsigned long off;
2167
2168 qpt = &cq->qpt;
2169 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2170
2171 /*
2172 * Fill out structure, to be subsequently written
2173 * to hardware
2174 */
2175 cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2176 cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2177 cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2178 cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2179
2180 cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
2181 cq_cfg.entry_n_pg_size =
2182 ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
2183 cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
2184 ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
2185 cq_cfg.q_state = BNA_Q_IDLE_STATE;
2186
2187 /* Write the page number register */
2188 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2189 HQM_CQ_RAM_BASE_OFFSET);
2190
2191 writel(pg_num, bna->regs.page_addr);
2192
2193 /* H/W write */
2194 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2195 HQM_CQ_RAM_BASE_OFFSET);
2196
2197 cq_mem = (struct bna_cq_mem *)0;
2198
2199 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
2200 writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
2201
2202 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
2203 writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
2204
2205 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
2206 writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
2207
2208 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
2209 writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
2210
2211 off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
2212 writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2213
2214 off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
2215 writel(cq_cfg.entry_n_pg_size, base_addr + off);
2216
2217 off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
2218 writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
2219
2220 off = (unsigned long)&cq_mem[cq->cq_id].q_state;
2221 writel(cq_cfg.q_state, base_addr + off);
2222
2223 cq->ccb->producer_index = 0;
2224 *(cq->ccb->hw_producer_index) = 0;
2225}
2226
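/**
 * Fill this Rx's RIT segment: one entry per rxpath, carrying the large
 * RxQ id and, when present, the small RxQ id (0 otherwise).
 */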
2227void
2228bna_rit_create(struct bna_rx *rx)
2229{
2230 struct list_head *qe_rxp;
2231 struct bna *bna;
2232 struct bna_rxp *rxp;
2233 struct bna_rxq *q0 = NULL;
2234 struct bna_rxq *q1 = NULL;
2235 int offset;
2236
2237 bna = rx->bna;
2238
2239 offset = 0;
2240 list_for_each(qe_rxp, &rx->rxp_q) {
2241 rxp = (struct bna_rxp *)qe_rxp;
2242 GET_RXQS(rxp, q0, q1);
2243 rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
2244 rx->rxf.rit_segment->rit[offset].small_rxq_id =
2245 (q1 ? q1->rxq_id : 0);
2246 offset++;
2247 }
2248}
2249
2250int
2251_rx_can_satisfy(struct bna_rx_mod *rx_mod,
2252 struct bna_rx_config *rx_cfg)
2253{
2254 if ((rx_mod->rx_free_count == 0) ||
2255 (rx_mod->rxp_free_count == 0) ||
2256 (rx_mod->rxq_free_count == 0))
2257 return 0;
2258
2259 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
2260 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2261 (rx_mod->rxq_free_count < rx_cfg->num_paths))
2262 return 0;
2263 } else {
2264 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2265 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
2266 return 0;
2267 }
2268
2269 if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
2270 return 0;
2271
2272 return 1;
2273}
2274
2275struct bna_rxq *
2276_get_free_rxq(struct bna_rx_mod *rx_mod)
2277{
2278 struct bna_rxq *rxq = NULL;
2279 struct list_head *qe = NULL;
2280
2281 bfa_q_deq(&rx_mod->rxq_free_q, &qe);
2282 if (qe) {
2283 rx_mod->rxq_free_count--;
2284 rxq = (struct bna_rxq *)qe;
2285 }
2286 return rxq;
2287}
2288
2289void
2290_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
2291{
2292 bfa_q_qe_init(&rxq->qe);
2293 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
2294 rx_mod->rxq_free_count++;
2295}
2296
2297struct bna_rxp *
2298_get_free_rxp(struct bna_rx_mod *rx_mod)
2299{
2300 struct list_head *qe = NULL;
2301 struct bna_rxp *rxp = NULL;
2302
2303 bfa_q_deq(&rx_mod->rxp_free_q, &qe);
2304 if (qe) {
2305 rx_mod->rxp_free_count--;
2306
2307 rxp = (struct bna_rxp *)qe;
2308 }
2309
2310 return rxp;
2311}
2312
2313void
2314_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
2315{
2316 bfa_q_qe_init(&rxp->qe);
2317 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
2318 rx_mod->rxp_free_count++;
2319}
2320
2321struct bna_rx *
2322_get_free_rx(struct bna_rx_mod *rx_mod)
2323{
2324 struct list_head *qe = NULL;
2325 struct bna_rx *rx = NULL;
2326
2327 bfa_q_deq(&rx_mod->rx_free_q, &qe);
2328 if (qe) {
2329 rx_mod->rx_free_count--;
2330
2331 rx = (struct bna_rx *)qe;
2332 bfa_q_qe_init(qe);
2333 list_add_tail(&rx->qe, &rx_mod->rx_active_q);
2334 }
2335
2336 return rx;
2337}
2338
2339void
2340_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2341{
2342 bfa_q_qe_init(&rx->qe);
2343 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
2344 rx_mod->rx_free_count++;
2345}
2346
2347void
2348_rx_init(struct bna_rx *rx, struct bna *bna)
2349{
2350 rx->bna = bna;
2351 rx->rx_flags = 0;
2352
2353 INIT_LIST_HEAD(&rx->rxp_q);
2354
2355 rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
2356 rx->rxq_stop_wc.wc_cbarg = rx;
2357 rx->rxq_stop_wc.wc_count = 0;
2358
2359 rx->stop_cbfn = NULL;
2360 rx->stop_cbarg = NULL;
2361}
2362
2363void
2364_rxp_add_rxqs(struct bna_rxp *rxp,
2365 struct bna_rxq *q0,
2366 struct bna_rxq *q1)
2367{
2368 switch (rxp->type) {
2369 case BNA_RXP_SINGLE:
2370 rxp->rxq.single.only = q0;
2371 rxp->rxq.single.reserved = NULL;
2372 break;
2373 case BNA_RXP_SLR:
2374 rxp->rxq.slr.large = q0;
2375 rxp->rxq.slr.small = q1;
2376 break;
2377 case BNA_RXP_HDS:
2378 rxp->rxq.hds.data = q0;
2379 rxp->rxq.hds.hdr = q1;
2380 break;
2381 default:
2382 break;
2383 }
2384}
2385
2386void
2387_rxq_qpt_init(struct bna_rxq *rxq,
2388 struct bna_rxp *rxp,
2389 u32 page_count,
2390 u32 page_size,
2391 struct bna_mem_descr *qpt_mem,
2392 struct bna_mem_descr *swqpt_mem,
2393 struct bna_mem_descr *page_mem)
2394{
2395 int i;
2396
2397 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2398 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2399 rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
2400 rxq->qpt.page_count = page_count;
2401 rxq->qpt.page_size = page_size;
2402
2403 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
2404
2405 for (i = 0; i < rxq->qpt.page_count; i++) {
2406 rxq->rcb->sw_qpt[i] = page_mem[i].kva;
2407 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
2408 page_mem[i].dma.lsb;
2409 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2410 page_mem[i].dma.msb;
	}
2413}
2414
2415void
2416_rxp_cqpt_setup(struct bna_rxp *rxp,
2417 u32 page_count,
2418 u32 page_size,
2419 struct bna_mem_descr *qpt_mem,
2420 struct bna_mem_descr *swqpt_mem,
2421 struct bna_mem_descr *page_mem)
2422{
2423 int i;
2424
2425 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2426 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2427 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2428 rxp->cq.qpt.page_count = page_count;
2429 rxp->cq.qpt.page_size = page_size;
2430
2431 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2432
2433 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2434 rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
2435
2436 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2437 page_mem[i].dma.lsb;
2438 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2439 page_mem[i].dma.msb;
	}
2442}
2443
2444void
2445_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
2446{
2447 list_add_tail(&rxp->qe, &rx->rxp_q);
2448}
2449
2450void
2451_init_rxmod_queues(struct bna_rx_mod *rx_mod)
2452{
2453 INIT_LIST_HEAD(&rx_mod->rx_free_q);
2454 INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2455 INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2456 INIT_LIST_HEAD(&rx_mod->rx_active_q);
2457
2458 rx_mod->rx_free_count = 0;
2459 rx_mod->rxq_free_count = 0;
2460 rx_mod->rxp_free_count = 0;
2461}
2462
2463void
2464_rx_ctor(struct bna_rx *rx, int id)
2465{
2466 bfa_q_qe_init(&rx->qe);
2467 INIT_LIST_HEAD(&rx->rxp_q);
2468 rx->bna = NULL;
2469
2470 rx->rxf.rxf_id = id;
2471
2472 /* FIXME: mbox_qe ctor()?? */
2473 bfa_q_qe_init(&rx->mbox_qe.qe);
2474
2475 rx->stop_cbfn = NULL;
2476 rx->stop_cbarg = NULL;
2477}
2478
2479void
2480bna_rx_cb_multi_rxq_stopped(void *arg, int status)
2481{
2482 struct bna_rxp *rxp = (struct bna_rxp *)arg;
2483
2484 bfa_wc_down(&rxp->rx->rxq_stop_wc);
2485}
2486
2487void
2488bna_rx_cb_rxq_stopped_all(void *arg)
2489{
2490 struct bna_rx *rx = (struct bna_rx *)arg;
2491
2492 bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
2493}
2494
2495void
2496bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
2497 enum bna_cb_status status)
2498{
2499 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2500
2501 bfa_wc_down(&rx_mod->rx_stop_wc);
2502}
2503
2504void
2505bna_rx_mod_cb_rx_stopped_all(void *arg)
2506{
2507 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2508
2509 if (rx_mod->stop_cbfn)
2510 rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2511 rx_mod->stop_cbfn = NULL;
2512}
2513
2514void
2515bna_rx_start(struct bna_rx *rx)
2516{
2517 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2518 if (rx->rx_flags & BNA_RX_F_ENABLE)
2519 bfa_fsm_send_event(rx, RX_E_START);
2520}
2521
2522void
2523bna_rx_stop(struct bna_rx *rx)
2524{
2525 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2526 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2527 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
2528 else {
2529 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2530 rx->stop_cbarg = &rx->bna->rx_mod;
2531 bfa_fsm_send_event(rx, RX_E_STOP);
2532 }
2533}
2534
2535void
2536bna_rx_fail(struct bna_rx *rx)
2537{
2538 /* Indicate port is not enabled, and failed */
2539 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2540 rx->rx_flags |= BNA_RX_F_PORT_FAILED;
2541 bfa_fsm_send_event(rx, RX_E_FAIL);
2542}
2543
2544void
2545bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
2546{
2547 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
2548 if (rx->rxf.rxf_id < 32)
2549 rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
2550 else
2551 rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
2552 1 << (rx->rxf.rxf_id - 32));
2553}
2554
2555void
2556bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
2557{
2558 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
	if (rx->rxf.rxf_id < 32)
		rx->bna->rx_mod.rxf_bmap[0] &= ~((u32)1 << rx->rxf.rxf_id);
	else
		rx->bna->rx_mod.rxf_bmap[1] &= ~((u32)
			1 << (rx->rxf.rxf_id - 32));
2564}
2565
2566void
2567bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2568{
2569 struct bna_rx *rx;
2570 struct list_head *qe;
2571
2572 rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
2573 if (type == BNA_RX_T_LOOPBACK)
2574 rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
2575
2576 list_for_each(qe, &rx_mod->rx_active_q) {
2577 rx = (struct bna_rx *)qe;
2578 if (rx->type == type)
2579 bna_rx_start(rx);
2580 }
2581}
2582
2583void
2584bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2585{
2586 struct bna_rx *rx;
2587 struct list_head *qe;
2588
2589 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2590 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2591
2592 rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
2593
2594 /**
2595 * Before calling bna_rx_stop(), increment rx_stop_wc as many times
2596 * as we are going to call bna_rx_stop
2597 */
2598 list_for_each(qe, &rx_mod->rx_active_q) {
2599 rx = (struct bna_rx *)qe;
2600 if (rx->type == type)
2601 bfa_wc_up(&rx_mod->rx_stop_wc);
2602 }
2603
2604 if (rx_mod->rx_stop_wc.wc_count == 0) {
2605 rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2606 rx_mod->stop_cbfn = NULL;
2607 return;
2608 }
2609
2610 list_for_each(qe, &rx_mod->rx_active_q) {
2611 rx = (struct bna_rx *)qe;
2612 if (rx->type == type)
2613 bna_rx_stop(rx);
2614 }
2615}
2616
2617void
2618bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2619{
2620 struct bna_rx *rx;
2621 struct list_head *qe;
2622
2623 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2624 rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2625
2626 list_for_each(qe, &rx_mod->rx_active_q) {
2627 rx = (struct bna_rx *)qe;
2628 bna_rx_fail(rx);
2629 }
2630}
2631
2632void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2633 struct bna_res_info *res_info)
2634{
2635 int index;
2636 struct bna_rx *rx_ptr;
2637 struct bna_rxp *rxp_ptr;
2638 struct bna_rxq *rxq_ptr;
2639
2640 rx_mod->bna = bna;
2641 rx_mod->flags = 0;
2642
2643 rx_mod->rx = (struct bna_rx *)
2644 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2645 rx_mod->rxp = (struct bna_rxp *)
2646 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2647 rx_mod->rxq = (struct bna_rxq *)
2648 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2649
2650 /* Initialize the queues */
2651 _init_rxmod_queues(rx_mod);
2652
2653 /* Build RX queues */
2654 for (index = 0; index < BFI_MAX_RXQ; index++) {
2655 rx_ptr = &rx_mod->rx[index];
2656 _rx_ctor(rx_ptr, index);
2657 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2658 rx_mod->rx_free_count++;
2659 }
2660
2661 /* build RX-path queue */
2662 for (index = 0; index < BFI_MAX_RXQ; index++) {
2663 rxp_ptr = &rx_mod->rxp[index];
2664 rxp_ptr->cq.cq_id = index;
2665 bfa_q_qe_init(&rxp_ptr->qe);
2666 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2667 rx_mod->rxp_free_count++;
2668 }
2669
2670 /* build RXQ queue */
2671 for (index = 0; index < BFI_MAX_RXQ; index++) {
2672 rxq_ptr = &rx_mod->rxq[index];
2673 rxq_ptr->rxq_id = index;
2674
2675 bfa_q_qe_init(&rxq_ptr->qe);
2676 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2677 rx_mod->rxq_free_count++;
2678 }
2679
2680 rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
2681 rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
2682 rx_mod->rx_stop_wc.wc_count = 0;
2683}
2684
2685void
2686bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2687{
2688 struct list_head *qe;
2689 int i;
2690
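	/*
	 * Count the entries left on the free queues; the totals are
	 * computed but not checked here.
	 */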
2691 i = 0;
2692 list_for_each(qe, &rx_mod->rx_free_q)
2693 i++;
2694
2695 i = 0;
2696 list_for_each(qe, &rx_mod->rxp_free_q)
2697 i++;
2698
2699 i = 0;
2700 list_for_each(qe, &rx_mod->rxq_free_q)
2701 i++;
2702
2703 rx_mod->bna = NULL;
2704}
2705
2706int
2707bna_rx_state_get(struct bna_rx *rx)
2708{
2709 return bfa_sm_to_state(rx_sm_table, rx->fsm);
2710}
2711
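/**
 * Compute the memory and interrupt resources an Rx of this
 * configuration needs: the CQ depth is the sum of the data and header
 * queue depths rounded up to a power of 2, and each queue's page count
 * follows from its depth times the work-item size.
 */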
2712void
2713bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2714{
2715 u32 cq_size, hq_size, dq_size;
2716 u32 cpage_count, hpage_count, dpage_count;
2717 struct bna_mem_info *mem_info;
2718 u32 cq_depth;
2719 u32 hq_depth;
2720 u32 dq_depth;
2721
2722 dq_depth = q_cfg->q_depth;
2723 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
2724 cq_depth = dq_depth + hq_depth;
2725
2726 BNA_TO_POWER_OF_2_HIGH(cq_depth);
2727 cq_size = cq_depth * BFI_CQ_WI_SIZE;
2728 cq_size = ALIGN(cq_size, PAGE_SIZE);
2729 cpage_count = SIZE_TO_PAGES(cq_size);
2730
2731 BNA_TO_POWER_OF_2_HIGH(dq_depth);
2732 dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2733 dq_size = ALIGN(dq_size, PAGE_SIZE);
2734 dpage_count = SIZE_TO_PAGES(dq_size);
2735
2736 if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2737 BNA_TO_POWER_OF_2_HIGH(hq_depth);
2738 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2739 hq_size = ALIGN(hq_size, PAGE_SIZE);
2740 hpage_count = SIZE_TO_PAGES(hq_size);
2741 } else {
2742 hpage_count = 0;
2743 }
2744
2745 /* CCB structures */
2746 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2747 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2748 mem_info->mem_type = BNA_MEM_T_KVA;
2749 mem_info->len = sizeof(struct bna_ccb);
2750 mem_info->num = q_cfg->num_paths;
2751
2752 /* RCB structures */
2753 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2754 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2755 mem_info->mem_type = BNA_MEM_T_KVA;
2756 mem_info->len = sizeof(struct bna_rcb);
2757 mem_info->num = BNA_GET_RXQS(q_cfg);
2758
2759 /* Completion QPT */
2760 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2761 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2762 mem_info->mem_type = BNA_MEM_T_DMA;
2763 mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2764 mem_info->num = q_cfg->num_paths;
2765
2766 /* Completion s/w QPT */
2767 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2768 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2769 mem_info->mem_type = BNA_MEM_T_KVA;
2770 mem_info->len = cpage_count * sizeof(void *);
2771 mem_info->num = q_cfg->num_paths;
2772
2773 /* Completion QPT pages */
2774 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2775 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2776 mem_info->mem_type = BNA_MEM_T_DMA;
2777 mem_info->len = PAGE_SIZE;
2778 mem_info->num = cpage_count * q_cfg->num_paths;
2779
2780 /* Data QPTs */
2781 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2782 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2783 mem_info->mem_type = BNA_MEM_T_DMA;
2784 mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2785 mem_info->num = q_cfg->num_paths;
2786
2787 /* Data s/w QPTs */
2788 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2789 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2790 mem_info->mem_type = BNA_MEM_T_KVA;
2791 mem_info->len = dpage_count * sizeof(void *);
2792 mem_info->num = q_cfg->num_paths;
2793
2794 /* Data QPT pages */
2795 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2796 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2797 mem_info->mem_type = BNA_MEM_T_DMA;
2798 mem_info->len = PAGE_SIZE;
2799 mem_info->num = dpage_count * q_cfg->num_paths;
2800
2801 /* Hdr QPTs */
2802 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2803 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2804 mem_info->mem_type = BNA_MEM_T_DMA;
2805 mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2806 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2807
2808 /* Hdr s/w QPTs */
2809 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2810 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2811 mem_info->mem_type = BNA_MEM_T_KVA;
2812 mem_info->len = hpage_count * sizeof(void *);
2813 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2814
2815 /* Hdr QPT pages */
2816 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2817 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2818 mem_info->mem_type = BNA_MEM_T_DMA;
2819 mem_info->len = (hpage_count ? PAGE_SIZE : 0);
2820 mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
2821
2822 /* RX Interrupts */
2823 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2824 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2825 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2826}
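
To make the sizing arithmetic above concrete, here is a minimal stand-alone sketch of the completion-queue portion, assuming a 4 KiB PAGE_SIZE, that BNA_TO_POWER_OF_2_HIGH rounds up to the next power of two, and a 64-byte work item as a stand-in for BFI_CQ_WI_SIZE (the real constants live in bna_hw.h):

	/* Illustrative only: mirrors the cq_depth -> cpage_count math in
	 * bna_rx_res_req(); sizes here are assumptions, not driver constants. */
	#include <stdio.h>

	#define EX_PAGE_SIZE	4096u
	#define EX_CQ_WI_SIZE	64u	/* assumed stand-in for BFI_CQ_WI_SIZE */

	static unsigned int roundup_pow2(unsigned int v)
	{
		unsigned int p = 1;

		while (p < v)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		unsigned int dq_depth = 512, hq_depth = 512;	/* non-SINGLE rxp */
		unsigned int cq_depth = roundup_pow2(dq_depth + hq_depth);
		unsigned int cq_size = cq_depth * EX_CQ_WI_SIZE;
		unsigned int pages = (cq_size + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;

		printf("cq_depth=%u pages=%u\n", cq_depth, pages);	/* 1024, 16 */
		return 0;
	}
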
2827
2828struct bna_rx *
2829bna_rx_create(struct bna *bna, struct bnad *bnad,
2830 struct bna_rx_config *rx_cfg,
2831 struct bna_rx_event_cbfn *rx_cbfn,
2832 struct bna_res_info *res_info,
2833 void *priv)
2834{
2835 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2836 struct bna_rx *rx;
2837 struct bna_rxp *rxp;
2838 struct bna_rxq *q0;
2839 struct bna_rxq *q1;
2840 struct bna_intr_info *intr_info;
2841 u32 page_count;
2842 struct bna_mem_descr *ccb_mem;
2843 struct bna_mem_descr *rcb_mem;
2844 struct bna_mem_descr *unmapq_mem;
2845 struct bna_mem_descr *cqpt_mem;
2846 struct bna_mem_descr *cswqpt_mem;
2847 struct bna_mem_descr *cpage_mem;
2848 struct bna_mem_descr *hqpt_mem; /* Header/Small Q qpt */
2849 struct bna_mem_descr *dqpt_mem; /* Data/Large Q qpt */
2850 struct bna_mem_descr *hsqpt_mem; /* s/w qpt for hdr */
2851 struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
2852 struct bna_mem_descr *hpage_mem; /* hdr page mem */
2853 struct bna_mem_descr *dpage_mem; /* data page mem */
2854 int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
2855 int dpage_count, hpage_count, rcb_idx;
2856 struct bna_ib_config ibcfg;
2857 /* Fail if we don't have enough RXPs, RXQs */
2858 if (!_rx_can_satisfy(rx_mod, rx_cfg))
2859 return NULL;
2860
2861 /* Initialize resource pointers */
2862 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2863 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2864 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2865 unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2866 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2867 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2868 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2869 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2870 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2871 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2872 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2873 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2874 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2875
2876 /* Compute q depth & page count */
2877 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
2878 rx_cfg->num_paths;
2879
2880 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
2881 rx_cfg->num_paths;
2882
2883 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
2884 rx_cfg->num_paths;
2885 /* Get RX pointer */
2886 rx = _get_free_rx(rx_mod);
2887 _rx_init(rx, bna);
2888 rx->priv = priv;
2889 rx->type = rx_cfg->rx_type;
2890
2891 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2892 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2893 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2894 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2895 /* Following callbacks are mandatory */
2896 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2897 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2898
2899 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
2900 switch (rx->type) {
2901 case BNA_RX_T_REGULAR:
2902 if (!(rx->bna->rx_mod.flags &
2903 BNA_RX_MOD_F_PORT_LOOPBACK))
2904 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2905 break;
2906 case BNA_RX_T_LOOPBACK:
2907 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
2908 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2909 break;
2910 }
2911 }
2912
2913 for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
2914 rxp = _get_free_rxp(rx_mod);
2915 rxp->type = rx_cfg->rxp_type;
2916 rxp->rx = rx;
2917 rxp->cq.rx = rx;
2918
2919 /* Get required RXQs, and queue them to rx-path */
2920 q0 = _get_free_rxq(rx_mod);
2921 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2922 q1 = NULL;
2923 else
2924 q1 = _get_free_rxq(rx_mod);
2925
2926 /* Initialize IB */
2927 if (1 == intr_info->num) {
2928 rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2929 intr_info->intr_type,
2930 intr_info->idl[0].vector);
2931 rxp->vector = intr_info->idl[0].vector;
2932 } else {
2933 rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2934 intr_info->intr_type,
2935 intr_info->idl[i].vector);
2936
2937 /* Map the MSI-x vector used for this RXP */
2938 rxp->vector = intr_info->idl[i].vector;
2939 }
2940
2941 rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
2942
2943 ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2944 ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
2945 ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2946 ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
2947
2948 ret = bna_ib_config(rxp->cq.ib, &ibcfg);
2949
2950 /* Link rxqs to rxp */
2951 _rxp_add_rxqs(rxp, q0, q1);
2952
2953 /* Link rxp to rx */
2954 _rx_add_rxp(rx, rxp);
2955
2956 q0->rx = rx;
2957 q0->rxp = rxp;
2958
2959 /* Initialize RCB for the large / data q */
2960 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2961 RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
2962 (void *)unmapq_mem[rcb_idx].kva);
2963 rcb_idx++;
 2964		q0->rx_packets = q0->rx_bytes = 0;
 2965		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2966
2967 /* Initialize RXQs */
2968 _rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
2969 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
2970 q0->rcb->page_idx = dpage_idx;
2971 q0->rcb->page_count = dpage_count;
2972 dpage_idx += dpage_count;
2973
2974 /* Call bnad to complete rcb setup */
2975 if (rx->rcb_setup_cbfn)
2976 rx->rcb_setup_cbfn(bnad, q0->rcb);
2977
2978 if (q1) {
2979 q1->rx = rx;
2980 q1->rxp = rxp;
2981
2982 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2983 RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
2984 (void *)unmapq_mem[rcb_idx].kva);
2985 rcb_idx++;
 2986			q1->buffer_size = rx_cfg->small_buff_size;
 2987			q1->rx_packets = q1->rx_bytes = 0;
 2988			q1->rx_packets_with_error =
 2989				q1->rxbuf_alloc_failed = 0;
2990
2991 _rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
2992 &hqpt_mem[i], &hsqpt_mem[i],
2993 &hpage_mem[hpage_idx]);
2994 q1->rcb->page_idx = hpage_idx;
2995 q1->rcb->page_count = hpage_count;
2996 hpage_idx += hpage_count;
2997
2998 /* Call bnad to complete rcb setup */
2999 if (rx->rcb_setup_cbfn)
3000 rx->rcb_setup_cbfn(bnad, q1->rcb);
3001 }
3002 /* Setup RXP::CQ */
3003 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
3004 _rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
3005 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
3006 rxp->cq.ccb->page_idx = cpage_idx;
3007 rxp->cq.ccb->page_count = page_count;
3008 cpage_idx += page_count;
3009
3010 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
3011 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
3012
3013 rxp->cq.ccb->producer_index = 0;
3014 rxp->cq.ccb->q_depth = rx_cfg->q_depth +
3015 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
3016 0 : rx_cfg->q_depth);
3017 rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
3018 rxp->cq.ccb->rcb[0] = q0->rcb;
3019 if (q1)
3020 rxp->cq.ccb->rcb[1] = q1->rcb;
3021 rxp->cq.ccb->cq = &rxp->cq;
3022 rxp->cq.ccb->bnad = bna->bnad;
3023 rxp->cq.ccb->hw_producer_index =
3024 ((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
3025 (rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
3026 *(rxp->cq.ccb->hw_producer_index) = 0;
3027 rxp->cq.ccb->intr_type = intr_info->intr_type;
3028 rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
3029 intr_info->idl[0].vector :
3030 intr_info->idl[i].vector;
3031 rxp->cq.ccb->rx_coalescing_timeo =
3032 rxp->cq.ib->ib_config.coalescing_timeo;
3033 rxp->cq.ccb->id = i;
3034
3035 /* Call bnad to complete CCB setup */
3036 if (rx->ccb_setup_cbfn)
3037 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
3038
3039 } /* for each rx-path */
3040
3041 bna_rxf_init(&rx->rxf, rx, rx_cfg);
3042
3043 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
3044
3045 return rx;
3046}
3047
3048void
3049bna_rx_destroy(struct bna_rx *rx)
3050{
3051 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
3052 struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
3053 struct bna_rxq *q0 = NULL;
3054 struct bna_rxq *q1 = NULL;
3055 struct bna_rxp *rxp;
3056 struct list_head *qe;
3057
3058 bna_rxf_uninit(&rx->rxf);
3059
3060 while (!list_empty(&rx->rxp_q)) {
3061 bfa_q_deq(&rx->rxp_q, &rxp);
3062 GET_RXQS(rxp, q0, q1);
3063 /* Callback to bnad for destroying RCB */
3064 if (rx->rcb_destroy_cbfn)
3065 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
3066 q0->rcb = NULL;
3067 q0->rxp = NULL;
3068 q0->rx = NULL;
3069 _put_free_rxq(rx_mod, q0);
3070 if (q1) {
3071 /* Callback to bnad for destroying RCB */
3072 if (rx->rcb_destroy_cbfn)
3073 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
3074 q1->rcb = NULL;
3075 q1->rxp = NULL;
3076 q1->rx = NULL;
3077 _put_free_rxq(rx_mod, q1);
3078 }
3079 rxp->rxq.slr.large = NULL;
3080 rxp->rxq.slr.small = NULL;
3081 if (rxp->cq.ib) {
3082 if (rxp->cq.ib_seg_offset != 0xff)
3083 bna_ib_release_idx(rxp->cq.ib,
3084 rxp->cq.ib_seg_offset);
3085 bna_ib_put(ib_mod, rxp->cq.ib);
3086 rxp->cq.ib = NULL;
3087 }
3088 /* Callback to bnad for destroying CCB */
3089 if (rx->ccb_destroy_cbfn)
3090 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
3091 rxp->cq.ccb = NULL;
3092 rxp->rx = NULL;
3093 _put_free_rxp(rx_mod, rxp);
3094 }
3095
3096 list_for_each(qe, &rx_mod->rx_active_q) {
3097 if (qe == &rx->qe) {
3098 list_del(&rx->qe);
3099 bfa_q_qe_init(&rx->qe);
3100 break;
3101 }
3102 }
3103
3104 rx->bna = NULL;
3105 rx->priv = NULL;
3106 _put_free_rx(rx_mod, rx);
3107}
3108
3109void
3110bna_rx_enable(struct bna_rx *rx)
3111{
3112 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
3113 return;
3114
3115 rx->rx_flags |= BNA_RX_F_ENABLE;
3116 if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
3117 bfa_fsm_send_event(rx, RX_E_START);
3118}
3119
3120void
3121bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
3122 void (*cbfn)(void *, struct bna_rx *,
3123 enum bna_cb_status))
3124{
3125 if (type == BNA_SOFT_CLEANUP) {
 3126		/* H/w must not be accessed; treat it as already stopped */
3127 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
3128 } else {
3129 rx->stop_cbfn = cbfn;
3130 rx->stop_cbarg = rx->bna->bnad;
3131
3132 rx->rx_flags &= ~BNA_RX_F_ENABLE;
3133
3134 bfa_fsm_send_event(rx, RX_E_STOP);
3135 }
3136}
3137
3138/**
3139 * TX
3140 */
3141#define call_tx_stop_cbfn(tx, status)\
3142do {\
3143 if ((tx)->stop_cbfn)\
3144 (tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
3145 (tx)->stop_cbfn = NULL;\
3146 (tx)->stop_cbarg = NULL;\
3147} while (0)
3148
3149#define call_tx_prio_change_cbfn(tx, status)\
3150do {\
3151 if ((tx)->prio_change_cbfn)\
3152 (tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
3153 (tx)->prio_change_cbfn = NULL;\
3154} while (0)
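
Both macros follow the one-shot callback idiom: invoke the saved callback if it is set, then clear it so it cannot fire twice, with do { } while (0) keeping the multi-statement body safe inside an unbraced if/else. A minimal self-contained demo of the idiom (the names are illustrative, not driver API):

	#include <stdio.h>

	struct obj {
		void (*cb)(void *arg);
		void *cbarg;
	};

	#define call_cb(o)			\
	do {					\
		if ((o)->cb)			\
			(o)->cb((o)->cbarg);	\
		(o)->cb = NULL;			\
		(o)->cbarg = NULL;		\
	} while (0)

	static void done(void *arg)
	{
		puts((char *)arg);
	}

	int main(void)
	{
		struct obj o = { done, "stopped" };

		if (o.cb)
			call_cb(&o);	/* multi-statement body is if/else safe */
		else
			puts("never ran");

		call_cb(&o);		/* one-shot: second invocation is a no-op */
		return 0;
	}
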
3155
3156static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
3157 enum bna_cb_status status);
3158static void bna_tx_cb_txq_stopped(void *arg, int status);
3159static void bna_tx_cb_stats_cleared(void *arg, int status);
3160static void __bna_tx_stop(struct bna_tx *tx);
3161static void __bna_tx_start(struct bna_tx *tx);
3162static void __bna_txf_stat_clr(struct bna_tx *tx);
3163
3164enum bna_tx_event {
3165 TX_E_START = 1,
3166 TX_E_STOP = 2,
3167 TX_E_FAIL = 3,
3168 TX_E_TXQ_STOPPED = 4,
3169 TX_E_PRIO_CHANGE = 5,
3170 TX_E_STAT_CLEARED = 6,
3171};
3172
3173enum bna_tx_state {
3174 BNA_TX_STOPPED = 1,
3175 BNA_TX_STARTED = 2,
3176 BNA_TX_TXQ_STOP_WAIT = 3,
3177 BNA_TX_PRIO_STOP_WAIT = 4,
3178 BNA_TX_STAT_CLR_WAIT = 5,
3179};
3180
3181bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
3182 enum bna_tx_event);
3183bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
3184 enum bna_tx_event);
3185bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
3186 enum bna_tx_event);
3187bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
3188 enum bna_tx_event);
3189bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
3190 enum bna_tx_event);
3191
3192static struct bfa_sm_table tx_sm_table[] = {
3193 {BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
3194 {BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
3195 {BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
3196 {BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
3197 {BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
3198};
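
Because the current state is stored as a function pointer (tx->fsm), this table lets bna_tx_state_get() below report it as an enum. A sketch of the reverse lookup, assuming bfa_sm_to_state() in bfa_sm.h is a simple linear scan:

	/* Sketch only: assumed shape of the bfa_sm_to_state() lookup. */
	struct ex_sm_table {
		void (*sm)(void);	/* state entry function */
		int state;		/* numeric state reported to callers */
	};

	static int ex_sm_to_state(const struct ex_sm_table *tbl,
				  void (*fsm)(void))
	{
		int i = 0;

		while (tbl[i].sm && tbl[i].sm != fsm)
			i++;
		return tbl[i].state;
	}
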
3199
3200static void
3201bna_tx_sm_stopped_entry(struct bna_tx *tx)
3202{
3203 struct bna_txq *txq;
3204 struct list_head *qe;
3205
3206 list_for_each(qe, &tx->txq_q) {
3207 txq = (struct bna_txq *)qe;
3208 (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3209 }
3210
3211 call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
3212}
3213
3214static void
3215bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3216{
3217 switch (event) {
3218 case TX_E_START:
3219 bfa_fsm_set_state(tx, bna_tx_sm_started);
3220 break;
3221
3222 case TX_E_STOP:
3223 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3224 break;
3225
3226 case TX_E_FAIL:
3227 /* No-op */
3228 break;
3229
3230 case TX_E_PRIO_CHANGE:
3231 call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3232 break;
3233
3234 case TX_E_TXQ_STOPPED:
3235 /**
 3236		 * This event is received due to flushing of the mbox
 3237		 * when the device fails
3238 */
3239 /* No-op */
3240 break;
3241
3242 default:
3243 bfa_sm_fault(tx->bna, event);
3244 }
3245}
3246
3247static void
3248bna_tx_sm_started_entry(struct bna_tx *tx)
3249{
3250 struct bna_txq *txq;
3251 struct list_head *qe;
3252
3253 __bna_tx_start(tx);
3254
3255 /* Start IB */
3256 list_for_each(qe, &tx->txq_q) {
3257 txq = (struct bna_txq *)qe;
3258 bna_ib_ack(&txq->ib->door_bell, 0);
3259 }
3260}
3261
3262static void
3263bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3264{
3265 struct bna_txq *txq;
3266 struct list_head *qe;
3267
3268 switch (event) {
3269 case TX_E_STOP:
3270 bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3271 __bna_tx_stop(tx);
3272 break;
3273
3274 case TX_E_FAIL:
3275 list_for_each(qe, &tx->txq_q) {
3276 txq = (struct bna_txq *)qe;
3277 bna_ib_fail(txq->ib);
3278 (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3279 }
3280 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3281 break;
3282
3283 case TX_E_PRIO_CHANGE:
3284 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3285 break;
3286
3287 default:
3288 bfa_sm_fault(tx->bna, event);
3289 }
3290}
3291
3292static void
3293bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
3294{
3295}
3296
3297static void
3298bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3299{
3300 struct bna_txq *txq;
3301 struct list_head *qe;
3302
3303 switch (event) {
3304 case TX_E_FAIL:
3305 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3306 break;
3307
3308 case TX_E_TXQ_STOPPED:
3309 list_for_each(qe, &tx->txq_q) {
3310 txq = (struct bna_txq *)qe;
3311 bna_ib_stop(txq->ib);
3312 }
3313 bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
3314 break;
3315
3316 case TX_E_PRIO_CHANGE:
3317 /* No-op */
3318 break;
3319
3320 default:
3321 bfa_sm_fault(tx->bna, event);
3322 }
3323}
3324
3325static void
3326bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3327{
3328 __bna_tx_stop(tx);
3329}
3330
3331static void
3332bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3333{
3334 struct bna_txq *txq;
3335 struct list_head *qe;
3336
3337 switch (event) {
3338 case TX_E_STOP:
3339 bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3340 break;
3341
3342 case TX_E_FAIL:
3343 call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
3344 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3345 break;
3346
3347 case TX_E_TXQ_STOPPED:
3348 list_for_each(qe, &tx->txq_q) {
3349 txq = (struct bna_txq *)qe;
3350 bna_ib_stop(txq->ib);
3351 (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3352 }
3353 call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3354 bfa_fsm_set_state(tx, bna_tx_sm_started);
3355 break;
3356
3357 case TX_E_PRIO_CHANGE:
3358 /* No-op */
3359 break;
3360
3361 default:
3362 bfa_sm_fault(tx->bna, event);
3363 }
3364}
3365
3366static void
3367bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
3368{
3369 __bna_txf_stat_clr(tx);
3370}
3371
3372static void
3373bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
3374{
3375 switch (event) {
3376 case TX_E_FAIL:
3377 case TX_E_STAT_CLEARED:
3378 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3379 break;
3380
3381 default:
3382 bfa_sm_fault(tx->bna, event);
3383 }
3384}
3385
3386static void
3387__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
3388{
3389 struct bna_rxtx_q_mem *q_mem;
3390 struct bna_txq_mem txq_cfg;
3391 struct bna_txq_mem *txq_mem;
3392 struct bna_dma_addr cur_q_addr;
3393 u32 pg_num;
3394 void __iomem *base_addr;
3395 unsigned long off;
3396
3397 /* Fill out structure, to be subsequently written to hardware */
3398 txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
3399 txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
3400 cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
3401 txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
3402 txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
3403
3404 txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
3405
3406 txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
3407 (txq->qpt.page_size >> 2);
3408 txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
3409 ((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
3410
3411 txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
3412 txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
3413 (txq->priority & 0x3));
3414 txq_cfg.wvc_n_cquota_n_rquota =
3415 ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
3416 (BFI_TX_MAX_WRR_QUOTA & 0xfff));
3417
3418 /* Setup the page and write to H/W */
3419
3420 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
3421 HQM_RXTX_Q_RAM_BASE_OFFSET);
3422 writel(pg_num, tx->bna->regs.page_addr);
3423
3424 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3425 HQM_RXTX_Q_RAM_BASE_OFFSET);
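	/* q_mem is a NULL-based pointer used only to compute per-queue
	 * register offsets; the same idiom recurs in __bna_txf_start(),
	 * __bna_txf_stop() and bna_tx_create() below. */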
3426 q_mem = (struct bna_rxtx_q_mem *)0;
3427 txq_mem = &q_mem[txq->txq_id].txq;
3428
3429 /*
 3430	 * The following four address writes are a hack because the H/W
 3431	 * needs to read these DMA addresses as little endian
3432 */
3433
3434 off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
3435 writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
3436
3437 off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
3438 writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
3439
3440 off = (unsigned long)&txq_mem->cur_q_entry_lo;
3441 writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
3442
3443 off = (unsigned long)&txq_mem->cur_q_entry_hi;
3444 writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
3445
3446 off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
3447 writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
3448
3449 off = (unsigned long)&txq_mem->entry_n_pg_size;
3450 writel(txq_cfg.entry_n_pg_size, base_addr + off);
3451
3452 off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
3453 writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
3454
3455 off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
3456 writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
3457
3458 off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
3459 writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
3460
3461 off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
3462 writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
3463
3464 txq->tcb->producer_index = 0;
3465 txq->tcb->consumer_index = 0;
3466 *(txq->tcb->hw_consumer_index) = 0;
3467
3468}
3469
3470static void
3471__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
3472{
3473 struct bfi_ll_q_stop_req ll_req;
3474 u32 bit_mask[2] = {0, 0};
3475 if (txq->txq_id < 32)
3476 bit_mask[0] = (u32)1 << txq->txq_id;
3477 else
3478 bit_mask[1] = (u32)1 << (txq->txq_id - 32);
3479
3480 memset(&ll_req, 0, sizeof(ll_req));
3481 ll_req.mh.msg_class = BFI_MC_LL;
3482 ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
3483 ll_req.mh.mtag.h2i.lpu_id = 0;
3484 ll_req.q_id_mask[0] = htonl(bit_mask[0]);
3485 ll_req.q_id_mask[1] = htonl(bit_mask[1]);
3486
3487 bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3488 bna_tx_cb_txq_stopped, tx);
3489
3490 bna_mbox_send(tx->bna, &tx->mbox_qe);
3491}
3492
3493static void
3494__bna_txf_start(struct bna_tx *tx)
3495{
3496 struct bna_tx_fndb_ram *tx_fndb;
3497 struct bna_txf *txf = &tx->txf;
3498 void __iomem *base_addr;
3499 unsigned long off;
3500
3501 writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3502 (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
3503 tx->bna->regs.page_addr);
3504
3505 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3506 TX_FNDB_RAM_BASE_OFFSET);
3507
3508 tx_fndb = (struct bna_tx_fndb_ram *)0;
3509 off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3510
3511 writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
3512 base_addr + off);
3513
3514 if (tx->txf.txf_id < 32)
3515 tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
3516 else
3517 tx->bna->tx_mod.txf_bmap[1] |= ((u32)
3518 1 << (tx->txf.txf_id - 32));
3519}
3520
3521static void
3522__bna_txf_stop(struct bna_tx *tx)
3523{
3524 struct bna_tx_fndb_ram *tx_fndb;
3525 u32 page_num;
3526 u32 ctl_flags;
3527 struct bna_txf *txf = &tx->txf;
3528 void __iomem *base_addr;
3529 unsigned long off;
3530
3531 /* retrieve the running txf_flags & turn off enable bit */
3532 page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3533 (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
3534 writel(page_num, tx->bna->regs.page_addr);
3535
3536 base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3537 TX_FNDB_RAM_BASE_OFFSET);
3538 tx_fndb = (struct bna_tx_fndb_ram *)0;
3539 off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3540
3541 ctl_flags = readl(base_addr + off);
3542 ctl_flags &= ~BFI_TXF_CF_ENABLE;
3543
3544 writel(ctl_flags, base_addr + off);
3545
3546 if (tx->txf.txf_id < 32)
3547 tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
3548 else
 3549		tx->bna->tx_mod.txf_bmap[1] &= ~((u32)
 3550			1 << (tx->txf.txf_id - 32));
3551}
3552
3553static void
3554__bna_txf_stat_clr(struct bna_tx *tx)
3555{
3556 struct bfi_ll_stats_req ll_req;
3557 u32 txf_bmap[2] = {0, 0};
3558 if (tx->txf.txf_id < 32)
3559 txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
3560 else
3561 txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
3562 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
3563 ll_req.stats_mask = 0;
3564 ll_req.rxf_id_mask[0] = 0;
3565 ll_req.rxf_id_mask[1] = 0;
3566 ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
3567 ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
3568
3569 bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3570 bna_tx_cb_stats_cleared, tx);
3571 bna_mbox_send(tx->bna, &tx->mbox_qe);
3572}
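
The id-below-32 split recurs in __bna_txq_stop(), __bna_txf_start(), __bna_txf_stop() and here; a hypothetical helper pair would keep the four copies in sync:

	/* Hypothetical helpers (not in the driver): maintain a 64-bit id mask
	 * stored as two u32 words, the layout the H2I requests expect. */
	static inline void bna_bmap_set(u32 bmap[2], int id)
	{
		bmap[id >> 5] |= (u32)1 << (id & 31);
	}

	static inline void bna_bmap_clear(u32 bmap[2], int id)
	{
		bmap[id >> 5] &= ~((u32)1 << (id & 31));
	}
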
3573
3574static void
3575__bna_tx_start(struct bna_tx *tx)
3576{
3577 struct bna_txq *txq;
3578 struct list_head *qe;
3579
3580 list_for_each(qe, &tx->txq_q) {
3581 txq = (struct bna_txq *)qe;
3582 bna_ib_start(txq->ib);
3583 __bna_txq_start(tx, txq);
3584 }
3585
3586 __bna_txf_start(tx);
3587
3588 list_for_each(qe, &tx->txq_q) {
3589 txq = (struct bna_txq *)qe;
3590 txq->tcb->priority = txq->priority;
3591 (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
3592 }
3593}
3594
3595static void
3596__bna_tx_stop(struct bna_tx *tx)
3597{
3598 struct bna_txq *txq;
3599 struct list_head *qe;
3600
3601 list_for_each(qe, &tx->txq_q) {
3602 txq = (struct bna_txq *)qe;
3603 (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3604 }
3605
3606 __bna_txf_stop(tx);
3607
3608 list_for_each(qe, &tx->txq_q) {
3609 txq = (struct bna_txq *)qe;
3610 bfa_wc_up(&tx->txq_stop_wc);
3611 }
3612
3613 list_for_each(qe, &tx->txq_q) {
3614 txq = (struct bna_txq *)qe;
3615 __bna_txq_stop(tx, txq);
3616 }
3617}
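
__bna_tx_stop() raises txq_stop_wc once per TxQ before issuing the per-queue stop requests; each bna_tx_cb_txq_stopped() mailbox completion drops it, and when the count reaches zero the wc_resume hook (bna_tx_cb_txq_stopped_all(), wired up in bna_tx_create()) sends TX_E_TXQ_STOPPED. A sketch of the waiting-counter semantics this assumes (the real implementation is in bfa_wc.h):

	/* Assumed bfa_wc behavior: resume runs when the count returns to zero. */
	static inline void ex_wc_up(struct bfa_wc *wc)
	{
		wc->wc_count++;
	}

	static inline void ex_wc_down(struct bfa_wc *wc)
	{
		wc->wc_count--;
		if (wc->wc_count == 0)
			wc->wc_resume(wc->wc_cbarg);
	}
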
3618
3619static void
3620bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3621 struct bna_mem_descr *qpt_mem,
3622 struct bna_mem_descr *swqpt_mem,
3623 struct bna_mem_descr *page_mem)
3624{
3625 int i;
3626
3627 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3628 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3629 txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3630 txq->qpt.page_count = page_count;
3631 txq->qpt.page_size = page_size;
3632
3633 txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3634
3635 for (i = 0; i < page_count; i++) {
3636 txq->tcb->sw_qpt[i] = page_mem[i].kva;
3637
3638 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3639 page_mem[i].dma.lsb;
3640 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3641 page_mem[i].dma.msb;
3642
3643 }
3644}
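
Note the dual bookkeeping in bna_txq_qpt_setup(): kv_qpt_ptr holds the DMA page addresses the hardware walks, while tcb->sw_qpt shadows the same pages as kernel virtual addresses so the fast path can reach work items without translating addresses.
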
3645
3646static void
3647bna_tx_free(struct bna_tx *tx)
3648{
3649 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3650 struct bna_txq *txq;
3651 struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
3652 struct list_head *qe;
3653
3654 while (!list_empty(&tx->txq_q)) {
3655 bfa_q_deq(&tx->txq_q, &txq);
3656 bfa_q_qe_init(&txq->qe);
3657 if (txq->ib) {
3658 if (txq->ib_seg_offset != -1)
3659 bna_ib_release_idx(txq->ib,
3660 txq->ib_seg_offset);
3661 bna_ib_put(ib_mod, txq->ib);
3662 txq->ib = NULL;
3663 }
3664 txq->tcb = NULL;
3665 txq->tx = NULL;
3666 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3667 }
3668
3669 list_for_each(qe, &tx_mod->tx_active_q) {
3670 if (qe == &tx->qe) {
3671 list_del(&tx->qe);
3672 bfa_q_qe_init(&tx->qe);
3673 break;
3674 }
3675 }
3676
3677 tx->bna = NULL;
3678 tx->priv = NULL;
3679 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3680}
3681
3682static void
3683bna_tx_cb_txq_stopped(void *arg, int status)
3684{
3685 struct bna_tx *tx = (struct bna_tx *)arg;
3686
3687 bfa_q_qe_init(&tx->mbox_qe.qe);
3688 bfa_wc_down(&tx->txq_stop_wc);
3689}
3690
3691static void
3692bna_tx_cb_txq_stopped_all(void *arg)
3693{
3694 struct bna_tx *tx = (struct bna_tx *)arg;
3695
3696 bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
3697}
3698
3699static void
3700bna_tx_cb_stats_cleared(void *arg, int status)
3701{
3702 struct bna_tx *tx = (struct bna_tx *)arg;
3703
3704 bfa_q_qe_init(&tx->mbox_qe.qe);
3705
3706 bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
3707}
3708
3709static void
3710bna_tx_start(struct bna_tx *tx)
3711{
3712 tx->flags |= BNA_TX_F_PORT_STARTED;
3713 if (tx->flags & BNA_TX_F_ENABLED)
3714 bfa_fsm_send_event(tx, TX_E_START);
3715}
3716
3717static void
3718bna_tx_stop(struct bna_tx *tx)
3719{
3720 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3721 tx->stop_cbarg = &tx->bna->tx_mod;
3722
3723 tx->flags &= ~BNA_TX_F_PORT_STARTED;
3724 bfa_fsm_send_event(tx, TX_E_STOP);
3725}
3726
3727static void
3728bna_tx_fail(struct bna_tx *tx)
3729{
3730 tx->flags &= ~BNA_TX_F_PORT_STARTED;
3731 bfa_fsm_send_event(tx, TX_E_FAIL);
3732}
3733
3734void
3735bna_tx_prio_changed(struct bna_tx *tx, int prio)
3736{
3737 struct bna_txq *txq;
3738 struct list_head *qe;
3739
3740 list_for_each(qe, &tx->txq_q) {
3741 txq = (struct bna_txq *)qe;
3742 txq->priority = prio;
3743 }
3744
3745 bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
3746}
3747
3748static void
3749bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
3750{
3751 if (cee_link)
3752 tx->flags |= BNA_TX_F_PRIO_LOCK;
3753 else
3754 tx->flags &= ~BNA_TX_F_PRIO_LOCK;
3755}
3756
3757static void
3758bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
3759 enum bna_cb_status status)
3760{
3761 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3762
3763 bfa_wc_down(&tx_mod->tx_stop_wc);
3764}
3765
3766static void
3767bna_tx_mod_cb_tx_stopped_all(void *arg)
3768{
3769 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3770
3771 if (tx_mod->stop_cbfn)
3772 tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
3773 tx_mod->stop_cbfn = NULL;
3774}
3775
3776void
3777bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3778{
3779 u32 q_size;
3780 u32 page_count;
3781 struct bna_mem_info *mem_info;
3782
3783 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3784 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3785 mem_info->mem_type = BNA_MEM_T_KVA;
3786 mem_info->len = sizeof(struct bna_tcb);
3787 mem_info->num = num_txq;
3788
3789 q_size = txq_depth * BFI_TXQ_WI_SIZE;
3790 q_size = ALIGN(q_size, PAGE_SIZE);
3791 page_count = q_size >> PAGE_SHIFT;
3792
3793 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3794 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3795 mem_info->mem_type = BNA_MEM_T_DMA;
3796 mem_info->len = page_count * sizeof(struct bna_dma_addr);
3797 mem_info->num = num_txq;
3798
3799 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3800 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3801 mem_info->mem_type = BNA_MEM_T_KVA;
3802 mem_info->len = page_count * sizeof(void *);
3803 mem_info->num = num_txq;
3804
3805 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3806 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3807 mem_info->mem_type = BNA_MEM_T_DMA;
3808 mem_info->len = PAGE_SIZE;
3809 mem_info->num = num_txq * page_count;
3810
3811 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3812 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3813 BNA_INTR_T_MSIX;
3814 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3815}
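
For scale: assuming BFI_TXQ_WI_SIZE is 64 bytes (see bna_hw.h) and 4 KiB pages, txq_depth = 2048 gives q_size = 128 KiB and page_count = 32; a num_txq = 4 configuration therefore requests 128 one-page DMA chunks, plus a 256-byte QPT (eight bytes per bna_dma_addr entry) and a 32-entry s/w QPT per TxQ.
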
3816
3817struct bna_tx *
3818bna_tx_create(struct bna *bna, struct bnad *bnad,
3819 struct bna_tx_config *tx_cfg,
3820 struct bna_tx_event_cbfn *tx_cbfn,
3821 struct bna_res_info *res_info, void *priv)
3822{
3823 struct bna_intr_info *intr_info;
3824 struct bna_tx_mod *tx_mod = &bna->tx_mod;
3825 struct bna_tx *tx;
3826 struct bna_txq *txq;
3827 struct list_head *qe;
3828 struct bna_ib_mod *ib_mod = &bna->ib_mod;
3829 struct bna_doorbell_qset *qset;
3830 struct bna_ib_config ib_config;
3831 int page_count;
3832 int page_size;
3833 int page_idx;
3834 int i;
3835 unsigned long off;
3836
3837 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3838 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
3839 tx_cfg->num_txq;
3840 page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
3841
3842 /**
3843 * Get resources
3844 */
3845
3846 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3847 return NULL;
3848
3849 /* Tx */
3850
3851 if (list_empty(&tx_mod->tx_free_q))
3852 return NULL;
3853 bfa_q_deq(&tx_mod->tx_free_q, &tx);
3854 bfa_q_qe_init(&tx->qe);
3855
3856 /* TxQs */
3857
3858 INIT_LIST_HEAD(&tx->txq_q);
3859 for (i = 0; i < tx_cfg->num_txq; i++) {
3860 if (list_empty(&tx_mod->txq_free_q))
3861 goto err_return;
3862
3863 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3864 bfa_q_qe_init(&txq->qe);
3865 list_add_tail(&txq->qe, &tx->txq_q);
3866 txq->ib = NULL;
3867 txq->ib_seg_offset = -1;
3868 txq->tx = tx;
3869 }
3870
3871 /* IBs */
3872 i = 0;
3873 list_for_each(qe, &tx->txq_q) {
3874 txq = (struct bna_txq *)qe;
3875
3876 if (intr_info->num == 1)
3877 txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
3878 intr_info->idl[0].vector);
3879 else
3880 txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
3881 intr_info->idl[i].vector);
3882
3883 if (txq->ib == NULL)
3884 goto err_return;
3885
3886 txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
3887 if (txq->ib_seg_offset == -1)
3888 goto err_return;
3889
3890 i++;
3891 }
3892
3893 /*
3894 * Initialize
3895 */
3896
3897 /* Tx */
3898
3899 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3900 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3901 /* Following callbacks are mandatory */
3902 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3903 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3904 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3905
3906 list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3907 tx->bna = bna;
3908 tx->priv = priv;
3909 tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
3910 tx->txq_stop_wc.wc_cbarg = tx;
3911 tx->txq_stop_wc.wc_count = 0;
3912
3913 tx->type = tx_cfg->tx_type;
3914
3915 tx->flags = 0;
3916 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
3917 switch (tx->type) {
3918 case BNA_TX_T_REGULAR:
3919 if (!(tx->bna->tx_mod.flags &
3920 BNA_TX_MOD_F_PORT_LOOPBACK))
3921 tx->flags |= BNA_TX_F_PORT_STARTED;
3922 break;
3923 case BNA_TX_T_LOOPBACK:
3924 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
3925 tx->flags |= BNA_TX_F_PORT_STARTED;
3926 break;
3927 }
3928 }
3929 if (tx->bna->tx_mod.cee_link)
3930 tx->flags |= BNA_TX_F_PRIO_LOCK;
3931
3932 /* TxQ */
3933
3934 i = 0;
3935 page_idx = 0;
3936 list_for_each(qe, &tx->txq_q) {
3937 txq = (struct bna_txq *)qe;
3938 txq->priority = tx_mod->priority;
3939 txq->tcb = (struct bna_tcb *)
3940 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3941 txq->tx_packets = 0;
3942 txq->tx_bytes = 0;
3943
3944 /* IB */
3945
3946 ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3947 ib_config.interpkt_timeo = 0; /* Not used */
3948 ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
3949 ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
3950 BFI_IB_CF_INT_ENABLE |
3951 BFI_IB_CF_COALESCING_MODE);
3952 bna_ib_config(txq->ib, &ib_config);
3953
3954 /* TCB */
3955
3956 txq->tcb->producer_index = 0;
3957 txq->tcb->consumer_index = 0;
3958 txq->tcb->hw_consumer_index = (volatile u32 *)
3959 ((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
3960 (txq->ib_seg_offset * BFI_IBIDX_SIZE));
3961 *(txq->tcb->hw_consumer_index) = 0;
3962 txq->tcb->q_depth = tx_cfg->txq_depth;
3963 txq->tcb->unmap_q = (void *)
3964 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3965 qset = (struct bna_doorbell_qset *)0;
3966 off = (unsigned long)&qset[txq->txq_id].txq[0];
3967 txq->tcb->q_dbell = off +
3968 BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
3969 txq->tcb->i_dbell = &txq->ib->door_bell;
3970 txq->tcb->intr_type = intr_info->intr_type;
3971 txq->tcb->intr_vector = (intr_info->num == 1) ?
3972 intr_info->idl[0].vector :
3973 intr_info->idl[i].vector;
3974 txq->tcb->txq = txq;
3975 txq->tcb->bnad = bnad;
3976 txq->tcb->id = i;
3977
3978 /* QPT, SWQPT, Pages */
3979 bna_txq_qpt_setup(txq, page_count, page_size,
3980 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3981 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3982 &res_info[BNA_TX_RES_MEM_T_PAGE].
3983 res_u.mem_info.mdl[page_idx]);
3984 txq->tcb->page_idx = page_idx;
3985 txq->tcb->page_count = page_count;
3986 page_idx += page_count;
3987
3988 /* Callback to bnad for setting up TCB */
3989 if (tx->tcb_setup_cbfn)
3990 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3991
3992 i++;
3993 }
3994
3995 /* TxF */
3996
3997 tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
3998 tx->txf.vlan = 0;
3999
4000 /* Mbox element */
4001 bfa_q_qe_init(&tx->mbox_qe.qe);
4002
4003 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
4004
4005 return tx;
4006
4007err_return:
4008 bna_tx_free(tx);
4009 return NULL;
4010}
4011
4012void
4013bna_tx_destroy(struct bna_tx *tx)
4014{
4015 /* Callback to bnad for destroying TCB */
4016 if (tx->tcb_destroy_cbfn) {
4017 struct bna_txq *txq;
4018 struct list_head *qe;
4019
4020 list_for_each(qe, &tx->txq_q) {
4021 txq = (struct bna_txq *)qe;
4022 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
4023 }
4024 }
4025
4026 bna_tx_free(tx);
4027}
4028
4029void
4030bna_tx_enable(struct bna_tx *tx)
4031{
4032 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
4033 return;
4034
4035 tx->flags |= BNA_TX_F_ENABLED;
4036
4037 if (tx->flags & BNA_TX_F_PORT_STARTED)
4038 bfa_fsm_send_event(tx, TX_E_START);
4039}
4040
4041void
4042bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
4043 void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
4044{
4045 if (type == BNA_SOFT_CLEANUP) {
4046 (*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
4047 return;
4048 }
4049
4050 tx->stop_cbfn = cbfn;
4051 tx->stop_cbarg = tx->bna->bnad;
4052
4053 tx->flags &= ~BNA_TX_F_ENABLED;
4054
4055 bfa_fsm_send_event(tx, TX_E_STOP);
4056}
4057
4058int
4059bna_tx_state_get(struct bna_tx *tx)
4060{
4061 return bfa_sm_to_state(tx_sm_table, tx->fsm);
4062}
4063
4064void
4065bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
4066 struct bna_res_info *res_info)
4067{
4068 int i;
4069
4070 tx_mod->bna = bna;
4071 tx_mod->flags = 0;
4072
4073 tx_mod->tx = (struct bna_tx *)
4074 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
4075 tx_mod->txq = (struct bna_txq *)
4076 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
4077
4078 INIT_LIST_HEAD(&tx_mod->tx_free_q);
4079 INIT_LIST_HEAD(&tx_mod->tx_active_q);
4080
4081 INIT_LIST_HEAD(&tx_mod->txq_free_q);
4082
4083 for (i = 0; i < BFI_MAX_TXQ; i++) {
4084 tx_mod->tx[i].txf.txf_id = i;
4085 bfa_q_qe_init(&tx_mod->tx[i].qe);
4086 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
4087
4088 tx_mod->txq[i].txq_id = i;
4089 bfa_q_qe_init(&tx_mod->txq[i].qe);
4090 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
4091 }
4092
4093 tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
4094 tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
4095 tx_mod->tx_stop_wc.wc_count = 0;
4096}
4097
4098void
4099bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
4100{
4101 struct list_head *qe;
4102 int i;
4103
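	/* Same count-only sanity walk as bna_rx_mod_uninit() */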
4104 i = 0;
4105 list_for_each(qe, &tx_mod->tx_free_q)
4106 i++;
4107
4108 i = 0;
4109 list_for_each(qe, &tx_mod->txq_free_q)
4110 i++;
4111
4112 tx_mod->bna = NULL;
4113}
4114
4115void
4116bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
4117{
4118 struct bna_tx *tx;
4119 struct list_head *qe;
4120
4121 tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
4122 if (type == BNA_TX_T_LOOPBACK)
4123 tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;
4124
4125 list_for_each(qe, &tx_mod->tx_active_q) {
4126 tx = (struct bna_tx *)qe;
4127 if (tx->type == type)
4128 bna_tx_start(tx);
4129 }
4130}
4131
4132void
4133bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
4134{
4135 struct bna_tx *tx;
4136 struct list_head *qe;
4137
4138 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
4139 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
4140
4141 tx_mod->stop_cbfn = bna_port_cb_tx_stopped;
4142
4143 /**
4144 * Before calling bna_tx_stop(), increment tx_stop_wc as many times
4145 * as we are going to call bna_tx_stop
4146 */
4147 list_for_each(qe, &tx_mod->tx_active_q) {
4148 tx = (struct bna_tx *)qe;
4149 if (tx->type == type)
4150 bfa_wc_up(&tx_mod->tx_stop_wc);
4151 }
4152
4153 if (tx_mod->tx_stop_wc.wc_count == 0) {
4154 tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
4155 tx_mod->stop_cbfn = NULL;
4156 return;
4157 }
4158
4159 list_for_each(qe, &tx_mod->tx_active_q) {
4160 tx = (struct bna_tx *)qe;
4161 if (tx->type == type)
4162 bna_tx_stop(tx);
4163 }
4164}
4165
4166void
4167bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
4168{
4169 struct bna_tx *tx;
4170 struct list_head *qe;
4171
4172 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
4173 tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
4174
4175 list_for_each(qe, &tx_mod->tx_active_q) {
4176 tx = (struct bna_tx *)qe;
4177 bna_tx_fail(tx);
4178 }
4179}
4180
4181void
4182bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
4183{
4184 struct bna_tx *tx;
4185 struct list_head *qe;
4186
4187 if (prio != tx_mod->priority) {
4188 tx_mod->priority = prio;
4189
4190 list_for_each(qe, &tx_mod->tx_active_q) {
4191 tx = (struct bna_tx *)qe;
4192 bna_tx_prio_changed(tx, prio);
4193 }
4194 }
4195}
4196
4197void
4198bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
4199{
4200 struct bna_tx *tx;
4201 struct list_head *qe;
4202
4203 tx_mod->cee_link = cee_link;
4204
4205 list_for_each(qe, &tx_mod->tx_active_q) {
4206 tx = (struct bna_tx *)qe;
4207 bna_tx_cee_link_status(tx, cee_link);
4208 }
4209}
diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
new file mode 100644
index 000000000000..6877310f6ef4
--- /dev/null
+++ b/drivers/net/bna/bna_types.h
@@ -0,0 +1,1128 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BNA_TYPES_H__
19#define __BNA_TYPES_H__
20
21#include "cna.h"
22#include "bna_hw.h"
23#include "bfa_cee.h"
24
25/**
26 *
27 * Forward declarations
28 *
29 */
30
31struct bna_txq;
32struct bna_tx;
33struct bna_rxq;
34struct bna_cq;
35struct bna_rx;
36struct bna_rxf;
37struct bna_port;
38struct bna;
39struct bnad;
40
41/**
42 *
43 * Enums, primitive data types
44 *
45 */
46
47enum bna_status {
48 BNA_STATUS_T_DISABLED = 0,
49 BNA_STATUS_T_ENABLED = 1
50};
51
52enum bna_cleanup_type {
53 BNA_HARD_CLEANUP = 0,
54 BNA_SOFT_CLEANUP = 1
55};
56
57enum bna_cb_status {
58 BNA_CB_SUCCESS = 0,
59 BNA_CB_FAIL = 1,
60 BNA_CB_INTERRUPT = 2,
61 BNA_CB_BUSY = 3,
62 BNA_CB_INVALID_MAC = 4,
63 BNA_CB_MCAST_LIST_FULL = 5,
64 BNA_CB_UCAST_CAM_FULL = 6,
65 BNA_CB_WAITING = 7,
66 BNA_CB_NOT_EXEC = 8
67};
68
69enum bna_res_type {
70 BNA_RES_T_MEM = 1,
71 BNA_RES_T_INTR = 2
72};
73
74enum bna_mem_type {
75 BNA_MEM_T_KVA = 1,
76 BNA_MEM_T_DMA = 2
77};
78
79enum bna_intr_type {
80 BNA_INTR_T_INTX = 1,
81 BNA_INTR_T_MSIX = 2
82};
83
84enum bna_res_req_type {
85 BNA_RES_MEM_T_COM = 0,
86 BNA_RES_MEM_T_ATTR = 1,
87 BNA_RES_MEM_T_FWTRC = 2,
88 BNA_RES_MEM_T_STATS = 3,
89 BNA_RES_MEM_T_SWSTATS = 4,
90 BNA_RES_MEM_T_IBIDX = 5,
91 BNA_RES_MEM_T_IB_ARRAY = 6,
92 BNA_RES_MEM_T_INTR_ARRAY = 7,
93 BNA_RES_MEM_T_IDXSEG_ARRAY = 8,
94 BNA_RES_MEM_T_TX_ARRAY = 9,
95 BNA_RES_MEM_T_TXQ_ARRAY = 10,
96 BNA_RES_MEM_T_RX_ARRAY = 11,
97 BNA_RES_MEM_T_RXP_ARRAY = 12,
98 BNA_RES_MEM_T_RXQ_ARRAY = 13,
99 BNA_RES_MEM_T_UCMAC_ARRAY = 14,
100 BNA_RES_MEM_T_MCMAC_ARRAY = 15,
101 BNA_RES_MEM_T_RIT_ENTRY = 16,
102 BNA_RES_MEM_T_RIT_SEGMENT = 17,
103 BNA_RES_INTR_T_MBOX = 18,
104 BNA_RES_T_MAX
105};
106
107enum bna_tx_res_req_type {
108 BNA_TX_RES_MEM_T_TCB = 0,
109 BNA_TX_RES_MEM_T_UNMAPQ = 1,
110 BNA_TX_RES_MEM_T_QPT = 2,
111 BNA_TX_RES_MEM_T_SWQPT = 3,
112 BNA_TX_RES_MEM_T_PAGE = 4,
113 BNA_TX_RES_INTR_T_TXCMPL = 5,
114 BNA_TX_RES_T_MAX,
115};
116
117enum bna_rx_mem_type {
 118	BNA_RX_RES_MEM_T_CCB = 0,	/* CQ context */
 119	BNA_RX_RES_MEM_T_RCB = 1,	/* RxQ context */
120 BNA_RX_RES_MEM_T_UNMAPQ = 2, /* UnmapQ for RxQs */
121 BNA_RX_RES_MEM_T_CQPT = 3, /* CQ QPT */
122 BNA_RX_RES_MEM_T_CSWQPT = 4, /* S/W QPT */
123 BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
 124	BNA_RX_RES_MEM_T_HQPT = 6,	/* RX hdr QPT */
 125	BNA_RX_RES_MEM_T_DQPT = 7,	/* RX data QPT */
 126	BNA_RX_RES_MEM_T_HSWQPT = 8,	/* RX hdr s/w QPT */
 127	BNA_RX_RES_MEM_T_DSWQPT = 9,	/* RX data s/w QPT */
 128	BNA_RX_RES_MEM_T_DPAGE = 10,	/* RX data pages */
 129	BNA_RX_RES_MEM_T_HPAGE = 11,	/* RX hdr pages */
130 BNA_RX_RES_T_INTR = 12, /* Rx interrupts */
131 BNA_RX_RES_T_MAX = 13
132};
133
134enum bna_mbox_state {
135 BNA_MBOX_FREE = 0,
136 BNA_MBOX_POSTED = 1
137};
138
139enum bna_tx_type {
140 BNA_TX_T_REGULAR = 0,
141 BNA_TX_T_LOOPBACK = 1,
142};
143
144enum bna_tx_flags {
145 BNA_TX_F_PORT_STARTED = 1,
146 BNA_TX_F_ENABLED = 2,
147 BNA_TX_F_PRIO_LOCK = 4,
148};
149
150enum bna_tx_mod_flags {
151 BNA_TX_MOD_F_PORT_STARTED = 1,
152 BNA_TX_MOD_F_PORT_LOOPBACK = 2,
153};
154
155enum bna_rx_type {
156 BNA_RX_T_REGULAR = 0,
157 BNA_RX_T_LOOPBACK = 1,
158};
159
160enum bna_rxp_type {
161 BNA_RXP_SINGLE = 1,
162 BNA_RXP_SLR = 2,
163 BNA_RXP_HDS = 3
164};
165
166enum bna_rxmode {
167 BNA_RXMODE_PROMISC = 1,
168 BNA_RXMODE_DEFAULT = 2,
169 BNA_RXMODE_ALLMULTI = 4
170};
171
172enum bna_rx_event {
173 RX_E_START = 1,
174 RX_E_STOP = 2,
175 RX_E_FAIL = 3,
176 RX_E_RXF_STARTED = 4,
177 RX_E_RXF_STOPPED = 5,
178 RX_E_RXQ_STOPPED = 6,
179};
180
181enum bna_rx_state {
182 BNA_RX_STOPPED = 1,
183 BNA_RX_RXF_START_WAIT = 2,
184 BNA_RX_STARTED = 3,
185 BNA_RX_RXF_STOP_WAIT = 4,
186 BNA_RX_RXQ_STOP_WAIT = 5,
187};
188
189enum bna_rx_flags {
190 BNA_RX_F_ENABLE = 0x01, /* bnad enabled rxf */
191 BNA_RX_F_PORT_ENABLED = 0x02, /* Port object is enabled */
192 BNA_RX_F_PORT_FAILED = 0x04, /* Port in failed state */
193};
194
195enum bna_rx_mod_flags {
196 BNA_RX_MOD_F_PORT_STARTED = 1,
197 BNA_RX_MOD_F_PORT_LOOPBACK = 2,
198};
199
200enum bna_rxf_oper_state {
201 BNA_RXF_OPER_STATE_RUNNING = 0x01, /* rxf operational */
202 BNA_RXF_OPER_STATE_PAUSED = 0x02, /* rxf in PAUSED state */
203};
204
205enum bna_rxf_flags {
206 BNA_RXF_FL_STOP_PENDING = 0x01,
207 BNA_RXF_FL_FAILED = 0x02,
208 BNA_RXF_FL_RSS_CONFIG_PENDING = 0x04,
209 BNA_RXF_FL_OPERSTATE_CHANGED = 0x08,
210 BNA_RXF_FL_RXF_ENABLED = 0x10,
211 BNA_RXF_FL_VLAN_CONFIG_PENDING = 0x20,
212};
213
214enum bna_rxf_event {
215 RXF_E_START = 1,
216 RXF_E_STOP = 2,
217 RXF_E_FAIL = 3,
218 RXF_E_CAM_FLTR_MOD = 4,
219 RXF_E_STARTED = 5,
220 RXF_E_STOPPED = 6,
221 RXF_E_CAM_FLTR_RESP = 7,
222 RXF_E_PAUSE = 8,
223 RXF_E_RESUME = 9,
224 RXF_E_STAT_CLEARED = 10,
225};
226
227enum bna_rxf_state {
228 BNA_RXF_STOPPED = 1,
229 BNA_RXF_START_WAIT = 2,
230 BNA_RXF_CAM_FLTR_MOD_WAIT = 3,
231 BNA_RXF_STARTED = 4,
232 BNA_RXF_CAM_FLTR_CLR_WAIT = 5,
233 BNA_RXF_STOP_WAIT = 6,
234 BNA_RXF_PAUSE_WAIT = 7,
235 BNA_RXF_RESUME_WAIT = 8,
236 BNA_RXF_STAT_CLR_WAIT = 9,
237};
238
239enum bna_port_type {
240 BNA_PORT_T_REGULAR = 0,
241 BNA_PORT_T_LOOPBACK_INTERNAL = 1,
242 BNA_PORT_T_LOOPBACK_EXTERNAL = 2,
243};
244
245enum bna_link_status {
246 BNA_LINK_DOWN = 0,
247 BNA_LINK_UP = 1,
248 BNA_CEE_UP = 2
249};
250
251enum bna_llport_flags {
252 BNA_LLPORT_F_ENABLED = 1,
253 BNA_LLPORT_F_RX_ENABLED = 2
254};
255
256enum bna_port_flags {
257 BNA_PORT_F_DEVICE_READY = 1,
258 BNA_PORT_F_ENABLED = 2,
259 BNA_PORT_F_PAUSE_CHANGED = 4,
260 BNA_PORT_F_MTU_CHANGED = 8
261};
262
263enum bna_pkt_rates {
264 BNA_PKT_RATE_10K = 10000,
265 BNA_PKT_RATE_20K = 20000,
266 BNA_PKT_RATE_30K = 30000,
267 BNA_PKT_RATE_40K = 40000,
268 BNA_PKT_RATE_50K = 50000,
269 BNA_PKT_RATE_60K = 60000,
270 BNA_PKT_RATE_70K = 70000,
271 BNA_PKT_RATE_80K = 80000,
272};
273
274enum bna_dim_load_types {
275 BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */
276 BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */
277 BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */
278 BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */
279 BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */
280 BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */
281 BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */
282 BNA_LOAD_T_LOW_4 = 7, /* r < 10K */
283 BNA_LOAD_T_MAX = 8
284};
285
286enum bna_dim_bias_types {
287 BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */
288 BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */
289 BNA_BIAS_T_MAX = 2
290};
291
292struct bna_mac {
293 /* This should be the first one */
294 struct list_head qe;
295 u8 addr[ETH_ALEN];
296};
297
298struct bna_mem_descr {
299 u32 len;
300 void *kva;
301 struct bna_dma_addr dma;
302};
303
304struct bna_mem_info {
305 enum bna_mem_type mem_type;
306 u32 len;
307 u32 num;
308 u32 align_sz; /* 0/1 = no alignment */
309 struct bna_mem_descr *mdl;
310 void *cookie; /* For bnad to unmap dma later */
311};
312
313struct bna_intr_descr {
314 int vector;
315};
316
317struct bna_intr_info {
318 enum bna_intr_type intr_type;
319 int num;
320 struct bna_intr_descr *idl;
321};
322
323union bna_res_u {
324 struct bna_mem_info mem_info;
325 struct bna_intr_info intr_info;
326};
327
328struct bna_res_info {
329 enum bna_res_type res_type;
330 union bna_res_u res_u;
331};
332
333/* HW QPT */
334struct bna_qpt {
335 struct bna_dma_addr hw_qpt_ptr;
336 void *kv_qpt_ptr;
337 u32 page_count;
338 u32 page_size;
339};
340
341/**
342 *
343 * Device
344 *
345 */
346
347struct bna_device {
348 bfa_fsm_t fsm;
349 struct bfa_ioc ioc;
350
351 enum bna_intr_type intr_type;
352 int vector;
353
354 void (*ready_cbfn)(struct bnad *bnad, enum bna_cb_status status);
355 struct bnad *ready_cbarg;
356
357 void (*stop_cbfn)(struct bnad *bnad, enum bna_cb_status status);
358 struct bnad *stop_cbarg;
359
360 struct bna *bna;
361};
362
363/**
364 *
365 * Mail box
366 *
367 */
368
369struct bna_mbox_qe {
370 /* This should be the first one */
371 struct list_head qe;
372
373 struct bfa_mbox_cmd cmd;
374 u32 cmd_len;
375 /* Callback for port, tx, rx, rxf */
376 void (*cbfn)(void *arg, int status);
377 void *cbarg;
378};
379
380struct bna_mbox_mod {
381 enum bna_mbox_state state;
382 struct list_head posted_q;
383 u32 msg_pending;
384 u32 msg_ctr;
385 struct bna *bna;
386};
387
388/**
389 *
390 * Port
391 *
392 */
393
394/* Pause configuration */
395struct bna_pause_config {
396 enum bna_status tx_pause;
397 enum bna_status rx_pause;
398};
399
400struct bna_llport {
401 bfa_fsm_t fsm;
402 enum bna_llport_flags flags;
403
404 enum bna_port_type type;
405
406 enum bna_link_status link_status;
407
408 int admin_up_count;
409
410 void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
411
412 struct bna_mbox_qe mbox_qe;
413
414 struct bna *bna;
415};
416
417struct bna_port {
418 bfa_fsm_t fsm;
419 enum bna_port_flags flags;
420
421 enum bna_port_type type;
422
423 struct bna_llport llport;
424
425 struct bna_pause_config pause_config;
426 u8 priority;
427 int mtu;
428
429 /* Callback for bna_port_disable(), port_stop() */
430 void (*stop_cbfn)(void *, enum bna_cb_status);
431 void *stop_cbarg;
432
433 /* Callback for bna_port_pause_config() */
434 void (*pause_cbfn)(struct bnad *, enum bna_cb_status);
435
436 /* Callback for bna_port_mtu_set() */
437 void (*mtu_cbfn)(struct bnad *, enum bna_cb_status);
438
439 void (*link_cbfn)(struct bnad *, enum bna_link_status);
440
441 struct bfa_wc chld_stop_wc;
442
443 struct bna_mbox_qe mbox_qe;
444
445 struct bna *bna;
446};
447
448/**
449 *
450 * Interrupt Block
451 *
452 */
453
454/* IB index segment structure */
455struct bna_ibidx_seg {
456 /* This should be the first one */
457 struct list_head qe;
458
459 u8 ib_seg_size;
460 u8 ib_idx_tbl_offset;
461};
462
463/* Interrupt structure */
464struct bna_intr {
465 /* This should be the first one */
466 struct list_head qe;
467 int ref_count;
468
469 enum bna_intr_type intr_type;
470 int vector;
471
472 struct bna_ib *ib;
473};
474
475/* Doorbell structure */
476struct bna_ib_dbell {
 477	void __iomem *doorbell_addr;
478 u32 doorbell_ack;
479};
480
481/* Interrupt timer configuration */
482struct bna_ib_config {
483 u8 coalescing_timeo; /* Unit is 5usec. */
484
485 int interpkt_count;
486 int interpkt_timeo;
487
488 enum ib_flags ctrl_flags;
489};
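
Given the 5 usec unit noted above, a coalescing_timeo of 12, for example, corresponds to a 60 usec interrupt coalescing window.
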
490
491/* IB structure */
492struct bna_ib {
493 /* This should be the first one */
494 struct list_head qe;
495
496 int ib_id;
497
498 int ref_count;
499 int start_count;
500
501 struct bna_dma_addr ib_seg_host_addr;
502 void *ib_seg_host_addr_kva;
503 u32 idx_mask; /* Size >= BNA_IBIDX_MAX_SEGSIZE */
504
505 struct bna_ibidx_seg *idx_seg;
506
507 struct bna_ib_dbell door_bell;
508
509 struct bna_intr *intr;
510
511 struct bna_ib_config ib_config;
512
513 struct bna *bna;
514};
515
516/* IB module - keeps track of IBs and interrupts */
517struct bna_ib_mod {
518 struct bna_ib *ib; /* BFI_MAX_IB entries */
519 struct bna_intr *intr; /* BFI_MAX_IB entries */
520 struct bna_ibidx_seg *idx_seg; /* BNA_IBIDX_TOTAL_SEGS */
521
522 struct list_head ib_free_q;
523
524 struct list_head ibidx_seg_pool[BFI_IBIDX_TOTAL_POOLS];
525
526 struct list_head intr_free_q;
527 struct list_head intr_active_q;
528
529 struct bna *bna;
530};
531
532/**
533 *
534 * Tx object
535 *
536 */
537
538/* Tx datapath control structure */
539#define BNA_Q_NAME_SIZE 16
540struct bna_tcb {
541 /* Fast path */
542 void **sw_qpt;
543 void *unmap_q;
544 u32 producer_index;
545 u32 consumer_index;
546 volatile u32 *hw_consumer_index;
547 u32 q_depth;
 548	void __iomem *q_dbell;
549 struct bna_ib_dbell *i_dbell;
550 int page_idx;
551 int page_count;
552 /* Control path */
553 struct bna_txq *txq;
554 struct bnad *bnad;
555 enum bna_intr_type intr_type;
556 int intr_vector;
557 u8 priority; /* Current priority */
558 unsigned long flags; /* Used by bnad as required */
559 int id;
560 char name[BNA_Q_NAME_SIZE];
561};
562
563/* TxQ QPT and configuration */
564struct bna_txq {
565 /* This should be the first one */
566 struct list_head qe;
567
568 int txq_id;
569
570 u8 priority;
571
572 struct bna_qpt qpt;
573 struct bna_tcb *tcb;
574 struct bna_ib *ib;
575 int ib_seg_offset;
576
577 struct bna_tx *tx;
578
579 u64 tx_packets;
580 u64 tx_bytes;
581};
582
583/* TxF structure (hardware Tx Function) */
584struct bna_txf {
585 int txf_id;
586 enum txf_flags ctrl_flags;
587 u16 vlan;
588};
589
590/* Tx object */
591struct bna_tx {
592 /* This should be the first one */
593 struct list_head qe;
594
595 bfa_fsm_t fsm;
596 enum bna_tx_flags flags;
597
598 enum bna_tx_type type;
599
600 struct list_head txq_q;
601 struct bna_txf txf;
602
603 /* Tx event handlers */
604 void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
605 void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
606 void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
607 void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
608 void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
609
610 /* callback for bna_tx_disable(), bna_tx_stop() */
611 void (*stop_cbfn)(void *arg, struct bna_tx *tx,
612 enum bna_cb_status status);
613 void *stop_cbarg;
614
615 /* callback for bna_tx_prio_set() */
616 void (*prio_change_cbfn)(struct bnad *bnad, struct bna_tx *tx,
617 enum bna_cb_status status);
618
619 struct bfa_wc txq_stop_wc;
620
621 struct bna_mbox_qe mbox_qe;
622
623 struct bna *bna;
624 void *priv; /* bnad's cookie */
625};
626
627struct bna_tx_config {
628 int num_txq;
629 int txq_depth;
630 enum bna_tx_type tx_type;
631};
632
633struct bna_tx_event_cbfn {
634 /* Optional */
635 void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *);
636 void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *);
637 /* Mandatory */
638 void (*tx_stall_cbfn)(struct bnad *, struct bna_tcb *);
639 void (*tx_resume_cbfn)(struct bnad *, struct bna_tcb *);
640 void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tcb *);
641};
642
643/* Tx module - keeps track of free, active tx objects */
644struct bna_tx_mod {
645 struct bna_tx *tx; /* BFI_MAX_TXQ entries */
646 struct bna_txq *txq; /* BFI_MAX_TXQ entries */
647
648 struct list_head tx_free_q;
649 struct list_head tx_active_q;
650
651 struct list_head txq_free_q;
652
653 /* callback for bna_tx_mod_stop() */
654 void (*stop_cbfn)(struct bna_port *port,
655 enum bna_cb_status status);
656
657 struct bfa_wc tx_stop_wc;
658
659 enum bna_tx_mod_flags flags;
660
661 int priority;
662 int cee_link;
663
664 u32 txf_bmap[2];
665
666 struct bna *bna;
667};
668
669/**
670 *
671 * Receive Indirection Table
672 *
673 */
674
675/* One row of RIT table */
676struct bna_rit_entry {
677 u8 large_rxq_id; /* used for either large or data buffers */
678 u8 small_rxq_id; /* used for either small or header buffers */
679};
680
681/* RIT segment */
682struct bna_rit_segment {
683 struct list_head qe;
684
685 u32 rit_offset;
686 u32 rit_size;
687 /**
688 * max_rit_size: Varies per RIT segment depending on how RIT is
689 * partitioned
690 */
691 u32 max_rit_size;
692
693 struct bna_rit_entry *rit;
694};
695
696struct bna_rit_mod {
697 struct bna_rit_entry *rit;
698 struct bna_rit_segment *rit_segment;
699
700 struct list_head rit_seg_pool[BFI_RIT_SEG_TOTAL_POOLS];
701};
702
703/**
704 *
705 * Rx object
706 *
707 */
708
709/* Rx datapath control structure */
710struct bna_rcb {
711 /* Fast path */
712 void **sw_qpt;
713 void *unmap_q;
714 u32 producer_index;
715 u32 consumer_index;
716 u32 q_depth;
717 void *__iomem q_dbell;
718 int page_idx;
719 int page_count;
720 /* Control path */
721 struct bna_rxq *rxq;
722 struct bna_cq *cq;
723 struct bnad *bnad;
724 unsigned long flags;
725 int id;
726};
727
728/* RxQ structure - QPT, configuration */
729struct bna_rxq {
730 struct list_head qe;
731 int rxq_id;
732
733 int buffer_size;
734 int q_depth;
735
736 struct bna_qpt qpt;
737 struct bna_rcb *rcb;
738
739 struct bna_rxp *rxp;
740 struct bna_rx *rx;
741
742 u64 rx_packets;
743 u64 rx_bytes;
744 u64 rx_packets_with_error;
745 u64 rxbuf_alloc_failed;
746};
747
748/* RxQ pair */
749union bna_rxq_u {
750 struct {
751 struct bna_rxq *hdr;
752 struct bna_rxq *data;
753 } hds;
754 struct {
755 struct bna_rxq *small;
756 struct bna_rxq *large;
757 } slr;
758 struct {
759 struct bna_rxq *only;
760 struct bna_rxq *reserved;
761 } single;
762};
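
/*
 * A completion carries the id of the RxQ it came from, so steering a
 * received frame to the right member of a pair reduces to an id match
 * (this mirrors the qid0 test in bnad_poll_cq() later in this patch):
 *
 *	rcb = (cmpl->rxq_id == ccb->rcb[0]->rxq->rxq_id) ?
 *		ccb->rcb[0] : ccb->rcb[1];
 */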
763
764/* Packet rate for Dynamic Interrupt Moderation */
765struct bna_pkt_rate {
766 u32 small_pkt_cnt;
767 u32 large_pkt_cnt;
768};
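
/*
 * A plausible shape for the BNA_UPDATE_PKT_CNT() step used in the Rx
 * poll loop of bnad.c (the small/large cutoff value is an assumption,
 * not taken from this patch):
 *
 *	static inline void pkt_rate_update(struct bna_pkt_rate *pkt_rt,
 *					   u16 len)
 *	{
 *		if (len < 1024)		 assumed cutoff between buckets
 *			pkt_rt->small_pkt_cnt++;
 *		else
 *			pkt_rt->large_pkt_cnt++;
 *	}
 */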
769
770/* Completion control structure */
771struct bna_ccb {
772 /* Fast path */
773 void **sw_qpt;
774 u32 producer_index;
775 volatile u32 *hw_producer_index;
776 u32 q_depth;
777 struct bna_ib_dbell *i_dbell;
778 struct bna_rcb *rcb[2];
779 void *ctrl; /* For bnad */
780 struct bna_pkt_rate pkt_rate;
781 int page_idx;
782 int page_count;
783
784 /* Control path */
785 struct bna_cq *cq;
786 struct bnad *bnad;
787 enum bna_intr_type intr_type;
788 int intr_vector;
789 u8 rx_coalescing_timeo; /* For NAPI */
790 int id;
791 char name[BNA_Q_NAME_SIZE];
792};
793
794/* CQ QPT, configuration */
795struct bna_cq {
796 int cq_id;
797
798 struct bna_qpt qpt;
799 struct bna_ccb *ccb;
800
801 struct bna_ib *ib;
802 u8 ib_seg_offset;
803
804 struct bna_rx *rx;
805};
806
807struct bna_rss_config {
808 enum rss_hash_type hash_type;
809 u8 hash_mask;
810 u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
811};
812
813struct bna_hds_config {
814 enum hds_header_type hdr_type;
815 int header_size;
816};
817
818/* This structure is used during RX creation */
819struct bna_rx_config {
820 enum bna_rx_type rx_type;
821 int num_paths;
822 enum bna_rxp_type rxp_type;
823 int paused;
824 int q_depth;
825 /*
826 * Small/Large (or Header/Data) buffer size to be configured
827 * for SLR and HDS queue type. Large buffer size comes from
828 * port->mtu.
829 */
830 int small_buff_size;
831
832 enum bna_status rss_status;
833 struct bna_rss_config rss_config;
834
835 enum bna_status hds_status;
836 struct bna_hds_config hds_config;
837
838 enum bna_status vlan_strip_status;
839};
840
841/* Rx Path structure - one per MSIX vector/CPU */
842struct bna_rxp {
843 /* This should be the first one */
844 struct list_head qe;
845
846 enum bna_rxp_type type;
847 union bna_rxq_u rxq;
848 struct bna_cq cq;
849
850 struct bna_rx *rx;
851
852	/* MSI-X vector number for configuring RSS */
853 int vector;
854
855 struct bna_mbox_qe mbox_qe;
856};
857
858/* HDS configuration structure */
859struct bna_rxf_hds {
860 enum hds_header_type hdr_type;
861 int header_size;
862};
863
864/* RSS configuration structure */
865struct bna_rxf_rss {
866 enum rss_hash_type hash_type;
867 u8 hash_mask;
868 u32 toeplitz_hash_key[BFI_RSS_HASH_KEY_LEN];
869};
870
871/* RxF structure (hardware Rx Function) */
872struct bna_rxf {
873 bfa_fsm_t fsm;
874 int rxf_id;
875 enum rxf_flags ctrl_flags;
876 u16 default_vlan_tag;
877 enum bna_rxf_oper_state rxf_oper_state;
878 enum bna_status hds_status;
879 struct bna_rxf_hds hds_cfg;
880 enum bna_status rss_status;
881 struct bna_rxf_rss rss_cfg;
882 struct bna_rit_segment *rit_segment;
883 struct bna_rx *rx;
884 u32 forced_offset;
885 struct bna_mbox_qe mbox_qe;
886 int mcast_rxq_id;
887
888 /* callback for bna_rxf_start() */
889 void (*start_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
890 struct bna_rx *start_cbarg;
891
892 /* callback for bna_rxf_stop() */
893 void (*stop_cbfn) (struct bna_rx *rx, enum bna_cb_status status);
894 struct bna_rx *stop_cbarg;
895
896 /* callback for bna_rxf_receive_enable() / bna_rxf_receive_disable() */
897 void (*oper_state_cbfn) (struct bnad *bnad, struct bna_rx *rx,
898 enum bna_cb_status status);
899 struct bnad *oper_state_cbarg;
900
901 /**
902 * callback for:
903 * bna_rxf_ucast_set()
904 * bna_rxf_{ucast/mcast}_add(),
905 * bna_rxf_{ucast/mcast}_del(),
906 * bna_rxf_mode_set()
907 */
908 void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
909 enum bna_cb_status status);
910 struct bnad *cam_fltr_cbarg;
911
912 enum bna_rxf_flags rxf_flags;
913
914 /* List of unicast addresses yet to be applied to h/w */
915 struct list_head ucast_pending_add_q;
916 struct list_head ucast_pending_del_q;
917 int ucast_pending_set;
918 /* ucast addresses applied to the h/w */
919 struct list_head ucast_active_q;
920 struct bna_mac *ucast_active_mac;
921
922 /* List of multicast addresses yet to be applied to h/w */
923 struct list_head mcast_pending_add_q;
924 struct list_head mcast_pending_del_q;
925 /* multicast addresses applied to the h/w */
926 struct list_head mcast_active_q;
927
928 /* Rx modes yet to be applied to h/w */
929 enum bna_rxmode rxmode_pending;
930 enum bna_rxmode rxmode_pending_bitmask;
931 /* Rx modes applied to h/w */
932 enum bna_rxmode rxmode_active;
933
934 enum bna_status vlan_filter_status;
935 u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
936};
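
/*
 * vlan_filter_table above packs one enable bit per VLAN id into u32
 * words.  A minimal sketch of the standard bitmap arithmetic involved
 * (helper names illustrative):
 *
 *	static inline void vlan_tbl_set(u32 *tbl, u16 vlan_id)
 *	{
 *		tbl[vlan_id >> 5] |= 1 << (vlan_id & 0x1f);  32 ids/word
 *	}
 *
 *	static inline int vlan_tbl_test(const u32 *tbl, u16 vlan_id)
 *	{
 *		return (tbl[vlan_id >> 5] >> (vlan_id & 0x1f)) & 1;
 *	}
 */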
937
938/* Rx object */
939struct bna_rx {
940 /* This should be the first one */
941 struct list_head qe;
942
943 bfa_fsm_t fsm;
944
945 enum bna_rx_type type;
946
947 /* list-head for RX path objects */
948 struct list_head rxp_q;
949
950 struct bna_rxf rxf;
951
952 enum bna_rx_flags rx_flags;
953
954 struct bna_mbox_qe mbox_qe;
955
956 struct bfa_wc rxq_stop_wc;
957
958 /* Rx event handlers */
959 void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
960 void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
961 void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
962 void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
963 void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
964 void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
965
966 /* callback for bna_rx_disable(), bna_rx_stop() */
967 void (*stop_cbfn)(void *arg, struct bna_rx *rx,
968 enum bna_cb_status status);
969 void *stop_cbarg;
970
971 struct bna *bna;
972 void *priv; /* bnad's cookie */
973};
974
975struct bna_rx_event_cbfn {
976 /* Optional */
977 void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *);
978 void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *);
979 void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *);
980 void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *);
981 /* Mandatory */
982 void (*rx_cleanup_cbfn)(struct bnad *, struct bna_ccb *);
983 void (*rx_post_cbfn)(struct bnad *, struct bna_rcb *);
984};
985
986/* Rx module - keeps track of free, active rx objects */
987struct bna_rx_mod {
988 struct bna *bna; /* back pointer to parent */
989 struct bna_rx *rx; /* BFI_MAX_RXQ entries */
990 struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */
991 struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */
992
993 struct list_head rx_free_q;
994 struct list_head rx_active_q;
995 int rx_free_count;
996
997 struct list_head rxp_free_q;
998 int rxp_free_count;
999
1000 struct list_head rxq_free_q;
1001 int rxq_free_count;
1002
1003 enum bna_rx_mod_flags flags;
1004
1005 /* callback for bna_rx_mod_stop() */
1006 void (*stop_cbfn)(struct bna_port *port,
1007 enum bna_cb_status status);
1008
1009 struct bfa_wc rx_stop_wc;
1010 u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX];
1011 u32 rxf_bmap[2];
1012};
1013
1014/**
1015 *
1016 * CAM
1017 *
1018 */
1019
1020struct bna_ucam_mod {
1021 struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
1022 struct list_head free_q;
1023
1024 struct bna *bna;
1025};
1026
1027struct bna_mcam_mod {
1028 struct bna_mac *mcmac; /* BFI_MAX_MCMAC entries */
1029 struct list_head free_q;
1030
1031 struct bna *bna;
1032};
1033
1034/**
1035 *
1036 * Statistics
1037 *
1038 */
1039
1040struct bna_tx_stats {
1041 int tx_state;
1042 int tx_flags;
1043 int num_txqs;
1044 u32 txq_bmap[2];
1045 int txf_id;
1046};
1047
1048struct bna_rx_stats {
1049 int rx_state;
1050 int rx_flags;
1051 int num_rxps;
1052 int num_rxqs;
1053 u32 rxq_bmap[2];
1054 u32 cq_bmap[2];
1055 int rxf_id;
1056 int rxf_state;
1057 int rxf_oper_state;
1058 int num_active_ucast;
1059 int num_active_mcast;
1060 int rxmode_active;
1061 int vlan_filter_status;
1062 u32 vlan_filter_table[(BFI_MAX_VLAN + 1) / 32];
1063 int rss_status;
1064 int hds_status;
1065};
1066
1067struct bna_sw_stats {
1068 int device_state;
1069 int port_state;
1070 int port_flags;
1071 int llport_state;
1072 int priority;
1073 int num_active_tx;
1074 int num_active_rx;
1075 struct bna_tx_stats tx_stats[BFI_MAX_TXQ];
1076 struct bna_rx_stats rx_stats[BFI_MAX_RXQ];
1077};
1078
1079struct bna_stats {
1080 u32 txf_bmap[2];
1081 u32 rxf_bmap[2];
1082 struct bfi_ll_stats *hw_stats;
1083 struct bna_sw_stats *sw_stats;
1084};
1085
1086/**
1087 *
1088 * BNA
1089 *
1090 */
1091
1092struct bna {
1093 struct bfa_pcidev pcidev;
1094
1095 int port_num;
1096
1097 struct bna_chip_regs regs;
1098
1099 struct bna_dma_addr hw_stats_dma;
1100 struct bna_stats stats;
1101
1102 struct bna_device device;
1103 struct bfa_cee cee;
1104
1105 struct bna_mbox_mod mbox_mod;
1106
1107 struct bna_port port;
1108
1109 struct bna_tx_mod tx_mod;
1110
1111 struct bna_rx_mod rx_mod;
1112
1113 struct bna_ib_mod ib_mod;
1114
1115 struct bna_ucam_mod ucam_mod;
1116 struct bna_mcam_mod mcam_mod;
1117
1118 struct bna_rit_mod rit_mod;
1119
1120 int rxf_default_id;
1121 int rxf_promisc_id;
1122
1123 struct bna_mbox_qe mbox_qe;
1124
1125 struct bnad *bnad;
1126};
1127
1128#endif /* __BNA_TYPES_H__ */
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
new file mode 100644
index 000000000000..e380c0e88f4f
--- /dev/null
+++ b/drivers/net/bna/bnad.c
@@ -0,0 +1,3267 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include <linux/netdevice.h>
19#include <linux/skbuff.h>
20#include <linux/etherdevice.h>
21#include <linux/in.h>
22#include <linux/ethtool.h>
23#include <linux/if_vlan.h>
24#include <linux/if_ether.h>
25#include <linux/ip.h>
26
27#include "bnad.h"
28#include "bna.h"
29#include "cna.h"
30
31DEFINE_MUTEX(bnad_fwimg_mutex);
32
33/*
34 * Module params
35 */
36static uint bnad_msix_disable;
37module_param(bnad_msix_disable, uint, 0444);
38MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
39
40static uint bnad_ioc_auto_recover = 1;
41module_param(bnad_ioc_auto_recover, uint, 0444);
42MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
43
44/*
45 * Global variables
46 */
47u32 bnad_rxqs_per_cq = 2;
48
49const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
50
51/*
52 * Local MACROS
53 */
54#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
55
56#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
57
58#define BNAD_GET_MBOX_IRQ(_bnad) \
59 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
60 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
61 ((_bnad)->pcidev->irq))
62
63#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
64do { \
65 (_res_info)->res_type = BNA_RES_T_MEM; \
66 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
67 (_res_info)->res_u.mem_info.num = (_num); \
68 (_res_info)->res_u.mem_info.len = \
69 sizeof(struct bnad_unmap_q) + \
70 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
71} while (0)
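
/*
 * The length computed by this macro is the classic "struct with a
 * one-element trailing array" sizing idiom: assuming bnad_unmap_q ends
 * in struct bnad_skb_unmap unmap_array[1], a queue of _depth entries
 * needs the base struct plus (_depth - 1) additional array elements:
 *
 *	len = sizeof(struct bnad_unmap_q) +
 *	      sizeof(struct bnad_skb_unmap) * (_depth - 1);
 */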
72
73/*
74 * Reinitialize completions in CQ, once Rx is taken down
75 */
76static void
77bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
78{
79 struct bna_cq_entry *cmpl, *next_cmpl;
80 unsigned int wi_range, wis = 0, ccb_prod = 0;
81 int i;
82
83 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
84 wi_range);
85
86 for (i = 0; i < ccb->q_depth; i++) {
87 wis++;
88 if (likely(--wi_range))
89 next_cmpl = cmpl + 1;
90 else {
91 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
92 wis = 0;
93 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
94 next_cmpl, wi_range);
95 }
96 cmpl->valid = 0;
97 cmpl = next_cmpl;
98 }
99}
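
/*
 * BNA_CQ_QPGE_PTR_GET() is defined in the hardware headers of this
 * patch; a plausible equivalent of the page-table walk it performs,
 * assuming a power-of-two number of CQ entries per page (names here
 * are illustrative, not the driver's):
 *
 *	static inline void cq_qpge_ptr_get(u32 qe_idx, void **sw_qpt,
 *					   struct bna_cq_entry **qe_ptr,
 *					   unsigned int *wi_range,
 *					   unsigned int qes_per_page)
 *	{
 *		unsigned int page = qe_idx / qes_per_page;
 *		unsigned int off = qe_idx & (qes_per_page - 1);
 *
 *		*qe_ptr = (struct bna_cq_entry *)sw_qpt[page] + off;
 *		*wi_range = qes_per_page - off;	 entries left in page
 *	}
 */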
100
101/*
102 * Frees all pending Tx Bufs
103 * At this point no activity is expected on the Q,
104 * so DMA unmap & freeing is fine.
105 */
106static void
107bnad_free_all_txbufs(struct bnad *bnad,
108 struct bna_tcb *tcb)
109{
110 u16 unmap_cons;
111 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
112 struct bnad_skb_unmap *unmap_array;
113 struct sk_buff *skb = NULL;
114 int i;
115
116 unmap_array = unmap_q->unmap_array;
117
118 unmap_cons = 0;
119 while (unmap_cons < unmap_q->q_depth) {
120 skb = unmap_array[unmap_cons].skb;
121 if (!skb) {
122 unmap_cons++;
123 continue;
124 }
125 unmap_array[unmap_cons].skb = NULL;
126
127 pci_unmap_single(bnad->pcidev,
128 pci_unmap_addr(&unmap_array[unmap_cons],
129 dma_addr), skb_headlen(skb),
130 PCI_DMA_TODEVICE);
131
132 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
133 unmap_cons++;
134 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
135 pci_unmap_page(bnad->pcidev,
136 pci_unmap_addr(&unmap_array[unmap_cons],
137 dma_addr),
138 skb_shinfo(skb)->frags[i].size,
139 PCI_DMA_TODEVICE);
140 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
141 0);
142 unmap_cons++;
143 }
144 dev_kfree_skb_any(skb);
145 }
146}
147
148/* Data Path Handlers */
149
150/*
151 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
152 * Can be called in a) Interrupt context
153 * b) Sending context
154 * c) Tasklet context
155 */
156static u32
157bnad_free_txbufs(struct bnad *bnad,
158 struct bna_tcb *tcb)
159{
160 u32 sent_packets = 0, sent_bytes = 0;
161 u16 wis, unmap_cons, updated_hw_cons;
162 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
163 struct bnad_skb_unmap *unmap_array;
164 struct sk_buff *skb;
165 int i;
166
167	/*
168	 * Just return if TX is stopped. This check is useful
169	 * when bnad_free_txbufs() runs from a tasklet that was
170	 * scheduled before bnad_cb_tx_cleanup() cleared the
171	 * BNAD_RF_TX_STARTED bit, but actually executes after
172	 * the cleanup has run.
173	 */
174 if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
175 return 0;
176
177 updated_hw_cons = *(tcb->hw_consumer_index);
178
179 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
180 updated_hw_cons, tcb->q_depth);
181
182 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
183
184 unmap_array = unmap_q->unmap_array;
185 unmap_cons = unmap_q->consumer_index;
186
187 prefetch(&unmap_array[unmap_cons + 1]);
188 while (wis) {
189 skb = unmap_array[unmap_cons].skb;
190
191 unmap_array[unmap_cons].skb = NULL;
192
193 sent_packets++;
194 sent_bytes += skb->len;
195 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
196
197 pci_unmap_single(bnad->pcidev,
198 pci_unmap_addr(&unmap_array[unmap_cons],
199 dma_addr), skb_headlen(skb),
200 PCI_DMA_TODEVICE);
201 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
202 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
203
204 prefetch(&unmap_array[unmap_cons + 1]);
205 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
206 prefetch(&unmap_array[unmap_cons + 1]);
207
208 pci_unmap_page(bnad->pcidev,
209 pci_unmap_addr(&unmap_array[unmap_cons],
210 dma_addr),
211 skb_shinfo(skb)->frags[i].size,
212 PCI_DMA_TODEVICE);
213 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
214 0);
215 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
216 }
217 dev_kfree_skb_any(skb);
218 }
219
220 /* Update consumer pointers. */
221 tcb->consumer_index = updated_hw_cons;
222 unmap_q->consumer_index = unmap_cons;
223
224 tcb->txq->tx_packets += sent_packets;
225 tcb->txq->tx_bytes += sent_bytes;
226
227 return sent_packets;
228}
229
230/* Tx Free Tasklet function */
231/* Frees completed Tx buffers for all the tcbs in all the Tx objects */
232/*
233 * Scheduled from the sending context, so that
234 * the fat Tx lock is not held for too long
235 * there.
236 */
237static void
238bnad_tx_free_tasklet(unsigned long bnad_ptr)
239{
240 struct bnad *bnad = (struct bnad *)bnad_ptr;
241 struct bna_tcb *tcb;
242 u32 acked;
243 int i, j;
244
245 for (i = 0; i < bnad->num_tx; i++) {
246 for (j = 0; j < bnad->num_txq_per_tx; j++) {
247 tcb = bnad->tx_info[i].tcb[j];
248 if (!tcb)
249 continue;
250 if (((u16) (*tcb->hw_consumer_index) !=
251 tcb->consumer_index) &&
252 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
253 &tcb->flags))) {
254 acked = bnad_free_txbufs(bnad, tcb);
255 bna_ib_ack(tcb->i_dbell, acked);
256 smp_mb__before_clear_bit();
257 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
258 }
259 }
260 }
261}
262
263static u32
264bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
265{
266 struct net_device *netdev = bnad->netdev;
267 u32 sent;
268
269 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
270 return 0;
271
272 sent = bnad_free_txbufs(bnad, tcb);
273 if (sent) {
274 if (netif_queue_stopped(netdev) &&
275 netif_carrier_ok(netdev) &&
276 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
277 BNAD_NETIF_WAKE_THRESHOLD) {
278 netif_wake_queue(netdev);
279 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
280 }
281 bna_ib_ack(tcb->i_dbell, sent);
282 } else
283 bna_ib_ack(tcb->i_dbell, 0);
284
285 smp_mb__before_clear_bit();
286 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
287
288 return sent;
289}
290
291/* MSIX Tx Completion Handler */
292static irqreturn_t
293bnad_msix_tx(int irq, void *data)
294{
295 struct bna_tcb *tcb = (struct bna_tcb *)data;
296 struct bnad *bnad = tcb->bnad;
297
298 bnad_tx(bnad, tcb);
299
300 return IRQ_HANDLED;
301}
302
303static void
304bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
305{
306 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
307
308 rcb->producer_index = 0;
309 rcb->consumer_index = 0;
310
311 unmap_q->producer_index = 0;
312 unmap_q->consumer_index = 0;
313}
314
315static void
316bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
317{
318 struct bnad_unmap_q *unmap_q;
319 struct sk_buff *skb;
320
321 unmap_q = rcb->unmap_q;
322 while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
323 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
324 BUG_ON(!(skb));
325 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
326 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
327 unmap_array[unmap_q->consumer_index],
328 dma_addr), rcb->rxq->buffer_size +
329 NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
330 dev_kfree_skb(skb);
331 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
332 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
333 }
334
335 bnad_reset_rcb(bnad, rcb);
336}
337
338static void
339bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
340{
341 u16 to_alloc, alloced, unmap_prod, wi_range;
342 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
343 struct bnad_skb_unmap *unmap_array;
344 struct bna_rxq_entry *rxent;
345 struct sk_buff *skb;
346 dma_addr_t dma_addr;
347
348 alloced = 0;
349 to_alloc =
350 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
351
352 unmap_array = unmap_q->unmap_array;
353 unmap_prod = unmap_q->producer_index;
354
355 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
356
357 while (to_alloc--) {
358 if (!wi_range) {
359 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
360 wi_range);
361 }
362 skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
363 GFP_ATOMIC);
364 if (unlikely(!skb)) {
365 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
366 goto finishing;
367 }
368 skb->dev = bnad->netdev;
369 skb_reserve(skb, NET_IP_ALIGN);
370 unmap_array[unmap_prod].skb = skb;
371 dma_addr = pci_map_single(bnad->pcidev, skb->data,
372 rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
373 pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
374 dma_addr);
375 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
376 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
377
378 rxent++;
379 wi_range--;
380 alloced++;
381 }
382
383finishing:
384 if (likely(alloced)) {
385 unmap_q->producer_index = unmap_prod;
386 rcb->producer_index = unmap_prod;
387 smp_mb();
388 bna_rxq_prod_indx_doorbell(rcb);
389 }
390}
391
392/*
393 * Locking is required in the enable path
394 * because it is called from a NAPI poll
395 * context, where bna_lock is not held
396 * (unlike in the IRQ context).
397 */
398static void
399bnad_enable_txrx_irqs(struct bnad *bnad)
400{
401 struct bna_tcb *tcb;
402 struct bna_ccb *ccb;
403 int i, j;
404 unsigned long flags;
405
406 spin_lock_irqsave(&bnad->bna_lock, flags);
407 for (i = 0; i < bnad->num_tx; i++) {
408 for (j = 0; j < bnad->num_txq_per_tx; j++) {
409 tcb = bnad->tx_info[i].tcb[j];
410 bna_ib_coalescing_timer_set(tcb->i_dbell,
411 tcb->txq->ib->ib_config.coalescing_timeo);
412 bna_ib_ack(tcb->i_dbell, 0);
413 }
414 }
415
416 for (i = 0; i < bnad->num_rx; i++) {
417 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
418 ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
419 bnad_enable_rx_irq_unsafe(ccb);
420 }
421 }
422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
423}
424
425static inline void
426bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
427{
428 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
429
430 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
431 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
432 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
433 bnad_alloc_n_post_rxbufs(bnad, rcb);
434 smp_mb__before_clear_bit();
435 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
436 }
437}
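
/*
 * The BNAD_RXQ_REFILL bit makes refill single-threaded per RCB: the
 * context that wins the test_and_set_bit() performs the allocation and
 * everyone else backs off, while the barrier before clear_bit() orders
 * the posted buffers ahead of letting the next refiller in.
 */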
438
439static u32
440bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
441{
442 struct bna_cq_entry *cmpl, *next_cmpl;
443 struct bna_rcb *rcb = NULL;
444 unsigned int wi_range, packets = 0, wis = 0;
445 struct bnad_unmap_q *unmap_q;
446 struct sk_buff *skb;
447 u32 flags;
448 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
449 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
450
451 prefetch(bnad->netdev);
452 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
453 wi_range);
454 BUG_ON(!(wi_range <= ccb->q_depth));
455 while (cmpl->valid && packets < budget) {
456 packets++;
457 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
458
459 if (qid0 == cmpl->rxq_id)
460 rcb = ccb->rcb[0];
461 else
462 rcb = ccb->rcb[1];
463
464 unmap_q = rcb->unmap_q;
465
466 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
467 BUG_ON(!(skb));
468 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
469 pci_unmap_single(bnad->pcidev,
470 pci_unmap_addr(&unmap_q->
471 unmap_array[unmap_q->
472 consumer_index],
473 dma_addr),
474 rcb->rxq->buffer_size,
475 PCI_DMA_FROMDEVICE);
476 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
477
478		/* TODO: could this be made more efficient? */
479 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
480
481 wis++;
482 if (likely(--wi_range))
483 next_cmpl = cmpl + 1;
484 else {
485 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
486 wis = 0;
487 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
488 next_cmpl, wi_range);
489 BUG_ON(!(wi_range <= ccb->q_depth));
490 }
491 prefetch(next_cmpl);
492
493 flags = ntohl(cmpl->flags);
494 if (unlikely
495 (flags &
496 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
497 BNA_CQ_EF_TOO_LONG))) {
498 dev_kfree_skb_any(skb);
499 rcb->rxq->rx_packets_with_error++;
500 goto next;
501 }
502
503 skb_put(skb, ntohs(cmpl->length));
504 if (likely
505 (bnad->rx_csum &&
506 (((flags & BNA_CQ_EF_IPV4) &&
507 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
508 (flags & BNA_CQ_EF_IPV6)) &&
509 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
510 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
511 skb->ip_summed = CHECKSUM_UNNECESSARY;
512 else
513 skb_checksum_none_assert(skb);
514
515 rcb->rxq->rx_packets++;
516 rcb->rxq->rx_bytes += skb->len;
517 skb->protocol = eth_type_trans(skb, bnad->netdev);
518
519 if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
520 struct bnad_rx_ctrl *rx_ctrl =
521 (struct bnad_rx_ctrl *)ccb->ctrl;
522 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
523 vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
524 ntohs(cmpl->vlan_tag), skb);
525 else
526 vlan_hwaccel_receive_skb(skb,
527 bnad->vlan_grp,
528 ntohs(cmpl->vlan_tag));
529
530 } else { /* Not VLAN tagged/stripped */
531 struct bnad_rx_ctrl *rx_ctrl =
532 (struct bnad_rx_ctrl *)ccb->ctrl;
533 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
534 napi_gro_receive(&rx_ctrl->napi, skb);
535 else
536 netif_receive_skb(skb);
537 }
538
539next:
540 cmpl->valid = 0;
541 cmpl = next_cmpl;
542 }
543
544 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
545
546 if (likely(ccb)) {
547 bna_ib_ack(ccb->i_dbell, packets);
548 bnad_refill_rxq(bnad, ccb->rcb[0]);
549 if (ccb->rcb[1])
550 bnad_refill_rxq(bnad, ccb->rcb[1]);
551 } else
552 bna_ib_ack(ccb->i_dbell, 0);
553
554 return packets;
555}
556
557static void
558bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
559{
560 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
561 bna_ib_ack(ccb->i_dbell, 0);
562}
563
564static void
565bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
566{
567 spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
568 bnad_enable_rx_irq_unsafe(ccb);
569 spin_unlock_irq(&bnad->bna_lock);
570}
571
572static void
573bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
574{
575 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
576	if (likely(napi_schedule_prep(&rx_ctrl->napi))) {
577		bnad_disable_rx_irq(bnad, ccb);
578		__napi_schedule(&rx_ctrl->napi);
579 }
580 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
581}
582
583/* MSIX Rx Path Handler */
584static irqreturn_t
585bnad_msix_rx(int irq, void *data)
586{
587 struct bna_ccb *ccb = (struct bna_ccb *)data;
588 struct bnad *bnad = ccb->bnad;
589
590 bnad_netif_rx_schedule_poll(bnad, ccb);
591
592 return IRQ_HANDLED;
593}
594
595/* Interrupt handlers */
596
597/* Mbox Interrupt Handlers */
598static irqreturn_t
599bnad_msix_mbox_handler(int irq, void *data)
600{
601 u32 intr_status;
602 unsigned long flags;
603 struct net_device *netdev = data;
604 struct bnad *bnad;
605
606 bnad = netdev_priv(netdev);
607
608 /* BNA_ISR_GET(bnad); Inc Ref count */
609 spin_lock_irqsave(&bnad->bna_lock, flags);
610
611 bna_intr_status_get(&bnad->bna, intr_status);
612
613 if (BNA_IS_MBOX_ERR_INTR(intr_status))
614 bna_mbox_handler(&bnad->bna, intr_status);
615
616 spin_unlock_irqrestore(&bnad->bna_lock, flags);
617
618 /* BNAD_ISR_PUT(bnad); Dec Ref count */
619 return IRQ_HANDLED;
620}
621
622static irqreturn_t
623bnad_isr(int irq, void *data)
624{
625 int i, j;
626 u32 intr_status;
627 unsigned long flags;
628 struct net_device *netdev = data;
629 struct bnad *bnad = netdev_priv(netdev);
630 struct bnad_rx_info *rx_info;
631 struct bnad_rx_ctrl *rx_ctrl;
632
633 spin_lock_irqsave(&bnad->bna_lock, flags);
634
635 bna_intr_status_get(&bnad->bna, intr_status);
636 if (!intr_status) {
637 spin_unlock_irqrestore(&bnad->bna_lock, flags);
638 return IRQ_NONE;
639 }
640
641 if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
642 bna_mbox_handler(&bnad->bna, intr_status);
643 if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
644 spin_unlock_irqrestore(&bnad->bna_lock, flags);
645 goto done;
646 }
647 }
648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
649
650 /* Process data interrupts */
651 for (i = 0; i < bnad->num_rx; i++) {
652 rx_info = &bnad->rx_info[i];
653 if (!rx_info->rx)
654 continue;
655 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
656 rx_ctrl = &rx_info->rx_ctrl[j];
657 if (rx_ctrl->ccb)
658 bnad_netif_rx_schedule_poll(bnad,
659 rx_ctrl->ccb);
660 }
661 }
662done:
663 return IRQ_HANDLED;
664}
665
666/*
667 * Called in interrupt / callback context
668 * with bna_lock held, so cfg_flags access is OK
669 */
670static void
671bnad_enable_mbox_irq(struct bnad *bnad)
672{
673 int irq = BNAD_GET_MBOX_IRQ(bnad);
674
675 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
676 return;
677
678 if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
679 enable_irq(irq);
680 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
681}
682
683/*
684 * Called with bnad->bna_lock held because of
685 * the bnad->cfg_flags access.
686 */
687void
688bnad_disable_mbox_irq(struct bnad *bnad)
689{
690 int irq = BNAD_GET_MBOX_IRQ(bnad);
691
692 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
693 return;
694
695 if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
696 disable_irq_nosync(irq);
697 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
698}
699
700/* Control Path Handlers */
701
702/* Callbacks */
703void
704bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
705{
706 bnad_enable_mbox_irq(bnad);
707}
708
709void
710bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
711{
712 bnad_disable_mbox_irq(bnad);
713}
714
715void
716bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
717{
718 complete(&bnad->bnad_completions.ioc_comp);
719 bnad->bnad_completions.ioc_comp_status = status;
720}
721
722void
723bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
724{
725 complete(&bnad->bnad_completions.ioc_comp);
726 bnad->bnad_completions.ioc_comp_status = status;
727}
728
729static void
730bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
731{
732 struct bnad *bnad = (struct bnad *)arg;
733
734 complete(&bnad->bnad_completions.port_comp);
735
736 netif_carrier_off(bnad->netdev);
737}
738
739void
740bnad_cb_port_link_status(struct bnad *bnad,
741 enum bna_link_status link_status)
742{
743	bool link_up = false;
744
745 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
746
747 if (link_status == BNA_CEE_UP) {
748 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
749 BNAD_UPDATE_CTR(bnad, cee_up);
750 } else
751 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
752
753 if (link_up) {
754 if (!netif_carrier_ok(bnad->netdev)) {
755 pr_warn("bna: %s link up\n",
756 bnad->netdev->name);
757 netif_carrier_on(bnad->netdev);
758 BNAD_UPDATE_CTR(bnad, link_toggle);
759 if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
760 /* Force an immediate Transmit Schedule */
761 pr_info("bna: %s TX_STARTED\n",
762 bnad->netdev->name);
763 netif_wake_queue(bnad->netdev);
764 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
765 } else {
766 netif_stop_queue(bnad->netdev);
767 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
768 }
769 }
770 } else {
771 if (netif_carrier_ok(bnad->netdev)) {
772 pr_warn("bna: %s link down\n",
773 bnad->netdev->name);
774 netif_carrier_off(bnad->netdev);
775 BNAD_UPDATE_CTR(bnad, link_toggle);
776 }
777 }
778}
779
780static void
781bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
782 enum bna_cb_status status)
783{
784 struct bnad *bnad = (struct bnad *)arg;
785
786 complete(&bnad->bnad_completions.tx_comp);
787}
788
789static void
790bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
791{
792 struct bnad_tx_info *tx_info =
793 (struct bnad_tx_info *)tcb->txq->tx->priv;
794 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
795
796 tx_info->tcb[tcb->id] = tcb;
797 unmap_q->producer_index = 0;
798 unmap_q->consumer_index = 0;
799 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
800}
801
802static void
803bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
804{
805 struct bnad_tx_info *tx_info =
806 (struct bnad_tx_info *)tcb->txq->tx->priv;
807
808 tx_info->tcb[tcb->id] = NULL;
809}
810
811static void
812bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
813{
814 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
815
816 unmap_q->producer_index = 0;
817 unmap_q->consumer_index = 0;
818 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
819}
820
821static void
822bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
823{
824 struct bnad_rx_info *rx_info =
825 (struct bnad_rx_info *)ccb->cq->rx->priv;
826
827 rx_info->rx_ctrl[ccb->id].ccb = ccb;
828 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
829}
830
831static void
832bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
833{
834 struct bnad_rx_info *rx_info =
835 (struct bnad_rx_info *)ccb->cq->rx->priv;
836
837 rx_info->rx_ctrl[ccb->id].ccb = NULL;
838}
839
840static void
841bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
842{
843 struct bnad_tx_info *tx_info =
844 (struct bnad_tx_info *)tcb->txq->tx->priv;
845
846 if (tx_info != &bnad->tx_info[0])
847 return;
848
849 clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
850 netif_stop_queue(bnad->netdev);
851 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
852}
853
854static void
855bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
856{
857 if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
858 return;
859
860 if (netif_carrier_ok(bnad->netdev)) {
861 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
862 netif_wake_queue(bnad->netdev);
863 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
864 }
865}
866
867static void
868bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
869{
870 struct bnad_unmap_q *unmap_q;
871
872 if (!tcb || (!tcb->unmap_q))
873 return;
874
875 unmap_q = tcb->unmap_q;
876 if (!unmap_q->unmap_array)
877 return;
878
879 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
880 return;
881
882 bnad_free_all_txbufs(bnad, tcb);
883
884 unmap_q->producer_index = 0;
885 unmap_q->consumer_index = 0;
886
887 smp_mb__before_clear_bit();
888 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
889}
890
891static void
892bnad_cb_rx_cleanup(struct bnad *bnad,
893 struct bna_ccb *ccb)
894{
895 bnad_cq_cmpl_init(bnad, ccb);
896
897 bnad_free_rxbufs(bnad, ccb->rcb[0]);
898 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
899
900 if (ccb->rcb[1]) {
901 bnad_free_rxbufs(bnad, ccb->rcb[1]);
902 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
903 }
904}
905
906static void
907bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
908{
909 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
910
911 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
912
913 /* Now allocate & post buffers for this RCB */
914	/* Note: buffer allocation happens here, in callback context */
915 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
916 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
917 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
918 bnad_alloc_n_post_rxbufs(bnad, rcb);
919 smp_mb__before_clear_bit();
920 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
921 }
922}
923
924static void
925bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
926 enum bna_cb_status status)
927{
928 struct bnad *bnad = (struct bnad *)arg;
929
930 complete(&bnad->bnad_completions.rx_comp);
931}
932
933static void
934bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
935 enum bna_cb_status status)
936{
937 bnad->bnad_completions.mcast_comp_status = status;
938 complete(&bnad->bnad_completions.mcast_comp);
939}
940
941void
942bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
943 struct bna_stats *stats)
944{
945 if (status == BNA_CB_SUCCESS)
946 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
947
948 if (!netif_running(bnad->netdev) ||
949 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
950 return;
951
952 mod_timer(&bnad->stats_timer,
953 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
954}
955
956void
957bnad_cb_stats_clr(struct bnad *bnad)
958{
959}
960
961/* Resource allocation, free functions */
962
963static void
964bnad_mem_free(struct bnad *bnad,
965 struct bna_mem_info *mem_info)
966{
967 int i;
968 dma_addr_t dma_pa;
969
970 if (mem_info->mdl == NULL)
971 return;
972
973 for (i = 0; i < mem_info->num; i++) {
974 if (mem_info->mdl[i].kva != NULL) {
975 if (mem_info->mem_type == BNA_MEM_T_DMA) {
976 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
977 dma_pa);
978 pci_free_consistent(bnad->pcidev,
979 mem_info->mdl[i].len,
980 mem_info->mdl[i].kva, dma_pa);
981 } else
982 kfree(mem_info->mdl[i].kva);
983 }
984 }
985 kfree(mem_info->mdl);
986 mem_info->mdl = NULL;
987}
988
989static int
990bnad_mem_alloc(struct bnad *bnad,
991 struct bna_mem_info *mem_info)
992{
993 int i;
994 dma_addr_t dma_pa;
995
996 if ((mem_info->num == 0) || (mem_info->len == 0)) {
997 mem_info->mdl = NULL;
998 return 0;
999 }
1000
1001 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1002 GFP_KERNEL);
1003 if (mem_info->mdl == NULL)
1004 return -ENOMEM;
1005
1006 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1007 for (i = 0; i < mem_info->num; i++) {
1008 mem_info->mdl[i].len = mem_info->len;
1009 mem_info->mdl[i].kva =
1010 pci_alloc_consistent(bnad->pcidev,
1011 mem_info->len, &dma_pa);
1012
1013 if (mem_info->mdl[i].kva == NULL)
1014 goto err_return;
1015
1016 BNA_SET_DMA_ADDR(dma_pa,
1017 &(mem_info->mdl[i].dma));
1018 }
1019 } else {
1020 for (i = 0; i < mem_info->num; i++) {
1021 mem_info->mdl[i].len = mem_info->len;
1022 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1023 GFP_KERNEL);
1024 if (mem_info->mdl[i].kva == NULL)
1025 goto err_return;
1026 }
1027 }
1028
1029 return 0;
1030
1031err_return:
1032 bnad_mem_free(bnad, mem_info);
1033 return -ENOMEM;
1034}
1035
1036/* Free IRQ for Mailbox */
1037static void
1038bnad_mbox_irq_free(struct bnad *bnad,
1039 struct bna_intr_info *intr_info)
1040{
1041 int irq;
1042 unsigned long flags;
1043
1044 if (intr_info->idl == NULL)
1045 return;
1046
1047 spin_lock_irqsave(&bnad->bna_lock, flags);
1048
1049 bnad_disable_mbox_irq(bnad);
1050
1051 irq = BNAD_GET_MBOX_IRQ(bnad);
1052 free_irq(irq, bnad->netdev);
1053
1054 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1055
1056 kfree(intr_info->idl);
1057}
1058
1059/*
1060 * Allocates the IRQ for the Mailbox, but keeps it disabled.
1061 * It will be enabled once we get the mbox enable callback
1062 * from bna.
1063 */
1064static int
1065bnad_mbox_irq_alloc(struct bnad *bnad,
1066 struct bna_intr_info *intr_info)
1067{
1068 int err;
1069	unsigned long irq_flags, flags;
1070 u32 irq;
1071 irq_handler_t irq_handler;
1072
1073 /* Mbox should use only 1 vector */
1074
1075 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1076 if (!intr_info->idl)
1077 return -ENOMEM;
1078
1079 spin_lock_irqsave(&bnad->bna_lock, flags);
1080 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1081 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1082 irq = bnad->msix_table[bnad->msix_num - 1].vector;
1083		irq_flags = 0;
1084 intr_info->intr_type = BNA_INTR_T_MSIX;
1085 intr_info->idl[0].vector = bnad->msix_num - 1;
1086 } else {
1087 irq_handler = (irq_handler_t)bnad_isr;
1088 irq = bnad->pcidev->irq;
1089		irq_flags = IRQF_SHARED;
1090 intr_info->intr_type = BNA_INTR_T_INTX;
1091 /* intr_info->idl.vector = 0 ? */
1092 }
1093 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1094
1095 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1096
1097	err = request_irq(irq, irq_handler, irq_flags,
1098 bnad->mbox_irq_name, bnad->netdev);
1099 if (err) {
1100 kfree(intr_info->idl);
1101 intr_info->idl = NULL;
1102 return err;
1103 }
1104
1105 spin_lock_irqsave(&bnad->bna_lock, flags);
1106 bnad_disable_mbox_irq(bnad);
1107 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1108 return 0;
1109}
1110
1111static void
1112bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1113{
1114 kfree(intr_info->idl);
1115 intr_info->idl = NULL;
1116}
1117
1118/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1119static int
1120bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1121 uint txrx_id, struct bna_intr_info *intr_info)
1122{
1123 int i, vector_start = 0;
1124 u32 cfg_flags;
1125 unsigned long flags;
1126
1127 spin_lock_irqsave(&bnad->bna_lock, flags);
1128 cfg_flags = bnad->cfg_flags;
1129 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1130
1131 if (cfg_flags & BNAD_CF_MSIX) {
1132 intr_info->intr_type = BNA_INTR_T_MSIX;
1133 intr_info->idl = kcalloc(intr_info->num,
1134 sizeof(struct bna_intr_descr),
1135 GFP_KERNEL);
1136 if (!intr_info->idl)
1137 return -ENOMEM;
1138
1139 switch (src) {
1140 case BNAD_INTR_TX:
1141 vector_start = txrx_id;
1142 break;
1143
1144 case BNAD_INTR_RX:
1145 vector_start = bnad->num_tx * bnad->num_txq_per_tx +
1146 txrx_id;
1147 break;
1148
1149 default:
1150 BUG();
1151 }
1152
1153 for (i = 0; i < intr_info->num; i++)
1154 intr_info->idl[i].vector = vector_start + i;
1155 } else {
1156 intr_info->intr_type = BNA_INTR_T_INTX;
1157 intr_info->num = 1;
1158 intr_info->idl = kcalloc(intr_info->num,
1159 sizeof(struct bna_intr_descr),
1160 GFP_KERNEL);
1161 if (!intr_info->idl)
1162 return -ENOMEM;
1163
1164 switch (src) {
1165 case BNAD_INTR_TX:
1166 intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
1167 break;
1168
1169 case BNAD_INTR_RX:
1170 intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
1171 break;
1172 }
1173 }
1174 return 0;
1175}
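
/*
 * A minimal sketch of the MSI-X vector layout this function implies:
 * Tx completion vectors occupy [0, num_tx * num_txq_per_tx), Rx (CQ)
 * vectors follow, and the last entry of msix_table is reserved for the
 * mailbox (see BNAD_GET_MBOX_IRQ).  Helper names are illustrative:
 *
 *	static inline int bnad_tx_vector(struct bnad *bnad,
 *					 uint txrx_id, int i)
 *	{
 *		return txrx_id + i;
 *	}
 *
 *	static inline int bnad_rx_vector(struct bnad *bnad,
 *					 uint txrx_id, int i)
 *	{
 *		return bnad->num_tx * bnad->num_txq_per_tx + txrx_id + i;
 *	}
 */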
1176
1177/**
1178 * NOTE: Should be called for MSIX only
1179 * Unregisters Tx MSIX vector(s) from the kernel
1180 */
1181static void
1182bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1183 int num_txqs)
1184{
1185 int i;
1186 int vector_num;
1187
1188 for (i = 0; i < num_txqs; i++) {
1189 if (tx_info->tcb[i] == NULL)
1190 continue;
1191
1192 vector_num = tx_info->tcb[i]->intr_vector;
1193 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1194 }
1195}
1196
1197/**
1198 * NOTE: Should be called for MSIX only
1199 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1200 */
1201static int
1202bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1203 uint tx_id, int num_txqs)
1204{
1205 int i;
1206 int err;
1207 int vector_num;
1208
1209 for (i = 0; i < num_txqs; i++) {
1210 vector_num = tx_info->tcb[i]->intr_vector;
1211 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1212 tx_id + tx_info->tcb[i]->id);
1213 err = request_irq(bnad->msix_table[vector_num].vector,
1214 (irq_handler_t)bnad_msix_tx, 0,
1215 tx_info->tcb[i]->name,
1216 tx_info->tcb[i]);
1217 if (err)
1218 goto err_return;
1219 }
1220
1221 return 0;
1222
1223err_return:
1224 if (i > 0)
1225		bnad_tx_msix_unregister(bnad, tx_info, i);
1226 return -1;
1227}
1228
1229/**
1230 * NOTE: Should be called for MSIX only
1231 * Unregisters Rx MSIX vector(s) from the kernel
1232 */
1233static void
1234bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1235 int num_rxps)
1236{
1237 int i;
1238 int vector_num;
1239
1240 for (i = 0; i < num_rxps; i++) {
1241 if (rx_info->rx_ctrl[i].ccb == NULL)
1242 continue;
1243
1244 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1245 free_irq(bnad->msix_table[vector_num].vector,
1246 rx_info->rx_ctrl[i].ccb);
1247 }
1248}
1249
1250/**
1251 * NOTE: Should be called for MSIX only
1252 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1253 */
1254static int
1255bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1256 uint rx_id, int num_rxps)
1257{
1258 int i;
1259 int err;
1260 int vector_num;
1261
1262 for (i = 0; i < num_rxps; i++) {
1263 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1264 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1265 bnad->netdev->name,
1266 rx_id + rx_info->rx_ctrl[i].ccb->id);
1267 err = request_irq(bnad->msix_table[vector_num].vector,
1268 (irq_handler_t)bnad_msix_rx, 0,
1269 rx_info->rx_ctrl[i].ccb->name,
1270 rx_info->rx_ctrl[i].ccb);
1271 if (err)
1272 goto err_return;
1273 }
1274
1275 return 0;
1276
1277err_return:
1278 if (i > 0)
1279		bnad_rx_msix_unregister(bnad, rx_info, i);
1280 return -1;
1281}
1282
1283/* Free Tx object Resources */
1284static void
1285bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1286{
1287 int i;
1288
1289 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1290 if (res_info[i].res_type == BNA_RES_T_MEM)
1291 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1292 else if (res_info[i].res_type == BNA_RES_T_INTR)
1293 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1294 }
1295}
1296
1297/* Allocates memory and interrupt resources for Tx object */
1298static int
1299bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1300 uint tx_id)
1301{
1302 int i, err = 0;
1303
1304 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1305 if (res_info[i].res_type == BNA_RES_T_MEM)
1306 err = bnad_mem_alloc(bnad,
1307 &res_info[i].res_u.mem_info);
1308 else if (res_info[i].res_type == BNA_RES_T_INTR)
1309 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1310 &res_info[i].res_u.intr_info);
1311 if (err)
1312 goto err_return;
1313 }
1314 return 0;
1315
1316err_return:
1317 bnad_tx_res_free(bnad, res_info);
1318 return err;
1319}
1320
1321/* Free Rx object Resources */
1322static void
1323bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1324{
1325 int i;
1326
1327 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1328 if (res_info[i].res_type == BNA_RES_T_MEM)
1329 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1330 else if (res_info[i].res_type == BNA_RES_T_INTR)
1331 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1332 }
1333}
1334
1335/* Allocates memory and interrupt resources for Rx object */
1336static int
1337bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1338 uint rx_id)
1339{
1340 int i, err = 0;
1341
1342 /* All memory needs to be allocated before setup_ccbs */
1343 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1344 if (res_info[i].res_type == BNA_RES_T_MEM)
1345 err = bnad_mem_alloc(bnad,
1346 &res_info[i].res_u.mem_info);
1347 else if (res_info[i].res_type == BNA_RES_T_INTR)
1348 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1349 &res_info[i].res_u.intr_info);
1350 if (err)
1351 goto err_return;
1352 }
1353 return 0;
1354
1355err_return:
1356 bnad_rx_res_free(bnad, res_info);
1357 return err;
1358}
1359
1360/* Timer callbacks */
1361/* a) IOC timer */
1362static void
1363bnad_ioc_timeout(unsigned long data)
1364{
1365 struct bnad *bnad = (struct bnad *)data;
1366 unsigned long flags;
1367
1368 spin_lock_irqsave(&bnad->bna_lock, flags);
1369 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
1370 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1371}
1372
1373static void
1374bnad_ioc_hb_check(unsigned long data)
1375{
1376 struct bnad *bnad = (struct bnad *)data;
1377 unsigned long flags;
1378
1379 spin_lock_irqsave(&bnad->bna_lock, flags);
1380 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
1381 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1382}
1383
1384static void
1385bnad_ioc_sem_timeout(unsigned long data)
1386{
1387 struct bnad *bnad = (struct bnad *)data;
1388 unsigned long flags;
1389
1390 spin_lock_irqsave(&bnad->bna_lock, flags);
1391 bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
1392 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1393}
1394
1395/*
1396 * All timer routines use bnad->bna_lock to protect against
1397 * the following race, which may occur in case of no locking:
1398 * Time CPU m CPU n
1399 * 0 1 = test_bit
1400 * 1 clear_bit
1401 * 2 del_timer_sync
1402 * 3 mod_timer
1403 */
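
/*
 * bnad_stats_timer_stop() below follows this discipline: the RUNNING
 * bit is tested and cleared atomically under bna_lock, and only a
 * context that actually cleared it calls del_timer_sync(), with the
 * lock already dropped.
 */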
1404
1405/* b) Dynamic Interrupt Moderation Timer */
1406static void
1407bnad_dim_timeout(unsigned long data)
1408{
1409 struct bnad *bnad = (struct bnad *)data;
1410 struct bnad_rx_info *rx_info;
1411 struct bnad_rx_ctrl *rx_ctrl;
1412 int i, j;
1413 unsigned long flags;
1414
1415 if (!netif_carrier_ok(bnad->netdev))
1416 return;
1417
1418 spin_lock_irqsave(&bnad->bna_lock, flags);
1419 for (i = 0; i < bnad->num_rx; i++) {
1420 rx_info = &bnad->rx_info[i];
1421 if (!rx_info->rx)
1422 continue;
1423 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1424 rx_ctrl = &rx_info->rx_ctrl[j];
1425 if (!rx_ctrl->ccb)
1426 continue;
1427 bna_rx_dim_update(rx_ctrl->ccb);
1428 }
1429 }
1430
1431	/* Check for BNAD_RF_DIM_TIMER_RUNNING; this does not eliminate the race */
1432 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1433 mod_timer(&bnad->dim_timer,
1434 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1435 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1436}
1437
1438/* c) Statistics Timer */
1439static void
1440bnad_stats_timeout(unsigned long data)
1441{
1442 struct bnad *bnad = (struct bnad *)data;
1443 unsigned long flags;
1444
1445 if (!netif_running(bnad->netdev) ||
1446 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1447 return;
1448
1449 spin_lock_irqsave(&bnad->bna_lock, flags);
1450 bna_stats_get(&bnad->bna);
1451 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1452}
1453
1454/*
1455 * Set up timer for DIM
1456 * Called with bnad->bna_lock held
1457 */
1458void
1459bnad_dim_timer_start(struct bnad *bnad)
1460{
1461 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1462 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1463 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1464 (unsigned long)bnad);
1465 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1466 mod_timer(&bnad->dim_timer,
1467 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1468 }
1469}
1470
1471/*
1472 * Set up timer for statistics
1473 * Called with mutex_lock(&bnad->conf_mutex) held
1474 */
1475static void
1476bnad_stats_timer_start(struct bnad *bnad)
1477{
1478 unsigned long flags;
1479
1480 spin_lock_irqsave(&bnad->bna_lock, flags);
1481 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1482 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1483 (unsigned long)bnad);
1484 mod_timer(&bnad->stats_timer,
1485 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1486 }
1487 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1488
1489}
1490
1491/*
1492 * Stops the stats timer
1493 * Called with mutex_lock(&bnad->conf_mutex) held
1494 */
1495static void
1496bnad_stats_timer_stop(struct bnad *bnad)
1497{
1498 int to_del = 0;
1499 unsigned long flags;
1500
1501 spin_lock_irqsave(&bnad->bna_lock, flags);
1502 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1503 to_del = 1;
1504 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1505 if (to_del)
1506 del_timer_sync(&bnad->stats_timer);
1507}
1508
1509/* Utilities */
1510
1511static void
1512bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1513{
1514 int i = 1; /* Index 0 has broadcast address */
1515 struct netdev_hw_addr *mc_addr;
1516
1517 netdev_for_each_mc_addr(mc_addr, netdev) {
1518 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1519 ETH_ALEN);
1520 i++;
1521 }
1522}
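
/*
 * A usage note: the caller is expected to size mc_list for
 * (netdev_mc_count(netdev) + 1) ETH_ALEN-byte entries, since index 0
 * is reserved for the broadcast address and this routine fills entries
 * from index 1 onwards.
 */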
1523
1524static int
1525bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1526{
1527 struct bnad_rx_ctrl *rx_ctrl =
1528 container_of(napi, struct bnad_rx_ctrl, napi);
1529 struct bna_ccb *ccb;
1530 struct bnad *bnad;
1531 int rcvd = 0;
1532
1533 ccb = rx_ctrl->ccb;
1534
1535 bnad = ccb->bnad;
1536
1537 if (!netif_carrier_ok(bnad->netdev))
1538 goto poll_exit;
1539
1540 rcvd = bnad_poll_cq(bnad, ccb, budget);
1541 if (rcvd == budget)
1542 return rcvd;
1543
1544poll_exit:
1545	napi_complete(napi);
1546
1547 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1548
1549 bnad_enable_rx_irq(bnad, ccb);
1550 return rcvd;
1551}
1552
1553static int
1554bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
1555{
1556 struct bnad_rx_ctrl *rx_ctrl =
1557 container_of(napi, struct bnad_rx_ctrl, napi);
1558 struct bna_ccb *ccb;
1559 struct bnad *bnad;
1560 int rcvd = 0;
1561 int i, j;
1562
1563 ccb = rx_ctrl->ccb;
1564
1565 bnad = ccb->bnad;
1566
1567 if (!netif_carrier_ok(bnad->netdev))
1568 goto poll_exit;
1569
1570 /* Handle Tx Completions, if any */
1571 for (i = 0; i < bnad->num_tx; i++) {
1572 for (j = 0; j < bnad->num_txq_per_tx; j++)
1573 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
1574 }
1575
1576 /* Handle Rx Completions */
1577 rcvd = bnad_poll_cq(bnad, ccb, budget);
1578 if (rcvd == budget)
1579 return rcvd;
1580poll_exit:
1581	napi_complete(napi);
1582
1583 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1584
1585 bnad_enable_txrx_irqs(bnad);
1586 return rcvd;
1587}
1588
1589static void
1590bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1591{
1592 int (*napi_poll) (struct napi_struct *, int);
1593 struct bnad_rx_ctrl *rx_ctrl;
1594 int i;
1595 unsigned long flags;
1596
1597 spin_lock_irqsave(&bnad->bna_lock, flags);
1598 if (bnad->cfg_flags & BNAD_CF_MSIX)
1599 napi_poll = bnad_napi_poll_rx;
1600 else
1601 napi_poll = bnad_napi_poll_txrx;
1602 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1603
1604 /* Initialize & enable NAPI */
1605 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1606 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1607 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1608 napi_poll, 64);
1609 napi_enable(&rx_ctrl->napi);
1610 }
1611}
1612
1613static void
1614bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1615{
1616 int i;
1617
1618 /* First disable and then clean up */
1619 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1620 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1621 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1622 }
1623}
1624
1625/* Called with mutex_lock(&bnad->conf_mutex) held */
1626void
1627bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1628{
1629 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1630 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1631 unsigned long flags;
1632
1633 if (!tx_info->tx)
1634 return;
1635
1636 init_completion(&bnad->bnad_completions.tx_comp);
1637 spin_lock_irqsave(&bnad->bna_lock, flags);
1638 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1639 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1640 wait_for_completion(&bnad->bnad_completions.tx_comp);
1641
1642 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1643 bnad_tx_msix_unregister(bnad, tx_info,
1644 bnad->num_txq_per_tx);
1645
1646 spin_lock_irqsave(&bnad->bna_lock, flags);
1647 bna_tx_destroy(tx_info->tx);
1648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1649
1650 tx_info->tx = NULL;
1651
1652 if (0 == tx_id)
1653 tasklet_kill(&bnad->tx_free_tasklet);
1654
1655 bnad_tx_res_free(bnad, res_info);
1656}
1657
1658/* Called with mutex_lock(&bnad->conf_mutex) held */
1659int
1660bnad_setup_tx(struct bnad *bnad, uint tx_id)
1661{
1662 int err;
1663 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1664 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1665 struct bna_intr_info *intr_info =
1666 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1667 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1668 struct bna_tx_event_cbfn tx_cbfn;
1669 struct bna_tx *tx;
1670 unsigned long flags;
1671
1672 /* Initialize the Tx object configuration */
1673 tx_config->num_txq = bnad->num_txq_per_tx;
1674 tx_config->txq_depth = bnad->txq_depth;
1675 tx_config->tx_type = BNA_TX_T_REGULAR;
1676
1677 /* Initialize the tx event handlers */
1678 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1679 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1680 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1681 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1682 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1683
1684 /* Get BNA's resource requirement for one tx object */
1685 spin_lock_irqsave(&bnad->bna_lock, flags);
1686 bna_tx_res_req(bnad->num_txq_per_tx,
1687 bnad->txq_depth, res_info);
1688 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1689
1690 /* Fill Unmap Q memory requirements */
1691 BNAD_FILL_UNMAPQ_MEM_REQ(
1692 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1693 bnad->num_txq_per_tx,
1694 BNAD_TX_UNMAPQ_DEPTH);
1695
1696 /* Allocate resources */
1697 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1698 if (err)
1699 return err;
1700
1701 /* Ask BNA to create one Tx object, supplying required resources */
1702 spin_lock_irqsave(&bnad->bna_lock, flags);
1703 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1704 tx_info);
1705 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1706 if (!tx)
1707 goto err_return;
1708 tx_info->tx = tx;
1709
1710 /* Register ISR for the Tx object */
1711 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1712 err = bnad_tx_msix_register(bnad, tx_info,
1713 tx_id, bnad->num_txq_per_tx);
1714 if (err)
1715 goto err_return;
1716 }
1717
1718 spin_lock_irqsave(&bnad->bna_lock, flags);
1719 bna_tx_enable(tx);
1720 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1721
1722 return 0;
1723
1724err_return:
1725 bnad_tx_res_free(bnad, res_info);
1726 return err;
1727}
1728
1729/* Setup the rx config for bna_rx_create */
1730/* bnad decides the configuration */
1731static void
1732bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1733{
1734 rx_config->rx_type = BNA_RX_T_REGULAR;
1735 rx_config->num_paths = bnad->num_rxp_per_rx;
1736
1737 if (bnad->num_rxp_per_rx > 1) {
1738 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1739 rx_config->rss_config.hash_type =
1740 (BFI_RSS_T_V4_TCP |
1741 BFI_RSS_T_V6_TCP |
1742 BFI_RSS_T_V4_IP |
1743 BFI_RSS_T_V6_IP);
1744 rx_config->rss_config.hash_mask =
1745 bnad->num_rxp_per_rx - 1;
1746 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1747 sizeof(rx_config->rss_config.toeplitz_hash_key));
1748 } else {
1749 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1750 memset(&rx_config->rss_config, 0,
1751 sizeof(rx_config->rss_config));
1752 }
1753 rx_config->rxp_type = BNA_RXP_SLR;
1754 rx_config->q_depth = bnad->rxq_depth;
1755
1756 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1757
1758 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1759}
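/*
 * Worked example of the RSS setup above (illustrative only): with
 * num_rxp_per_rx == 4, hash_mask is 0x3, so the low bits of the
 * Toeplitz hash select one of RxPaths 0..3. This mask arithmetic
 * assumes num_rxp_per_rx is a power of two; with e.g. 3 paths the
 * mask 0x2 could never select path 1.
 */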
1760
1761/* Called with mutex_lock(&bnad->conf_mutex) held */
1762void
1763bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1764{
1765 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1766 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1767 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1768 unsigned long flags;
1769 int dim_timer_del = 0;
1770
1771 if (!rx_info->rx)
1772 return;
1773
1774 if (0 == rx_id) {
1775 spin_lock_irqsave(&bnad->bna_lock, flags);
1776 dim_timer_del = bnad_dim_timer_running(bnad);
1777 if (dim_timer_del)
1778 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1779 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1780 if (dim_timer_del)
1781 del_timer_sync(&bnad->dim_timer);
1782 }
1783
1784 bnad_napi_disable(bnad, rx_id);
1785
1786 init_completion(&bnad->bnad_completions.rx_comp);
1787 spin_lock_irqsave(&bnad->bna_lock, flags);
1788 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1789 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1790 wait_for_completion(&bnad->bnad_completions.rx_comp);
1791
1792 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1793 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1794
1795 spin_lock_irqsave(&bnad->bna_lock, flags);
1796 bna_rx_destroy(rx_info->rx);
1797 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1798
1799 rx_info->rx = NULL;
1800
1801 bnad_rx_res_free(bnad, res_info);
1802}
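/*
 * Teardown above is the reverse of setup: stop the DIM timer (default
 * Rx only), quiesce NAPI, ask BNA to disable the Rx and wait for the
 * bnad_cb_rx_disabled completion, then unregister the MSI-X vectors,
 * destroy the Rx object and free its resources. Waiting for the
 * completion first ensures the hardware is quiesced before any of the
 * backing memory is released.
 */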
1803
1804/* Called with mutex_lock(&bnad->conf_mutex) held */
1805int
1806bnad_setup_rx(struct bnad *bnad, uint rx_id)
1807{
1808 int err;
1809 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1810 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1811 struct bna_intr_info *intr_info =
1812 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1813 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1814 struct bna_rx_event_cbfn rx_cbfn;
1815 struct bna_rx *rx;
1816 unsigned long flags;
1817
1818 /* Initialize the Rx object configuration */
1819 bnad_init_rx_config(bnad, rx_config);
1820
1821 /* Initialize the Rx event handlers */
1822 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1823 rx_cbfn.rcb_destroy_cbfn = NULL;
1824 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1825 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1826 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1827 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1828
1829 /* Get BNA's resource requirement for one Rx object */
1830 spin_lock_irqsave(&bnad->bna_lock, flags);
1831 bna_rx_res_req(rx_config, res_info);
1832 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1833
1834 /* Fill Unmap Q memory requirements */
1835 BNAD_FILL_UNMAPQ_MEM_REQ(
1836 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1837 rx_config->num_paths +
1838 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1839 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1840
1841	/* Allocate resources */
1842 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1843 if (err)
1844 return err;
1845
1846 /* Ask BNA to create one Rx object, supplying required resources */
1847 spin_lock_irqsave(&bnad->bna_lock, flags);
1848 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1849 rx_info);
1850 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1851 if (!rx)
1852 goto err_return;
1853 rx_info->rx = rx;
1854
1855 /* Register ISR for the Rx object */
1856 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1857 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1858 rx_config->num_paths);
1859 if (err)
1860 goto err_return;
1861 }
1862
1863 /* Enable NAPI */
1864 bnad_napi_enable(bnad, rx_id);
1865
1866 spin_lock_irqsave(&bnad->bna_lock, flags);
1867 if (0 == rx_id) {
1868 /* Set up Dynamic Interrupt Moderation Vector */
1869 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1870 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1871
1872 /* Enable VLAN filtering only on the default Rx */
1873 bna_rx_vlanfilter_enable(rx);
1874
1875 /* Start the DIM timer */
1876 bnad_dim_timer_start(bnad);
1877 }
1878
1879 bna_rx_enable(rx);
1880 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1881
1882 return 0;
1883
1884err_return:
1885 bnad_cleanup_rx(bnad, rx_id);
1886 return err;
1887}
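/*
 * Only the default Rx (rx_id 0) carries the per-device state set up
 * above: the dynamic interrupt moderation vector, VLAN filtering and
 * the DIM timer. Any additional Rx objects are expected to share that
 * state rather than duplicate it.
 */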
1888
1889/* Called with conf_lock & bnad->bna_lock held */
1890void
1891bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1892{
1893 struct bnad_tx_info *tx_info;
1894
1895 tx_info = &bnad->tx_info[0];
1896 if (!tx_info->tx)
1897 return;
1898
1899 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1900}
1901
1902/* Called with conf_lock & bnad->bna_lock held */
1903void
1904bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1905{
1906 struct bnad_rx_info *rx_info;
1907 int i;
1908
1909 for (i = 0; i < bnad->num_rx; i++) {
1910 rx_info = &bnad->rx_info[i];
1911 if (!rx_info->rx)
1912 continue;
1913 bna_rx_coalescing_timeo_set(rx_info->rx,
1914 bnad->rx_coalescing_timeo);
1915 }
1916}
1917
1918/*
1919 * Called with bnad->bna_lock held
1920 */
1921static int
1922bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1923{
1924 int ret;
1925
1926 if (!is_valid_ether_addr(mac_addr))
1927 return -EADDRNOTAVAIL;
1928
1929 /* If datapath is down, pretend everything went through */
1930 if (!bnad->rx_info[0].rx)
1931 return 0;
1932
1933 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1934 if (ret != BNA_CB_SUCCESS)
1935 return -EADDRNOTAVAIL;
1936
1937 return 0;
1938}
1939
1940/* Should be called with conf_lock held */
1941static int
1942bnad_enable_default_bcast(struct bnad *bnad)
1943{
1944 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1945 int ret;
1946 unsigned long flags;
1947
1948 init_completion(&bnad->bnad_completions.mcast_comp);
1949
1950 spin_lock_irqsave(&bnad->bna_lock, flags);
1951 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1952 bnad_cb_rx_mcast_add);
1953 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1954
1955 if (ret == BNA_CB_SUCCESS)
1956 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1957 else
1958 return -ENODEV;
1959
1960 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1961 return -ENODEV;
1962
1963 return 0;
1964}
1965
1966/* Statistics utilities */
1967void
1968bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
1969{
1970 int i, j;
1971
1972 for (i = 0; i < bnad->num_rx; i++) {
1973 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1974 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
1975 stats->rx_packets += bnad->rx_info[i].
1976 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
1977 stats->rx_bytes += bnad->rx_info[i].
1978 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
1979 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
1980 bnad->rx_info[i].rx_ctrl[j].ccb->
1981 rcb[1]->rxq) {
1982 stats->rx_packets +=
1983 bnad->rx_info[i].rx_ctrl[j].
1984 ccb->rcb[1]->rxq->rx_packets;
1985 stats->rx_bytes +=
1986 bnad->rx_info[i].rx_ctrl[j].
1987 ccb->rcb[1]->rxq->rx_bytes;
1988 }
1989 }
1990 }
1991 }
1992 for (i = 0; i < bnad->num_tx; i++) {
1993 for (j = 0; j < bnad->num_txq_per_tx; j++) {
1994 if (bnad->tx_info[i].tcb[j]) {
1995 stats->tx_packets +=
1996 bnad->tx_info[i].tcb[j]->txq->tx_packets;
1997 stats->tx_bytes +=
1998 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
1999 }
2000 }
2001 }
2002}
2003
2004/*
2005 * Must be called with the bna_lock held.
2006 */
2007void
2008bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2009{
2010 struct bfi_ll_stats_mac *mac_stats;
2011 u64 bmap;
2012 int i;
2013
2014 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
2015 stats->rx_errors =
2016 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2017 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2018 mac_stats->rx_undersize;
2019 stats->tx_errors = mac_stats->tx_fcs_error +
2020 mac_stats->tx_undersize;
2021 stats->rx_dropped = mac_stats->rx_drop;
2022 stats->tx_dropped = mac_stats->tx_drop;
2023 stats->multicast = mac_stats->rx_multicast;
2024 stats->collisions = mac_stats->tx_total_collision;
2025
2026 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2027
2028 /* receive ring buffer overflow ?? */
2029
2030 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2031 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2032	/* receiver FIFO overrun */
2033 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2034 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2035 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2036 if (bmap & 1) {
2037 stats->rx_fifo_errors +=
2038 bnad->stats.bna_stats->
2039 hw_stats->rxf_stats[i].frame_drops;
2040 break;
2041 }
2042 bmap >>= 1;
2043 }
2044}
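/*
 * The bitmap walk above scans rxf_bmap (one bit per active Rx
 * function, split across two 32-bit words) and, because of the break,
 * folds frame_drops from only the first active RxF into
 * rx_fifo_errors. rx_over_errors is left at zero; the '??' comment
 * above marks the missing hardware counter.
 */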
2045
2046static void
2047bnad_mbox_irq_sync(struct bnad *bnad)
2048{
2049 u32 irq;
2050 unsigned long flags;
2051
2052 spin_lock_irqsave(&bnad->bna_lock, flags);
2053 if (bnad->cfg_flags & BNAD_CF_MSIX)
2054 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2055 else
2056 irq = bnad->pcidev->irq;
2057 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2058
2059 synchronize_irq(irq);
2060}
2061
2062/* Utility used by bnad_start_xmit, for doing TSO */
2063static int
2064bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2065{
2066 int err;
2067
2068	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2069 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2070 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2071 if (skb_header_cloned(skb)) {
2072 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2073 if (err) {
2074 BNAD_UPDATE_CTR(bnad, tso_err);
2075 return err;
2076 }
2077 }
2078
2079 /*
2080 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2081 * excluding the length field.
2082 */
2083 if (skb->protocol == htons(ETH_P_IP)) {
2084 struct iphdr *iph = ip_hdr(skb);
2085
2086 /* Do we really need these? */
2087 iph->tot_len = 0;
2088 iph->check = 0;
2089
2090 tcp_hdr(skb)->check =
2091 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2092 IPPROTO_TCP, 0);
2093 BNAD_UPDATE_CTR(bnad, tso4);
2094 } else {
2095 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2096
2097 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2098 ipv6h->payload_len = 0;
2099 tcp_hdr(skb)->check =
2100 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2101 IPPROTO_TCP, 0);
2102 BNAD_UPDATE_CTR(bnad, tso6);
2103 }
2104
2105 return 0;
2106}
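/*
 * The checksum seeding above, spelled out: for TSO the stack's partial
 * checksum is replaced with the inverted pseudo-header sum computed
 * with a zero length field (csum_tcpudp_magic / csum_ipv6_magic with
 * len 0). The device can then fold in the per-segment payload length
 * and data checksum for every segment it emits, instead of redoing
 * the pseudo-header sum from scratch.
 */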
2107
2108/*
2109 * Initialize Q numbers depending on Rx Paths
2110 * Called with bnad->bna_lock held, because of cfg_flags
2111 * access.
2112 */
2113static void
2114bnad_q_num_init(struct bnad *bnad)
2115{
2116 int rxps;
2117
2118 rxps = min((uint)num_online_cpus(),
2119 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2120
2121 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2122 rxps = 1; /* INTx */
2123
2124 bnad->num_rx = 1;
2125 bnad->num_tx = 1;
2126 bnad->num_rxp_per_rx = rxps;
2127 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2128}
2129
2130/*
2131 * Adjusts the Q numbers, given the number of MSI-X vectors.
2132 * Gives preference to RSS over Tx priority queues; in that
2133 * case only 1 TxQ is used.
2134 * Called with bnad->bna_lock held because of cfg_flags access
2135 */
2136static void
2137bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2138{
2139 bnad->num_txq_per_tx = 1;
2140 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2141 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2142 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2143 bnad->num_rxp_per_rx = msix_vectors -
2144 (bnad->num_tx * bnad->num_txq_per_tx) -
2145 BNAD_MAILBOX_MSIX_VECTORS;
2146 } else
2147 bnad->num_rxp_per_rx = 1;
2148}
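/*
 * Illustrative vector budget (hypothetical numbers, not from the
 * driver): with MSI-X and 8 online CPUs, bnad_q_num_init() yields
 * num_rxp_per_rx = 8, so msix_num = 1 TxQ + 8 RxPaths + 1 mailbox =
 * 10 vectors. If the PCI core grants fewer, bnad_q_num_adjust()
 * shrinks num_rxp_per_rx to fit the grant (or to a single path if
 * even that is not possible), keeping the single TxQ.
 */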
2149
2150static void
2151bnad_set_netdev_perm_addr(struct bnad *bnad)
2152{
2153 struct net_device *netdev = bnad->netdev;
2154
2155 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
2156 if (is_zero_ether_addr(netdev->dev_addr))
2157 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
2158}
2159
2160/* Enable / disable device */
2161static void
2162bnad_device_disable(struct bnad *bnad)
2163{
2164 unsigned long flags;
2165
2166 init_completion(&bnad->bnad_completions.ioc_comp);
2167
2168 spin_lock_irqsave(&bnad->bna_lock, flags);
2169 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2170 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2171
2172 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2173
2174}
2175
2176static int
2177bnad_device_enable(struct bnad *bnad)
2178{
2179 int err = 0;
2180 unsigned long flags;
2181
2182 init_completion(&bnad->bnad_completions.ioc_comp);
2183
2184 spin_lock_irqsave(&bnad->bna_lock, flags);
2185 bna_device_enable(&bnad->bna.device);
2186 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2187
2188 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2189
2190 if (bnad->bnad_completions.ioc_comp_status)
2191 err = bnad->bnad_completions.ioc_comp_status;
2192
2193 return err;
2194}
2195
2196/* Free BNA resources */
2197static void
2198bnad_res_free(struct bnad *bnad)
2199{
2200 int i;
2201 struct bna_res_info *res_info = &bnad->res_info[0];
2202
2203 for (i = 0; i < BNA_RES_T_MAX; i++) {
2204 if (res_info[i].res_type == BNA_RES_T_MEM)
2205 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2206 else
2207 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2208 }
2209}
2210
2211/* Allocates memory and interrupt resources for BNA */
2212static int
2213bnad_res_alloc(struct bnad *bnad)
2214{
2215 int i, err;
2216 struct bna_res_info *res_info = &bnad->res_info[0];
2217
2218 for (i = 0; i < BNA_RES_T_MAX; i++) {
2219 if (res_info[i].res_type == BNA_RES_T_MEM)
2220 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2221 else
2222 err = bnad_mbox_irq_alloc(bnad,
2223 &res_info[i].res_u.intr_info);
2224 if (err)
2225 goto err_return;
2226 }
2227 return 0;
2228
2229err_return:
2230 bnad_res_free(bnad);
2231 return err;
2232}
2233
2234/* Interrupt enable / disable */
2235static void
2236bnad_enable_msix(struct bnad *bnad)
2237{
2238 int i, ret;
2239 u32 tot_msix_num;
2240 unsigned long flags;
2241
2242 spin_lock_irqsave(&bnad->bna_lock, flags);
2243 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2244 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2245 return;
2246 }
2247 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2248
2249 if (bnad->msix_table)
2250 return;
2251
2252 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2253
2254 bnad->msix_table =
2255 kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2256
2257 if (!bnad->msix_table)
2258 goto intx_mode;
2259
2260 for (i = 0; i < tot_msix_num; i++)
2261 bnad->msix_table[i].entry = i;
2262
2263 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
2264 if (ret > 0) {
2265 /* Not enough MSI-X vectors. */
2266
2267 spin_lock_irqsave(&bnad->bna_lock, flags);
2268 /* ret = #of vectors that we got */
2269 bnad_q_num_adjust(bnad, ret);
2270 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2271
2272 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2273 + (bnad->num_rx
2274 * bnad->num_rxp_per_rx) +
2275 BNAD_MAILBOX_MSIX_VECTORS;
2276 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2277
2278 /* Try once more with adjusted numbers */
2279 /* If this fails, fall back to INTx */
2280 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2281 tot_msix_num);
2282 if (ret)
2283 goto intx_mode;
2284
2285 } else if (ret < 0)
2286 goto intx_mode;
2287 return;
2288
2289intx_mode:
2290
2291 kfree(bnad->msix_table);
2292 bnad->msix_table = NULL;
2293 bnad->msix_num = 0;
2294 bnad->msix_diag_num = 0;
2295 spin_lock_irqsave(&bnad->bna_lock, flags);
2296 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2297 bnad_q_num_init(bnad);
2298 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2299}
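/*
 * Summary of the fallback ladder above: a positive return from
 * pci_enable_msix() means only that many vectors are available, so
 * the queue counts are adjusted and the request retried once with the
 * smaller total. A second failure, a negative return, or a failed
 * table allocation all drop to intx_mode, which clears BNAD_CF_MSIX
 * and re-derives the queue numbers for single-interrupt operation.
 */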
2300
2301static void
2302bnad_disable_msix(struct bnad *bnad)
2303{
2304 u32 cfg_flags;
2305 unsigned long flags;
2306
2307 spin_lock_irqsave(&bnad->bna_lock, flags);
2308 cfg_flags = bnad->cfg_flags;
2309 if (bnad->cfg_flags & BNAD_CF_MSIX)
2310 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2311 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2312
2313 if (cfg_flags & BNAD_CF_MSIX) {
2314 pci_disable_msix(bnad->pcidev);
2315 kfree(bnad->msix_table);
2316 bnad->msix_table = NULL;
2317 }
2318}
2319
2320/* Netdev entry points */
2321static int
2322bnad_open(struct net_device *netdev)
2323{
2324 int err;
2325 struct bnad *bnad = netdev_priv(netdev);
2326 struct bna_pause_config pause_config;
2327 int mtu;
2328 unsigned long flags;
2329
2330 mutex_lock(&bnad->conf_mutex);
2331
2332 /* Tx */
2333 err = bnad_setup_tx(bnad, 0);
2334 if (err)
2335 goto err_return;
2336
2337 /* Rx */
2338 err = bnad_setup_rx(bnad, 0);
2339 if (err)
2340 goto cleanup_tx;
2341
2342 /* Port */
2343 pause_config.tx_pause = 0;
2344 pause_config.rx_pause = 0;
2345
2346 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2347
2348 spin_lock_irqsave(&bnad->bna_lock, flags);
2349 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2350 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2351 bna_port_enable(&bnad->bna.port);
2352 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2353
2354 /* Enable broadcast */
2355 bnad_enable_default_bcast(bnad);
2356
2357 /* Set the UCAST address */
2358 spin_lock_irqsave(&bnad->bna_lock, flags);
2359 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2360 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2361
2362 /* Start the stats timer */
2363 bnad_stats_timer_start(bnad);
2364
2365 mutex_unlock(&bnad->conf_mutex);
2366
2367 return 0;
2368
2369cleanup_tx:
2370 bnad_cleanup_tx(bnad, 0);
2371
2372err_return:
2373 mutex_unlock(&bnad->conf_mutex);
2374 return err;
2375}
2376
2377static int
2378bnad_stop(struct net_device *netdev)
2379{
2380 struct bnad *bnad = netdev_priv(netdev);
2381 unsigned long flags;
2382
2383 mutex_lock(&bnad->conf_mutex);
2384
2385 /* Stop the stats timer */
2386 bnad_stats_timer_stop(bnad);
2387
2388 init_completion(&bnad->bnad_completions.port_comp);
2389
2390 spin_lock_irqsave(&bnad->bna_lock, flags);
2391 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2392 bnad_cb_port_disabled);
2393 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2394
2395 wait_for_completion(&bnad->bnad_completions.port_comp);
2396
2397 bnad_cleanup_tx(bnad, 0);
2398 bnad_cleanup_rx(bnad, 0);
2399
2400 /* Synchronize mailbox IRQ */
2401 bnad_mbox_irq_sync(bnad);
2402
2403 mutex_unlock(&bnad->conf_mutex);
2404
2405 return 0;
2406}
2407
2408/* TX */
2409/*
2410 * bnad_start_xmit : Netdev entry point for Transmit
2411 * Called under lock held by net_device
2412 */
2413static netdev_tx_t
2414bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2415{
2416 struct bnad *bnad = netdev_priv(netdev);
2417
2418 u16 txq_prod, vlan_tag = 0;
2419 u32 unmap_prod, wis, wis_used, wi_range;
2420 u32 vectors, vect_id, i, acked;
2421 u32 tx_id;
2422 int err;
2423
2424 struct bnad_tx_info *tx_info;
2425 struct bna_tcb *tcb;
2426 struct bnad_unmap_q *unmap_q;
2427 dma_addr_t dma_addr;
2428 struct bna_txq_entry *txqent;
2429 bna_txq_wi_ctrl_flag_t flags;
2430
2431 if (unlikely
2432 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2433 dev_kfree_skb(skb);
2434 return NETDEV_TX_OK;
2435 }
2436
2437 /*
2438	 * Drops any Tx scheduled in the window between clearing the
2439	 * BNAD_RF_TX_STARTED flag and the netif_stop_queue() call.
2440 */
2441 if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
2442 dev_kfree_skb(skb);
2443 return NETDEV_TX_OK;
2444 }
2445
2446 tx_id = 0;
2447
2448 tx_info = &bnad->tx_info[tx_id];
2449 tcb = tx_info->tcb[tx_id];
2450 unmap_q = tcb->unmap_q;
2451
2452 vectors = 1 + skb_shinfo(skb)->nr_frags;
2453 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2454 dev_kfree_skb(skb);
2455 return NETDEV_TX_OK;
2456 }
2457 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2458 acked = 0;
2459 if (unlikely
2460 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2461 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2462 if ((u16) (*tcb->hw_consumer_index) !=
2463 tcb->consumer_index &&
2464 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2465 acked = bnad_free_txbufs(bnad, tcb);
2466 bna_ib_ack(tcb->i_dbell, acked);
2467 smp_mb__before_clear_bit();
2468 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2469 } else {
2470 netif_stop_queue(netdev);
2471 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2472 }
2473
2474 smp_mb();
2475 /*
2476 * Check again to deal with race condition between
2477 * netif_stop_queue here, and netif_wake_queue in
2478 * interrupt handler which is not inside netif tx lock.
2479 */
2480 if (likely
2481 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2482 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2483 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2484 return NETDEV_TX_BUSY;
2485 } else {
2486 netif_wake_queue(netdev);
2487 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2488 }
2489 }
2490
2491 unmap_prod = unmap_q->producer_index;
2492 wis_used = 1;
2493 vect_id = 0;
2494 flags = 0;
2495
2496 txq_prod = tcb->producer_index;
2497 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2498 BUG_ON(!(wi_range <= tcb->q_depth));
2499 txqent->hdr.wi.reserved = 0;
2500 txqent->hdr.wi.num_vectors = vectors;
2501 txqent->hdr.wi.opcode =
2502 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2503 BNA_TXQ_WI_SEND));
2504
2505 if (bnad->vlan_grp && vlan_tx_tag_present(skb)) {
2506 vlan_tag = (u16) vlan_tx_tag_get(skb);
2507 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2508 }
2509 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2510 vlan_tag =
2511 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2512 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2513 }
2514
2515 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2516
2517 if (skb_is_gso(skb)) {
2518 err = bnad_tso_prepare(bnad, skb);
2519 if (err) {
2520 dev_kfree_skb(skb);
2521 return NETDEV_TX_OK;
2522 }
2523 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2524 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2525 txqent->hdr.wi.l4_hdr_size_n_offset =
2526 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2527 (tcp_hdrlen(skb) >> 2,
2528 skb_transport_offset(skb)));
2529 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2530 u8 proto = 0;
2531
2532 txqent->hdr.wi.lso_mss = 0;
2533
2534 if (skb->protocol == htons(ETH_P_IP))
2535 proto = ip_hdr(skb)->protocol;
2536 else if (skb->protocol == htons(ETH_P_IPV6)) {
2537 /* nexthdr may not be TCP immediately. */
2538 proto = ipv6_hdr(skb)->nexthdr;
2539 }
2540 if (proto == IPPROTO_TCP) {
2541 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2542 txqent->hdr.wi.l4_hdr_size_n_offset =
2543 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2544 (0, skb_transport_offset(skb)));
2545
2546 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2547
2548 BUG_ON(!(skb_headlen(skb) >=
2549 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2550
2551 } else if (proto == IPPROTO_UDP) {
2552 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2553 txqent->hdr.wi.l4_hdr_size_n_offset =
2554 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2555 (0, skb_transport_offset(skb)));
2556
2557 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2558
2559 BUG_ON(!(skb_headlen(skb) >=
2560 skb_transport_offset(skb) +
2561 sizeof(struct udphdr)));
2562 } else {
2563 err = skb_checksum_help(skb);
2564 BNAD_UPDATE_CTR(bnad, csum_help);
2565 if (err) {
2566 dev_kfree_skb(skb);
2567 BNAD_UPDATE_CTR(bnad, csum_help_err);
2568 return NETDEV_TX_OK;
2569 }
2570 }
2571 } else {
2572 txqent->hdr.wi.lso_mss = 0;
2573 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2574 }
2575
2576 txqent->hdr.wi.flags = htons(flags);
2577
2578 txqent->hdr.wi.frame_length = htonl(skb->len);
2579
2580 unmap_q->unmap_array[unmap_prod].skb = skb;
2581 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2582 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2583 dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
2584 PCI_DMA_TODEVICE);
2585 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2586 dma_addr);
2587
2588 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2589 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2590
2591 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2592 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2593 u32 size = frag->size;
2594
2595 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2596 vect_id = 0;
2597 if (--wi_range)
2598 txqent++;
2599 else {
2600 BNA_QE_INDX_ADD(txq_prod, wis_used,
2601 tcb->q_depth);
2602 wis_used = 0;
2603 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2604 txqent, wi_range);
2605 BUG_ON(!(wi_range <= tcb->q_depth));
2606 }
2607 wis_used++;
2608 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2609 }
2610
2611 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2612 txqent->vector[vect_id].length = htons(size);
2613 dma_addr =
2614 pci_map_page(bnad->pcidev, frag->page,
2615 frag->page_offset, size,
2616 PCI_DMA_TODEVICE);
2617 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2618 dma_addr);
2619 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2620 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2621 }
2622
2623 unmap_q->producer_index = unmap_prod;
2624 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2625 tcb->producer_index = txq_prod;
2626
2627 smp_mb();
2628 bna_txq_prod_indx_doorbell(tcb);
2629
2630 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2631 tasklet_schedule(&bnad->tx_free_tasklet);
2632
2633 return NETDEV_TX_OK;
2634}
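/*
 * Work-item layout used above: each WI carries up to four DMA vectors
 * (see the "4 vectors per work item" note at BNA_TXQ_WI_NEEDED), with
 * the first WI holding the send header (opcode, flags, VLAN tag,
 * frame length) and overflow vectors continuing in extension WIs
 * tagged BNA_TXQ_WI_EXTENSION. The doorbell is rung only after
 * producer_index is published, with smp_mb() ordering the queue
 * writes ahead of the MMIO.
 */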
2635
2636/*
2637 * Uses spin_lock to synchronize reading of the stats structures,
2638 * which are written by BNA under the same lock.
2639 */
2640static struct rtnl_link_stats64 *
2641bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2642{
2643 struct bnad *bnad = netdev_priv(netdev);
2644 unsigned long flags;
2645
2646 spin_lock_irqsave(&bnad->bna_lock, flags);
2647
2648 bnad_netdev_qstats_fill(bnad, stats);
2649 bnad_netdev_hwstats_fill(bnad, stats);
2650
2651 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2652
2653 return stats;
2654}
2655
2656static void
2657bnad_set_rx_mode(struct net_device *netdev)
2658{
2659 struct bnad *bnad = netdev_priv(netdev);
2660 u32 new_mask, valid_mask;
2661 unsigned long flags;
2662
2663 spin_lock_irqsave(&bnad->bna_lock, flags);
2664
2665 new_mask = valid_mask = 0;
2666
2667 if (netdev->flags & IFF_PROMISC) {
2668 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2669 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2670 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2671 bnad->cfg_flags |= BNAD_CF_PROMISC;
2672 }
2673 } else {
2674 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2675 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2676 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2677 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2678 }
2679 }
2680
2681 if (netdev->flags & IFF_ALLMULTI) {
2682 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2683 new_mask |= BNA_RXMODE_ALLMULTI;
2684 valid_mask |= BNA_RXMODE_ALLMULTI;
2685 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2686 }
2687 } else {
2688 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2689 new_mask &= ~BNA_RXMODE_ALLMULTI;
2690 valid_mask |= BNA_RXMODE_ALLMULTI;
2691 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2692 }
2693 }
2694
2695 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2696
2697 if (!netdev_mc_empty(netdev)) {
2698 u8 *mcaddr_list;
2699 int mc_count = netdev_mc_count(netdev);
2700
2701 /* Index 0 holds the broadcast address */
2702 mcaddr_list =
2703 kzalloc((mc_count + 1) * ETH_ALEN,
2704 GFP_ATOMIC);
2705 if (!mcaddr_list)
2706 goto unlock;
2707
2708 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2709
2710 /* Copy rest of the MC addresses */
2711 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2712
2713 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2714 mcaddr_list, NULL);
2715
2716 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2717 kfree(mcaddr_list);
2718 }
2719unlock:
2720 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2721}
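/*
 * In the bna_rx_mode_set() call above, valid_mask marks which mode
 * bits are being changed and new_mask supplies their new values, so
 * promiscuous and allmulti can be toggled independently. The
 * multicast list is rebuilt with the broadcast address pinned at
 * index 0, matching bnad_enable_default_bcast().
 */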
2722
2723/*
2724 * bna_lock is used to sync writes to netdev->addr
2725 * conf_lock cannot be used since this call may be made
2726 * in a non-blocking context.
2727 */
2728static int
2729bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2730{
2731 int err;
2732 struct bnad *bnad = netdev_priv(netdev);
2733 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2734 unsigned long flags;
2735
2736 spin_lock_irqsave(&bnad->bna_lock, flags);
2737
2738 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2739
2740 if (!err)
2741 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2742
2743 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2744
2745 return err;
2746}
2747
2748static int
2749bnad_change_mtu(struct net_device *netdev, int new_mtu)
2750{
2751 int mtu, err = 0;
2752 unsigned long flags;
2753
2754 struct bnad *bnad = netdev_priv(netdev);
2755
2756 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2757 return -EINVAL;
2758
2759 mutex_lock(&bnad->conf_mutex);
2760
2761 netdev->mtu = new_mtu;
2762
2763 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2764
2765 spin_lock_irqsave(&bnad->bna_lock, flags);
2766 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2767 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2768
2769 mutex_unlock(&bnad->conf_mutex);
2770 return err;
2771}
2772
2773static void
2774bnad_vlan_rx_register(struct net_device *netdev,
2775 struct vlan_group *vlan_grp)
2776{
2777 struct bnad *bnad = netdev_priv(netdev);
2778
2779 mutex_lock(&bnad->conf_mutex);
2780 bnad->vlan_grp = vlan_grp;
2781 mutex_unlock(&bnad->conf_mutex);
2782}
2783
2784static void
2785bnad_vlan_rx_add_vid(struct net_device *netdev,
2786 unsigned short vid)
2787{
2788 struct bnad *bnad = netdev_priv(netdev);
2789 unsigned long flags;
2790
2791 if (!bnad->rx_info[0].rx)
2792 return;
2793
2794 mutex_lock(&bnad->conf_mutex);
2795
2796 spin_lock_irqsave(&bnad->bna_lock, flags);
2797 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2798 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2799
2800 mutex_unlock(&bnad->conf_mutex);
2801}
2802
2803static void
2804bnad_vlan_rx_kill_vid(struct net_device *netdev,
2805 unsigned short vid)
2806{
2807 struct bnad *bnad = netdev_priv(netdev);
2808 unsigned long flags;
2809
2810 if (!bnad->rx_info[0].rx)
2811 return;
2812
2813 mutex_lock(&bnad->conf_mutex);
2814
2815 spin_lock_irqsave(&bnad->bna_lock, flags);
2816 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2817 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2818
2819 mutex_unlock(&bnad->conf_mutex);
2820}
2821
2822#ifdef CONFIG_NET_POLL_CONTROLLER
2823static void
2824bnad_netpoll(struct net_device *netdev)
2825{
2826 struct bnad *bnad = netdev_priv(netdev);
2827 struct bnad_rx_info *rx_info;
2828 struct bnad_rx_ctrl *rx_ctrl;
2829 u32 curr_mask;
2830 int i, j;
2831
2832 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2833 bna_intx_disable(&bnad->bna, curr_mask);
2834 bnad_isr(bnad->pcidev->irq, netdev);
2835 bna_intx_enable(&bnad->bna, curr_mask);
2836 } else {
2837 for (i = 0; i < bnad->num_rx; i++) {
2838 rx_info = &bnad->rx_info[i];
2839 if (!rx_info->rx)
2840 continue;
2841 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2842 rx_ctrl = &rx_info->rx_ctrl[j];
2843 if (rx_ctrl->ccb) {
2844 bnad_disable_rx_irq(bnad,
2845 rx_ctrl->ccb);
2846 bnad_netif_rx_schedule_poll(bnad,
2847 rx_ctrl->ccb);
2848 }
2849 }
2850 }
2851 }
2852}
2853#endif
2854
2855static const struct net_device_ops bnad_netdev_ops = {
2856 .ndo_open = bnad_open,
2857 .ndo_stop = bnad_stop,
2858 .ndo_start_xmit = bnad_start_xmit,
2859 .ndo_get_stats64 = bnad_get_stats64,
2860 .ndo_set_rx_mode = bnad_set_rx_mode,
2861 .ndo_set_multicast_list = bnad_set_rx_mode,
2862 .ndo_validate_addr = eth_validate_addr,
2863 .ndo_set_mac_address = bnad_set_mac_address,
2864 .ndo_change_mtu = bnad_change_mtu,
2865 .ndo_vlan_rx_register = bnad_vlan_rx_register,
2866 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2867 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2868#ifdef CONFIG_NET_POLL_CONTROLLER
2869 .ndo_poll_controller = bnad_netpoll
2870#endif
2871};
2872
2873static void
2874bnad_netdev_init(struct bnad *bnad, bool using_dac)
2875{
2876 struct net_device *netdev = bnad->netdev;
2877
2878 netdev->features |= NETIF_F_IPV6_CSUM;
2879 netdev->features |= NETIF_F_TSO;
2880 netdev->features |= NETIF_F_TSO6;
2881
2882 netdev->features |= NETIF_F_GRO;
2883 pr_warn("bna: GRO enabled, using kernel stack GRO\n");
2884
2885 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2886
2887 if (using_dac)
2888 netdev->features |= NETIF_F_HIGHDMA;
2889
2890 netdev->features |=
2891 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2892 NETIF_F_HW_VLAN_FILTER;
2893
2894 netdev->vlan_features = netdev->features;
2895 netdev->mem_start = bnad->mmio_start;
2896 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2897
2898 netdev->netdev_ops = &bnad_netdev_ops;
2899 bnad_set_ethtool_ops(netdev);
2900}
2901
2902/*
2903 * 1. Initialize the bnad structure
2904 * 2. Setup netdev pointer in pci_dev
2905 * 3. Initialize Tx free tasklet
2906 * 4. Initialize no. of TxQ & CQs & MSIX vectors
2907 */
2908static int
2909bnad_init(struct bnad *bnad,
2910 struct pci_dev *pdev, struct net_device *netdev)
2911{
2912 unsigned long flags;
2913
2914 SET_NETDEV_DEV(netdev, &pdev->dev);
2915 pci_set_drvdata(pdev, netdev);
2916
2917 bnad->netdev = netdev;
2918 bnad->pcidev = pdev;
2919 bnad->mmio_start = pci_resource_start(pdev, 0);
2920 bnad->mmio_len = pci_resource_len(pdev, 0);
2921 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2922 if (!bnad->bar0) {
2923 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2924 pci_set_drvdata(pdev, NULL);
2925 return -ENOMEM;
2926 }
2927 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2928 (unsigned long long) bnad->mmio_len);
2929
2930 spin_lock_irqsave(&bnad->bna_lock, flags);
2931 if (!bnad_msix_disable)
2932 bnad->cfg_flags = BNAD_CF_MSIX;
2933
2934 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2935
2936 bnad_q_num_init(bnad);
2937 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2938
2939 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2940 (bnad->num_rx * bnad->num_rxp_per_rx) +
2941 BNAD_MAILBOX_MSIX_VECTORS;
2942 bnad->msix_diag_num = 2; /* 1 for Tx, 1 for Rx */
2943
2944 bnad->txq_depth = BNAD_TXQ_DEPTH;
2945 bnad->rxq_depth = BNAD_RXQ_DEPTH;
2946 bnad->rx_csum = true;
2947
2948 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2949 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2950
2951 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2952 (unsigned long)bnad);
2953
2954 return 0;
2955}
2956
2957/*
2958 * Must be called after bnad_pci_uninit()
2959 * so that iounmap() and pci_set_drvdata(NULL)
2960 * happen only after PCI uninitialization.
2961 */
2962static void
2963bnad_uninit(struct bnad *bnad)
2964{
2965 if (bnad->bar0)
2966 iounmap(bnad->bar0);
2967 pci_set_drvdata(bnad->pcidev, NULL);
2968}
2969
2970/*
2971 * Initialize locks
2972 * a) Per-device mutex used for serializing configuration
2973 *    changes from the OS interface
2974 * b) Spinlock used to protect the bna state machine
2975 */
2976static void
2977bnad_lock_init(struct bnad *bnad)
2978{
2979 spin_lock_init(&bnad->bna_lock);
2980 mutex_init(&bnad->conf_mutex);
2981}
2982
2983static void
2984bnad_lock_uninit(struct bnad *bnad)
2985{
2986 mutex_destroy(&bnad->conf_mutex);
2987}
2988
2989/* PCI Initialization */
2990static int
2991bnad_pci_init(struct bnad *bnad,
2992 struct pci_dev *pdev, bool *using_dac)
2993{
2994 int err;
2995
2996 err = pci_enable_device(pdev);
2997 if (err)
2998 return err;
2999 err = pci_request_regions(pdev, BNAD_NAME);
3000 if (err)
3001 goto disable_device;
3002 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
3003 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
3004 *using_dac = 1;
3005 } else {
3006 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3007 if (err) {
3008 err = pci_set_consistent_dma_mask(pdev,
3009 DMA_BIT_MASK(32));
3010 if (err)
3011 goto release_regions;
3012 }
3013 *using_dac = 0;
3014 }
3015 pci_set_master(pdev);
3016 return 0;
3017
3018release_regions:
3019 pci_release_regions(pdev);
3020disable_device:
3021 pci_disable_device(pdev);
3022
3023 return err;
3024}
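/*
 * DMA mask negotiation above: the 64-bit streaming and coherent masks
 * are tried first and reported through *using_dac so that
 * bnad_netdev_init() can set NETIF_F_HIGHDMA; otherwise the driver
 * falls back to 32-bit masks and leaves high-memory buffers to the
 * stack's bounce handling.
 */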
3025
3026static void
3027bnad_pci_uninit(struct pci_dev *pdev)
3028{
3029 pci_release_regions(pdev);
3030 pci_disable_device(pdev);
3031}
3032
3033static int __devinit
3034bnad_pci_probe(struct pci_dev *pdev,
3035 const struct pci_device_id *pcidev_id)
3036{
3037 bool using_dac;
3038 int err;
3039 struct bnad *bnad;
3040 struct bna *bna;
3041 struct net_device *netdev;
3042 struct bfa_pcidev pcidev_info;
3043 unsigned long flags;
3044
3045 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3046 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3047
3048 mutex_lock(&bnad_fwimg_mutex);
3049 if (!cna_get_firmware_buf(pdev)) {
3050 mutex_unlock(&bnad_fwimg_mutex);
3051 pr_warn("Failed to load Firmware Image!\n");
3052 return -ENODEV;
3053 }
3054 mutex_unlock(&bnad_fwimg_mutex);
3055
3056 /*
3057	 * Allocates sizeof(struct net_device) + sizeof(struct bnad)
3058 * bnad = netdev->priv
3059 */
3060 netdev = alloc_etherdev(sizeof(struct bnad));
3061 if (!netdev) {
3062 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3063 err = -ENOMEM;
3064 return err;
3065 }
3066 bnad = netdev_priv(netdev);
3067
3068 /*
3069 * PCI initialization
3070 * Output : using_dac = 1 for 64 bit DMA
3071 * = 0 for 32 bit DMA
3072 */
3073 err = bnad_pci_init(bnad, pdev, &using_dac);
3074 if (err)
3075 goto free_netdev;
3076
3077 bnad_lock_init(bnad);
3078 /*
3079 * Initialize bnad structure
3080 * Setup relation between pci_dev & netdev
3081 * Init Tx free tasklet
3082 */
3083 err = bnad_init(bnad, pdev, netdev);
3084 if (err)
3085 goto pci_uninit;
3086 /* Initialize netdev structure, set up ethtool ops */
3087 bnad_netdev_init(bnad, using_dac);
3088
3089 bnad_enable_msix(bnad);
3090
3091	/* Get resource requirement from bna */
3092 bna_res_req(&bnad->res_info[0]);
3093
3094 /* Allocate resources from bna */
3095 err = bnad_res_alloc(bnad);
3096 if (err)
3097 goto free_netdev;
3098
3099 bna = &bnad->bna;
3100
3101 /* Setup pcidev_info for bna_init() */
3102 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3103 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3104 pcidev_info.device_id = bnad->pcidev->device;
3105 pcidev_info.pci_bar_kva = bnad->bar0;
3106
3107 mutex_lock(&bnad->conf_mutex);
3108
3109 spin_lock_irqsave(&bnad->bna_lock, flags);
3110 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3111
3112 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3113
3114 bnad->stats.bna_stats = &bna->stats;
3115
3116 /* Set up timers */
3117 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3118 ((unsigned long)bnad));
3119 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3120 ((unsigned long)bnad));
3121 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
3122 ((unsigned long)bnad));
3123
3124 /* Now start the timer before calling IOC */
3125 mod_timer(&bnad->bna.device.ioc.ioc_timer,
3126 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3127
3128 /*
3129 * Start the chip
3130 * Don't care even if err != 0, bna state machine will
3131 * deal with it
3132 */
3133 err = bnad_device_enable(bnad);
3134
3135 /* Get the burnt-in mac */
3136 spin_lock_irqsave(&bnad->bna_lock, flags);
3137 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3138 bnad_set_netdev_perm_addr(bnad);
3139 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3140
3141 mutex_unlock(&bnad->conf_mutex);
3142
3143 /*
3144 * Make sure the link appears down to the stack
3145 */
3146 netif_carrier_off(netdev);
3147
3148	/* Finally, register with the net_device layer */
3149 err = register_netdev(netdev);
3150 if (err) {
3151 pr_err("BNA : Registering with netdev failed\n");
3152 goto disable_device;
3153 }
3154
3155 return 0;
3156
3157disable_device:
3158 mutex_lock(&bnad->conf_mutex);
3159 bnad_device_disable(bnad);
3160 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3161 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3162 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3163 spin_lock_irqsave(&bnad->bna_lock, flags);
3164 bna_uninit(bna);
3165 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3166 mutex_unlock(&bnad->conf_mutex);
3167
3168 bnad_res_free(bnad);
3169 bnad_disable_msix(bnad);
3170pci_uninit:
3171 bnad_pci_uninit(pdev);
3172 bnad_lock_uninit(bnad);
3173 bnad_uninit(bnad);
3174free_netdev:
3175 free_netdev(netdev);
3176 return err;
3177}
3178
3179static void __devexit
3180bnad_pci_remove(struct pci_dev *pdev)
3181{
3182 struct net_device *netdev = pci_get_drvdata(pdev);
3183 struct bnad *bnad;
3184 struct bna *bna;
3185 unsigned long flags;
3186
3187 if (!netdev)
3188 return;
3189
3190 pr_info("%s bnad_pci_remove\n", netdev->name);
3191 bnad = netdev_priv(netdev);
3192 bna = &bnad->bna;
3193
3194 unregister_netdev(netdev);
3195
3196 mutex_lock(&bnad->conf_mutex);
3197 bnad_device_disable(bnad);
3198 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3199 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3200 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3201 spin_lock_irqsave(&bnad->bna_lock, flags);
3202 bna_uninit(bna);
3203 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3204 mutex_unlock(&bnad->conf_mutex);
3205
3206 bnad_res_free(bnad);
3207 bnad_disable_msix(bnad);
3208 bnad_pci_uninit(pdev);
3209 bnad_lock_uninit(bnad);
3210 bnad_uninit(bnad);
3211 free_netdev(netdev);
3212}
3213
3214const struct pci_device_id bnad_pci_id_table[] = {
3215 {
3216 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3217 PCI_DEVICE_ID_BROCADE_CT),
3218 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3219 .class_mask = 0xffff00
3220 }, {0, }
3221};
3222
3223MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3224
3225static struct pci_driver bnad_pci_driver = {
3226 .name = BNAD_NAME,
3227 .id_table = bnad_pci_id_table,
3228 .probe = bnad_pci_probe,
3229 .remove = __devexit_p(bnad_pci_remove),
3230};
3231
3232static int __init
3233bnad_module_init(void)
3234{
3235 int err;
3236
3237 pr_info("Brocade 10G Ethernet driver\n");
3238
3239 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3240
3241 err = pci_register_driver(&bnad_pci_driver);
3242 if (err < 0) {
3243 pr_err("bna : PCI registration failed in module init "
3244 "(%d)\n", err);
3245 return err;
3246 }
3247
3248 return 0;
3249}
3250
3251static void __exit
3252bnad_module_exit(void)
3253{
3254 pci_unregister_driver(&bnad_pci_driver);
3255
3256 if (bfi_fw)
3257 release_firmware(bfi_fw);
3258}
3259
3260module_init(bnad_module_init);
3261module_exit(bnad_module_exit);
3262
3263MODULE_AUTHOR("Brocade");
3264MODULE_LICENSE("GPL");
3265MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3266MODULE_VERSION(BNAD_VERSION);
3267MODULE_FIRMWARE(CNA_FW_FILE_CT);
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
new file mode 100644
index 000000000000..ee377888b905
--- /dev/null
+++ b/drivers/net/bna/bnad.h
@@ -0,0 +1,333 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#ifndef __BNAD_H__
19#define __BNAD_H__
20
21#include <linux/rtnetlink.h>
22#include <linux/workqueue.h>
23#include <linux/ipv6.h>
24#include <linux/etherdevice.h>
25#include <linux/mutex.h>
26#include <linux/firmware.h>
27
28/* Fix for IA64 */
29#include <asm/checksum.h>
30#include <net/ip6_checksum.h>
31
32#include <net/ip.h>
33#include <net/tcp.h>
34
35#include "bna.h"
36
37#define BNAD_TXQ_DEPTH 2048
38#define BNAD_RXQ_DEPTH 2048
39
40#define BNAD_MAX_TXS 1
41#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */
42#define BNAD_TXQ_NUM 1
43
44#define BNAD_MAX_RXS 1
45#define BNAD_MAX_RXPS_PER_RX 16
46
47/*
48 * Control structure pointed to by ccb->ctrl, which
49 * determines the NAPI / LRO behavior of the CCB.
50 * There is a 1:1 correspondence between ccb & ctrl.
51 */
52struct bnad_rx_ctrl {
53 struct bna_ccb *ccb;
54 struct napi_struct napi;
55};
56
57#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC
58
59#define BNAD_GET_TX_ID(_skb) (0)
60
61/*
62 * GLOBAL #defines (CONSTANTS)
63 */
64#define BNAD_NAME "bna"
65#define BNAD_NAME_LEN 64
66
67#define BNAD_VERSION "2.3.2.0"
68
69#define BNAD_MAILBOX_MSIX_VECTORS 1
70
71#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
72#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
73
74#define BNAD_MAX_Q_DEPTH 0x10000
75#define BNAD_MIN_Q_DEPTH 0x200
76
77#define BNAD_JUMBO_MTU 9000
78
79#define BNAD_NETIF_WAKE_THRESHOLD 8
80
81#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3
82
83/* Bit positions for tcb->flags */
84#define BNAD_TXQ_FREE_SENT 0
85
86/* Bit positions for rcb->flags */
87#define BNAD_RXQ_REFILL 0
88#define BNAD_RXQ_STARTED 1
89
90/*
91 * DATA STRUCTURES
92 */
93
94/* enums */
95enum bnad_intr_source {
96 BNAD_INTR_TX = 1,
97 BNAD_INTR_RX = 2
98};
99
100enum bnad_link_state {
101 BNAD_LS_DOWN = 0,
102 BNAD_LS_UP = 1
103};
104
105struct bnad_completion {
106 struct completion ioc_comp;
107 struct completion ucast_comp;
108 struct completion mcast_comp;
109 struct completion tx_comp;
110 struct completion rx_comp;
111 struct completion stats_comp;
112 struct completion port_comp;
113
114 u8 ioc_comp_status;
115 u8 ucast_comp_status;
116 u8 mcast_comp_status;
117 u8 tx_comp_status;
118 u8 rx_comp_status;
119 u8 stats_comp_status;
120 u8 port_comp_status;
121};
122
123/* Tx Rx Control Stats */
124struct bnad_drv_stats {
125 u64 netif_queue_stop;
126 u64 netif_queue_wakeup;
127 u64 tso4;
128 u64 tso6;
129 u64 tso_err;
130 u64 tcpcsum_offload;
131 u64 udpcsum_offload;
132 u64 csum_help;
133 u64 csum_help_err;
134
135 u64 hw_stats_updates;
136 u64 netif_rx_schedule;
137 u64 netif_rx_complete;
138 u64 netif_rx_dropped;
139
140 u64 link_toggle;
141 u64 cee_up;
142
143 u64 rxp_info_alloc_failed;
144 u64 mbox_intr_disabled;
145 u64 mbox_intr_enabled;
146 u64 tx_unmap_q_alloc_failed;
147 u64 rx_unmap_q_alloc_failed;
148
149 u64 rxbuf_alloc_failed;
150};
151
152/* Complete driver stats */
153struct bnad_stats {
154 struct bnad_drv_stats drv_stats;
155 struct bna_stats *bna_stats;
156};
157
158/* Tx / Rx Resources */
159struct bnad_tx_res_info {
160 struct bna_res_info res_info[BNA_TX_RES_T_MAX];
161};
162
163struct bnad_rx_res_info {
164 struct bna_res_info res_info[BNA_RX_RES_T_MAX];
165};
166
167struct bnad_tx_info {
168 struct bna_tx *tx; /* 1:1 between tx_info & tx */
169 struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
170} ____cacheline_aligned;
171
172struct bnad_rx_info {
173 struct bna_rx *rx; /* 1:1 between rx_info & rx */
174
175 struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXPS_PER_RX];
176} ____cacheline_aligned;
177
178/* Unmap queues for Tx / Rx cleanup */
179struct bnad_skb_unmap {
180 struct sk_buff *skb;
181 DECLARE_PCI_UNMAP_ADDR(dma_addr)
182};
183
184struct bnad_unmap_q {
185 u32 producer_index;
186 u32 consumer_index;
187 u32 q_depth;
188 /* This should be the last one */
189 struct bnad_skb_unmap unmap_array[1];
190};
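/*
 * unmap_array[1] above is the pre-C99 flexible-array idiom: the queue
 * is allocated as a single block sized for q_depth entries (see the
 * BNAD_FILL_UNMAPQ_MEM_REQ usage in bnad.c), so the declared size of
 * one entry is only a placeholder; hence the "this should be the last
 * one" note.
 */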
191
192/* Bit mask values for bnad->cfg_flags */
193#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */
194#define BNAD_CF_PROMISC 0x02
195#define BNAD_CF_ALLMULTI 0x04
196#define BNAD_CF_MSIX 0x08 /* If in MSIx mode */
197
198/* Defines for run_flags bit-mask */
199/* Set, tested & cleared using xxx_bit() functions */
200/* Values indicate bit positions */
201#define BNAD_RF_CEE_RUNNING 1
202#define BNAD_RF_HW_ERROR 2
203#define BNAD_RF_MBOX_IRQ_DISABLED 3
204#define BNAD_RF_TX_STARTED 4
205#define BNAD_RF_RX_STARTED 5
206#define BNAD_RF_DIM_TIMER_RUNNING 6
207#define BNAD_RF_STATS_TIMER_RUNNING 7
208
209struct bnad {
210 struct net_device *netdev;
211
212 /* Data path */
213 struct bnad_tx_info tx_info[BNAD_MAX_TXS];
214 struct bnad_rx_info rx_info[BNAD_MAX_RXS];
215
216 struct vlan_group *vlan_grp;
217 /*
218 * These q numbers are global only because
219 * they are used to calculate MSIx vectors.
220	 * Actually the exact # of queues is per Tx/Rx
221	 * object.
222 */
223 u32 num_tx;
224 u32 num_rx;
225 u32 num_txq_per_tx;
226 u32 num_rxp_per_rx;
227
228 u32 txq_depth;
229 u32 rxq_depth;
230
231 u8 tx_coalescing_timeo;
232 u8 rx_coalescing_timeo;
233
234 struct bna_rx_config rx_config[BNAD_MAX_RXS];
235 struct bna_tx_config tx_config[BNAD_MAX_TXS];
236
237 u32 rx_csum;
238
239 void __iomem *bar0; /* BAR0 address */
240
241 struct bna bna;
242
243 u32 cfg_flags;
244 unsigned long run_flags;
245
246 struct pci_dev *pcidev;
247 u64 mmio_start;
248 u64 mmio_len;
249
250 u32 msix_num;
251 u32 msix_diag_num;
252 struct msix_entry *msix_table;
253
254 struct mutex conf_mutex;
255 spinlock_t bna_lock ____cacheline_aligned;
256
257 /* Timers */
258 struct timer_list ioc_timer;
259 struct timer_list dim_timer;
260 struct timer_list stats_timer;
261
262 /* Control path resources, memory & irq */
263 struct bna_res_info res_info[BNA_RES_T_MAX];
264 struct bnad_tx_res_info tx_res_info[BNAD_MAX_TXS];
265 struct bnad_rx_res_info rx_res_info[BNAD_MAX_RXS];
266
267 struct bnad_completion bnad_completions;
268
269 /* Burnt in MAC address */
270 mac_t perm_addr;
271
272 struct tasklet_struct tx_free_tasklet;
273
274 /* Statistics */
275 struct bnad_stats stats;
276
277 struct bnad_diag *diag;
278
279 char adapter_name[BNAD_NAME_LEN];
280 char port_name[BNAD_NAME_LEN];
281 char mbox_irq_name[BNAD_NAME_LEN];
282};
283
284/*
285 * EXTERN VARIABLES
286 */
287extern struct firmware *bfi_fw;
288extern u32 bnad_rxqs_per_cq;
289
290/*
291 * EXTERN PROTOTYPES
292 */
293extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
294/* Netdev entry point prototypes */
295extern void bnad_set_ethtool_ops(struct net_device *netdev);
296
297/* Configuration & setup */
298extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
299extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
300
301extern int bnad_setup_rx(struct bnad *bnad, uint rx_id);
302extern int bnad_setup_tx(struct bnad *bnad, uint tx_id);
303extern void bnad_cleanup_tx(struct bnad *bnad, uint tx_id);
304extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
305
306/* Timer start/stop protos */
307extern void bnad_dim_timer_start(struct bnad *bnad);
308
309/* Statistics */
310extern void bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
311extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
312
313/*
314 * MACROS
315 */
316/* To set & get the stats counters */
317#define BNAD_UPDATE_CTR(_bnad, _ctr) \
318 (((_bnad)->stats.drv_stats._ctr)++)
319
320#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr)
321
322#define bnad_enable_rx_irq_unsafe(_ccb) \
323{ \
324 bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
325 (_ccb)->rx_coalescing_timeo); \
326 bna_ib_ack((_ccb)->i_dbell, 0); \
327}
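/*
 * The "_unsafe" suffix above means the macro takes no lock of its
 * own: the caller is expected to hold bnad->bna_lock (or otherwise be
 * serialized) while it re-arms the coalescing timer and acks zero
 * completions, which re-enables the interrupt doorbell for the CCB.
 */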
328
329#define bnad_dim_timer_running(_bnad) \
330 (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
331 (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
332
333#endif /* __BNAD_H__ */
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
new file mode 100644
index 000000000000..11fa2ea842c1
--- /dev/null
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -0,0 +1,1277 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "cna.h"
20
21#include <linux/netdevice.h>
22#include <linux/skbuff.h>
23#include <linux/ethtool.h>
24#include <linux/rtnetlink.h>
25
26#include "bna.h"
27
28#include "bnad.h"
29
30#define BNAD_NUM_TXF_COUNTERS 12
31#define BNAD_NUM_RXF_COUNTERS 10
32#define BNAD_NUM_CQ_COUNTERS 3
33#define BNAD_NUM_RXQ_COUNTERS 6
34#define BNAD_NUM_TXQ_COUNTERS 5
35
36#define BNAD_ETHTOOL_STATS_NUM \
37 (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
38 sizeof(struct bnad_drv_stats) / sizeof(u64) + \
39 offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64))
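/*
 * The count above is three sums of u64 slots: every field of struct
 * rtnl_link_stats64, every counter in struct bnad_drv_stats, and the
 * fixed leading portion of struct bfi_ll_stats, where the
 * offsetof(..., rxf_stats[0]) trick measures everything that precedes
 * the variable per-function arrays. The string table below must stay
 * in step with this arithmetic.
 */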
40
41static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
42 "rx_packets",
43 "tx_packets",
44 "rx_bytes",
45 "tx_bytes",
46 "rx_errors",
47 "tx_errors",
48 "rx_dropped",
49 "tx_dropped",
50 "multicast",
51 "collisions",
52
53 "rx_length_errors",
54 "rx_over_errors",
55 "rx_crc_errors",
56 "rx_frame_errors",
57 "rx_fifo_errors",
58 "rx_missed_errors",
59
60 "tx_aborted_errors",
61 "tx_carrier_errors",
62 "tx_fifo_errors",
63 "tx_heartbeat_errors",
64 "tx_window_errors",
65
66 "rx_compressed",
67 "tx_compressed",
68
69 "netif_queue_stop",
70 "netif_queue_wakeup",
71 "tso4",
72 "tso6",
73 "tso_err",
74 "tcpcsum_offload",
75 "udpcsum_offload",
76 "csum_help",
77 "csum_help_err",
78 "hw_stats_updates",
79 "netif_rx_schedule",
80 "netif_rx_complete",
81 "netif_rx_dropped",
82
83 "link_toggle",
84 "cee_up",
85
86 "rxp_info_alloc_failed",
87 "mbox_intr_disabled",
88 "mbox_intr_enabled",
89 "tx_unmap_q_alloc_failed",
90 "rx_unmap_q_alloc_failed",
91 "rxbuf_alloc_failed",
92
93 "mac_frame_64",
94 "mac_frame_65_127",
95 "mac_frame_128_255",
96 "mac_frame_256_511",
97 "mac_frame_512_1023",
98 "mac_frame_1024_1518",
99 "mac_frame_1518_1522",
100 "mac_rx_bytes",
101 "mac_rx_packets",
102 "mac_rx_fcs_error",
103 "mac_rx_multicast",
104 "mac_rx_broadcast",
105 "mac_rx_control_frames",
106 "mac_rx_pause",
107 "mac_rx_unknown_opcode",
108 "mac_rx_alignment_error",
109 "mac_rx_frame_length_error",
110 "mac_rx_code_error",
111 "mac_rx_carrier_sense_error",
112 "mac_rx_undersize",
113 "mac_rx_oversize",
114 "mac_rx_fragments",
115 "mac_rx_jabber",
116 "mac_rx_drop",
117
118 "mac_tx_bytes",
119 "mac_tx_packets",
120 "mac_tx_multicast",
121 "mac_tx_broadcast",
122 "mac_tx_pause",
123 "mac_tx_deferral",
124 "mac_tx_excessive_deferral",
125 "mac_tx_single_collision",
126 "mac_tx_muliple_collision",
127 "mac_tx_late_collision",
128 "mac_tx_excessive_collision",
129 "mac_tx_total_collision",
130 "mac_tx_pause_honored",
131 "mac_tx_drop",
132 "mac_tx_jabber",
133 "mac_tx_fcs_error",
134 "mac_tx_control_frame",
135 "mac_tx_oversize",
136 "mac_tx_undersize",
137 "mac_tx_fragments",
138
139 "bpc_tx_pause_0",
140 "bpc_tx_pause_1",
141 "bpc_tx_pause_2",
142 "bpc_tx_pause_3",
143 "bpc_tx_pause_4",
144 "bpc_tx_pause_5",
145 "bpc_tx_pause_6",
146 "bpc_tx_pause_7",
147 "bpc_tx_zero_pause_0",
148 "bpc_tx_zero_pause_1",
149 "bpc_tx_zero_pause_2",
150 "bpc_tx_zero_pause_3",
151 "bpc_tx_zero_pause_4",
152 "bpc_tx_zero_pause_5",
153 "bpc_tx_zero_pause_6",
154 "bpc_tx_zero_pause_7",
155 "bpc_tx_first_pause_0",
156 "bpc_tx_first_pause_1",
157 "bpc_tx_first_pause_2",
158 "bpc_tx_first_pause_3",
159 "bpc_tx_first_pause_4",
160 "bpc_tx_first_pause_5",
161 "bpc_tx_first_pause_6",
162 "bpc_tx_first_pause_7",
163
164 "bpc_rx_pause_0",
165 "bpc_rx_pause_1",
166 "bpc_rx_pause_2",
167 "bpc_rx_pause_3",
168 "bpc_rx_pause_4",
169 "bpc_rx_pause_5",
170 "bpc_rx_pause_6",
171 "bpc_rx_pause_7",
172 "bpc_rx_zero_pause_0",
173 "bpc_rx_zero_pause_1",
174 "bpc_rx_zero_pause_2",
175 "bpc_rx_zero_pause_3",
176 "bpc_rx_zero_pause_4",
177 "bpc_rx_zero_pause_5",
178 "bpc_rx_zero_pause_6",
179 "bpc_rx_zero_pause_7",
180 "bpc_rx_first_pause_0",
181 "bpc_rx_first_pause_1",
182 "bpc_rx_first_pause_2",
183 "bpc_rx_first_pause_3",
184 "bpc_rx_first_pause_4",
185 "bpc_rx_first_pause_5",
186 "bpc_rx_first_pause_6",
187 "bpc_rx_first_pause_7",
188
189 "rad_rx_frames",
190 "rad_rx_octets",
191 "rad_rx_vlan_frames",
192 "rad_rx_ucast",
193 "rad_rx_ucast_octets",
194 "rad_rx_ucast_vlan",
195 "rad_rx_mcast",
196 "rad_rx_mcast_octets",
197 "rad_rx_mcast_vlan",
198 "rad_rx_bcast",
199 "rad_rx_bcast_octets",
200 "rad_rx_bcast_vlan",
201 "rad_rx_drops",
202
203 "fc_rx_ucast_octets",
204 "fc_rx_ucast",
205 "fc_rx_ucast_vlan",
206 "fc_rx_mcast_octets",
207 "fc_rx_mcast",
208 "fc_rx_mcast_vlan",
209 "fc_rx_bcast_octets",
210 "fc_rx_bcast",
211 "fc_rx_bcast_vlan",
212
213 "fc_tx_ucast_octets",
214 "fc_tx_ucast",
215 "fc_tx_ucast_vlan",
216 "fc_tx_mcast_octets",
217 "fc_tx_mcast",
218 "fc_tx_mcast_vlan",
219 "fc_tx_bcast_octets",
220 "fc_tx_bcast",
221 "fc_tx_bcast_vlan",
222 "fc_tx_parity_errors",
223 "fc_tx_timeout",
224 "fc_tx_fid_parity_errors",
225};
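/*
 * The string order above is visible to user space via "ethtool -S";
 * it must stay in step with the fill order in bnad_get_ethtool_stats()
 * below, which copies the netdev, driver and hardware counter blocks
 * into the buffer in exactly this sequence.
 */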
226
227static int
228bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
229{
230 cmd->supported = SUPPORTED_10000baseT_Full;
231 cmd->advertising = ADVERTISED_10000baseT_Full;
232 cmd->autoneg = AUTONEG_DISABLE;
233 cmd->supported |= SUPPORTED_FIBRE;
234 cmd->advertising |= ADVERTISED_FIBRE;
235 cmd->port = PORT_FIBRE;
236 cmd->phy_address = 0;
237
238 if (netif_carrier_ok(netdev)) {
239 cmd->speed = SPEED_10000;
240 cmd->duplex = DUPLEX_FULL;
241 } else {
242 cmd->speed = -1;
243 cmd->duplex = -1;
244 }
245 cmd->transceiver = XCVR_EXTERNAL;
246 cmd->maxtxpkt = 0;
247 cmd->maxrxpkt = 0;
248
249 return 0;
250}
251
252static int
253bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
254{
255 /* 10G full duplex setting supported only */
256	if (cmd->autoneg == AUTONEG_ENABLE)
257		return -EOPNOTSUPP;
258
259	if (cmd->speed == SPEED_10000 && cmd->duplex == DUPLEX_FULL)
260		return 0;
261
262 return -EOPNOTSUPP;
263}
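/*
 * Only the fixed 10G/full-duplex setting is accepted, e.g. (with ethX
 * as a placeholder interface name):
 *
 *	ethtool -s ethX autoneg off speed 10000 duplex full
 *
 * Anything else, including enabling autonegotiation, is rejected with
 * -EOPNOTSUPP.
 */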
264
265static void
266bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
267{
268 struct bnad *bnad = netdev_priv(netdev);
269 struct bfa_ioc_attr *ioc_attr;
270 unsigned long flags;
271
272 strcpy(drvinfo->driver, BNAD_NAME);
273 strcpy(drvinfo->version, BNAD_VERSION);
274
275 ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
276 if (ioc_attr) {
277		/* kzalloc() returns zeroed memory */
278 spin_lock_irqsave(&bnad->bna_lock, flags);
279 bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr);
280 spin_unlock_irqrestore(&bnad->bna_lock, flags);
281
282 strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
283 sizeof(drvinfo->fw_version) - 1);
284 kfree(ioc_attr);
285 }
286
287	strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
288}
289
290static int
291get_regs(struct bnad *bnad, u32 *regs)
292{
293 int num = 0, i;
294 u32 reg_addr;
295 unsigned long flags;
296
297#define BNAD_GET_REG(addr) \
298do { \
299 if (regs) \
300 regs[num++] = readl(bnad->bar0 + (addr)); \
301 else \
302 num++; \
303} while (0)
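/*
 * BNAD_GET_REG() lets one register list serve two callers: with
 * regs == NULL it only counts entries (for bnad_get_regs_len()), with
 * a real buffer it also reads them.  A minimal sketch of the same
 * pattern, using a hypothetical dump_regs() helper:
 *
 *	static int dump_regs(struct bnad *bnad, u32 *regs)
 *	{
 *		int num = 0;
 *
 *		if (regs)
 *			regs[num++] = readl(bnad->bar0 + DMA_CTRL_REG0);
 *		else
 *			num++;
 *		return num;
 *	}
 *
 *	len = dump_regs(bnad, NULL) * sizeof(u32);	(sizing pass)
 *	dump_regs(bnad, buf);				(fill pass)
 */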
304
305 spin_lock_irqsave(&bnad->bna_lock, flags);
306
307 /* DMA Block Internal Registers */
308 BNAD_GET_REG(DMA_CTRL_REG0);
309 BNAD_GET_REG(DMA_CTRL_REG1);
310 BNAD_GET_REG(DMA_ERR_INT_STATUS);
311 BNAD_GET_REG(DMA_ERR_INT_ENABLE);
312 BNAD_GET_REG(DMA_ERR_INT_STATUS_SET);
313
314 /* APP Block Register Address Offset from BAR0 */
315 BNAD_GET_REG(HOSTFN0_INT_STATUS);
316 BNAD_GET_REG(HOSTFN0_INT_MASK);
317 BNAD_GET_REG(HOST_PAGE_NUM_FN0);
318 BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN0);
319 BNAD_GET_REG(FN0_PCIE_ERR_REG);
320 BNAD_GET_REG(FN0_ERR_TYPE_STATUS_REG);
321 BNAD_GET_REG(FN0_ERR_TYPE_MSK_STATUS_REG);
322
323 BNAD_GET_REG(HOSTFN1_INT_STATUS);
324 BNAD_GET_REG(HOSTFN1_INT_MASK);
325 BNAD_GET_REG(HOST_PAGE_NUM_FN1);
326 BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN1);
327 BNAD_GET_REG(FN1_PCIE_ERR_REG);
328 BNAD_GET_REG(FN1_ERR_TYPE_STATUS_REG);
329 BNAD_GET_REG(FN1_ERR_TYPE_MSK_STATUS_REG);
330
331 BNAD_GET_REG(PCIE_MISC_REG);
332
333 BNAD_GET_REG(HOST_SEM0_REG);
334 BNAD_GET_REG(HOST_SEM1_REG);
335 BNAD_GET_REG(HOST_SEM2_REG);
336 BNAD_GET_REG(HOST_SEM3_REG);
337 BNAD_GET_REG(HOST_SEM0_INFO_REG);
338 BNAD_GET_REG(HOST_SEM1_INFO_REG);
339 BNAD_GET_REG(HOST_SEM2_INFO_REG);
340 BNAD_GET_REG(HOST_SEM3_INFO_REG);
341
342 BNAD_GET_REG(TEMPSENSE_CNTL_REG);
343 BNAD_GET_REG(TEMPSENSE_STAT_REG);
344
345 BNAD_GET_REG(APP_LOCAL_ERR_STAT);
346 BNAD_GET_REG(APP_LOCAL_ERR_MSK);
347
348 BNAD_GET_REG(PCIE_LNK_ERR_STAT);
349 BNAD_GET_REG(PCIE_LNK_ERR_MSK);
350
351 BNAD_GET_REG(FCOE_FIP_ETH_TYPE);
352 BNAD_GET_REG(RESV_ETH_TYPE);
353
354 BNAD_GET_REG(HOSTFN2_INT_STATUS);
355 BNAD_GET_REG(HOSTFN2_INT_MASK);
356 BNAD_GET_REG(HOST_PAGE_NUM_FN2);
357 BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN2);
358 BNAD_GET_REG(FN2_PCIE_ERR_REG);
359 BNAD_GET_REG(FN2_ERR_TYPE_STATUS_REG);
360 BNAD_GET_REG(FN2_ERR_TYPE_MSK_STATUS_REG);
361
362 BNAD_GET_REG(HOSTFN3_INT_STATUS);
363 BNAD_GET_REG(HOSTFN3_INT_MASK);
364 BNAD_GET_REG(HOST_PAGE_NUM_FN3);
365 BNAD_GET_REG(HOST_MSIX_ERR_INDEX_FN3);
366 BNAD_GET_REG(FN3_PCIE_ERR_REG);
367 BNAD_GET_REG(FN3_ERR_TYPE_STATUS_REG);
368 BNAD_GET_REG(FN3_ERR_TYPE_MSK_STATUS_REG);
369
370 /* Host Command Status Registers */
371 reg_addr = HOST_CMDSTS0_CLR_REG;
372 for (i = 0; i < 16; i++) {
373 BNAD_GET_REG(reg_addr);
374 BNAD_GET_REG(reg_addr + 4);
375 BNAD_GET_REG(reg_addr + 8);
376 reg_addr += 0x10;
377 }
378
379 /* Function ID register */
380 BNAD_GET_REG(FNC_ID_REG);
381
382 /* Function personality register */
383 BNAD_GET_REG(FNC_PERS_REG);
384
385 /* Operation mode register */
386 BNAD_GET_REG(OP_MODE);
387
388 /* LPU0 Registers */
389 BNAD_GET_REG(LPU0_MBOX_CTL_REG);
390 BNAD_GET_REG(LPU0_MBOX_CMD_REG);
391 BNAD_GET_REG(LPU0_MBOX_LINK_0REG);
392 BNAD_GET_REG(LPU1_MBOX_LINK_0REG);
393 BNAD_GET_REG(LPU0_MBOX_STATUS_0REG);
394 BNAD_GET_REG(LPU1_MBOX_STATUS_0REG);
395 BNAD_GET_REG(LPU0_ERR_STATUS_REG);
396 BNAD_GET_REG(LPU0_ERR_SET_REG);
397
398 /* LPU1 Registers */
399 BNAD_GET_REG(LPU1_MBOX_CTL_REG);
400 BNAD_GET_REG(LPU1_MBOX_CMD_REG);
401 BNAD_GET_REG(LPU0_MBOX_LINK_1REG);
402 BNAD_GET_REG(LPU1_MBOX_LINK_1REG);
403 BNAD_GET_REG(LPU0_MBOX_STATUS_1REG);
404 BNAD_GET_REG(LPU1_MBOX_STATUS_1REG);
405 BNAD_GET_REG(LPU1_ERR_STATUS_REG);
406 BNAD_GET_REG(LPU1_ERR_SET_REG);
407
408 /* PSS Registers */
409 BNAD_GET_REG(PSS_CTL_REG);
410 BNAD_GET_REG(PSS_ERR_STATUS_REG);
411 BNAD_GET_REG(ERR_STATUS_SET);
412 BNAD_GET_REG(PSS_RAM_ERR_STATUS_REG);
413
414 /* Catapult CPQ Registers */
415 BNAD_GET_REG(HOSTFN0_LPU0_MBOX0_CMD_STAT);
416 BNAD_GET_REG(HOSTFN0_LPU1_MBOX0_CMD_STAT);
417 BNAD_GET_REG(LPU0_HOSTFN0_MBOX0_CMD_STAT);
418 BNAD_GET_REG(LPU1_HOSTFN0_MBOX0_CMD_STAT);
419
420 BNAD_GET_REG(HOSTFN0_LPU0_MBOX1_CMD_STAT);
421 BNAD_GET_REG(HOSTFN0_LPU1_MBOX1_CMD_STAT);
422 BNAD_GET_REG(LPU0_HOSTFN0_MBOX1_CMD_STAT);
423 BNAD_GET_REG(LPU1_HOSTFN0_MBOX1_CMD_STAT);
424
425 BNAD_GET_REG(HOSTFN1_LPU0_MBOX0_CMD_STAT);
426 BNAD_GET_REG(HOSTFN1_LPU1_MBOX0_CMD_STAT);
427 BNAD_GET_REG(LPU0_HOSTFN1_MBOX0_CMD_STAT);
428 BNAD_GET_REG(LPU1_HOSTFN1_MBOX0_CMD_STAT);
429
430 BNAD_GET_REG(HOSTFN1_LPU0_MBOX1_CMD_STAT);
431 BNAD_GET_REG(HOSTFN1_LPU1_MBOX1_CMD_STAT);
432 BNAD_GET_REG(LPU0_HOSTFN1_MBOX1_CMD_STAT);
433 BNAD_GET_REG(LPU1_HOSTFN1_MBOX1_CMD_STAT);
434
435 BNAD_GET_REG(HOSTFN2_LPU0_MBOX0_CMD_STAT);
436 BNAD_GET_REG(HOSTFN2_LPU1_MBOX0_CMD_STAT);
437 BNAD_GET_REG(LPU0_HOSTFN2_MBOX0_CMD_STAT);
438 BNAD_GET_REG(LPU1_HOSTFN2_MBOX0_CMD_STAT);
439
440 BNAD_GET_REG(HOSTFN2_LPU0_MBOX1_CMD_STAT);
441 BNAD_GET_REG(HOSTFN2_LPU1_MBOX1_CMD_STAT);
442 BNAD_GET_REG(LPU0_HOSTFN2_MBOX1_CMD_STAT);
443 BNAD_GET_REG(LPU1_HOSTFN2_MBOX1_CMD_STAT);
444
445 BNAD_GET_REG(HOSTFN3_LPU0_MBOX0_CMD_STAT);
446 BNAD_GET_REG(HOSTFN3_LPU1_MBOX0_CMD_STAT);
447 BNAD_GET_REG(LPU0_HOSTFN3_MBOX0_CMD_STAT);
448 BNAD_GET_REG(LPU1_HOSTFN3_MBOX0_CMD_STAT);
449
450 BNAD_GET_REG(HOSTFN3_LPU0_MBOX1_CMD_STAT);
451 BNAD_GET_REG(HOSTFN3_LPU1_MBOX1_CMD_STAT);
452 BNAD_GET_REG(LPU0_HOSTFN3_MBOX1_CMD_STAT);
453 BNAD_GET_REG(LPU1_HOSTFN3_MBOX1_CMD_STAT);
454
455 /* Host Function Force Parity Error Registers */
456 BNAD_GET_REG(HOSTFN0_LPU_FORCE_PERR);
457 BNAD_GET_REG(HOSTFN1_LPU_FORCE_PERR);
458 BNAD_GET_REG(HOSTFN2_LPU_FORCE_PERR);
459 BNAD_GET_REG(HOSTFN3_LPU_FORCE_PERR);
460
461 /* LL Port[0|1] Halt Mask Registers */
462 BNAD_GET_REG(LL_HALT_MSK_P0);
463 BNAD_GET_REG(LL_HALT_MSK_P1);
464
465 /* LL Port[0|1] Error Mask Registers */
466 BNAD_GET_REG(LL_ERR_MSK_P0);
467 BNAD_GET_REG(LL_ERR_MSK_P1);
468
469 /* EMC FLI Registers */
470 BNAD_GET_REG(FLI_CMD_REG);
471 BNAD_GET_REG(FLI_ADDR_REG);
472 BNAD_GET_REG(FLI_CTL_REG);
473 BNAD_GET_REG(FLI_WRDATA_REG);
474 BNAD_GET_REG(FLI_RDDATA_REG);
475 BNAD_GET_REG(FLI_DEV_STATUS_REG);
476 BNAD_GET_REG(FLI_SIG_WD_REG);
477
478 BNAD_GET_REG(FLI_DEV_VENDOR_REG);
479 BNAD_GET_REG(FLI_ERR_STATUS_REG);
480
481 /* RxAdm 0 Registers */
482 BNAD_GET_REG(RAD0_CTL_REG);
483 BNAD_GET_REG(RAD0_PE_PARM_REG);
484 BNAD_GET_REG(RAD0_BCN_REG);
485 BNAD_GET_REG(RAD0_DEFAULT_REG);
486 BNAD_GET_REG(RAD0_PROMISC_REG);
487 BNAD_GET_REG(RAD0_BCNQ_REG);
488 BNAD_GET_REG(RAD0_DEFAULTQ_REG);
489
490 BNAD_GET_REG(RAD0_ERR_STS);
491 BNAD_GET_REG(RAD0_SET_ERR_STS);
492 BNAD_GET_REG(RAD0_ERR_INT_EN);
493 BNAD_GET_REG(RAD0_FIRST_ERR);
494 BNAD_GET_REG(RAD0_FORCE_ERR);
495
496 BNAD_GET_REG(RAD0_MAC_MAN_1H);
497 BNAD_GET_REG(RAD0_MAC_MAN_1L);
498 BNAD_GET_REG(RAD0_MAC_MAN_2H);
499 BNAD_GET_REG(RAD0_MAC_MAN_2L);
500 BNAD_GET_REG(RAD0_MAC_MAN_3H);
501 BNAD_GET_REG(RAD0_MAC_MAN_3L);
502 BNAD_GET_REG(RAD0_MAC_MAN_4H);
503 BNAD_GET_REG(RAD0_MAC_MAN_4L);
504
505 BNAD_GET_REG(RAD0_LAST4_IP);
506
507 /* RxAdm 1 Registers */
508 BNAD_GET_REG(RAD1_CTL_REG);
509 BNAD_GET_REG(RAD1_PE_PARM_REG);
510 BNAD_GET_REG(RAD1_BCN_REG);
511 BNAD_GET_REG(RAD1_DEFAULT_REG);
512 BNAD_GET_REG(RAD1_PROMISC_REG);
513 BNAD_GET_REG(RAD1_BCNQ_REG);
514 BNAD_GET_REG(RAD1_DEFAULTQ_REG);
515
516 BNAD_GET_REG(RAD1_ERR_STS);
517 BNAD_GET_REG(RAD1_SET_ERR_STS);
518 BNAD_GET_REG(RAD1_ERR_INT_EN);
519
520 /* TxA0 Registers */
521 BNAD_GET_REG(TXA0_CTRL_REG);
522 /* TxA0 TSO Sequence # Registers (RO) */
523 for (i = 0; i < 8; i++) {
524 BNAD_GET_REG(TXA0_TSO_TCP_SEQ_REG(i));
525 BNAD_GET_REG(TXA0_TSO_IP_INFO_REG(i));
526 }
527
528 /* TxA1 Registers */
529 BNAD_GET_REG(TXA1_CTRL_REG);
530 /* TxA1 TSO Sequence # Registers (RO) */
531 for (i = 0; i < 8; i++) {
532 BNAD_GET_REG(TXA1_TSO_TCP_SEQ_REG(i));
533 BNAD_GET_REG(TXA1_TSO_IP_INFO_REG(i));
534 }
535
536 /* RxA Registers */
537 BNAD_GET_REG(RXA0_CTL_REG);
538 BNAD_GET_REG(RXA1_CTL_REG);
539
540 /* PLB0 Registers */
541 BNAD_GET_REG(PLB0_ECM_TIMER_REG);
542 BNAD_GET_REG(PLB0_RL_CTL);
543 for (i = 0; i < 8; i++)
544 BNAD_GET_REG(PLB0_RL_MAX_BC(i));
545 BNAD_GET_REG(PLB0_RL_TU_PRIO);
546 for (i = 0; i < 8; i++)
547 BNAD_GET_REG(PLB0_RL_BYTE_CNT(i));
548 BNAD_GET_REG(PLB0_RL_MIN_REG);
549 BNAD_GET_REG(PLB0_RL_MAX_REG);
550 BNAD_GET_REG(PLB0_EMS_ADD_REG);
551
552 /* PLB1 Registers */
553 BNAD_GET_REG(PLB1_ECM_TIMER_REG);
554 BNAD_GET_REG(PLB1_RL_CTL);
555 for (i = 0; i < 8; i++)
556 BNAD_GET_REG(PLB1_RL_MAX_BC(i));
557 BNAD_GET_REG(PLB1_RL_TU_PRIO);
558 for (i = 0; i < 8; i++)
559 BNAD_GET_REG(PLB1_RL_BYTE_CNT(i));
560 BNAD_GET_REG(PLB1_RL_MIN_REG);
561 BNAD_GET_REG(PLB1_RL_MAX_REG);
562 BNAD_GET_REG(PLB1_EMS_ADD_REG);
563
564 /* HQM Control Register */
565 BNAD_GET_REG(HQM0_CTL_REG);
566 BNAD_GET_REG(HQM0_RXQ_STOP_SEM);
567 BNAD_GET_REG(HQM0_TXQ_STOP_SEM);
568 BNAD_GET_REG(HQM1_CTL_REG);
569 BNAD_GET_REG(HQM1_RXQ_STOP_SEM);
570 BNAD_GET_REG(HQM1_TXQ_STOP_SEM);
571
572 /* LUT Registers */
573 BNAD_GET_REG(LUT0_ERR_STS);
574 BNAD_GET_REG(LUT0_SET_ERR_STS);
575 BNAD_GET_REG(LUT1_ERR_STS);
576 BNAD_GET_REG(LUT1_SET_ERR_STS);
577
578 /* TRC Registers */
579 BNAD_GET_REG(TRC_CTL_REG);
580 BNAD_GET_REG(TRC_MODS_REG);
581 BNAD_GET_REG(TRC_TRGC_REG);
582 BNAD_GET_REG(TRC_CNT1_REG);
583 BNAD_GET_REG(TRC_CNT2_REG);
584 BNAD_GET_REG(TRC_NXTS_REG);
585 BNAD_GET_REG(TRC_DIRR_REG);
586 for (i = 0; i < 10; i++)
587 BNAD_GET_REG(TRC_TRGM_REG(i));
588 for (i = 0; i < 10; i++)
589 BNAD_GET_REG(TRC_NXTM_REG(i));
590 for (i = 0; i < 10; i++)
591 BNAD_GET_REG(TRC_STRM_REG(i));
592
593 spin_unlock_irqrestore(&bnad->bna_lock, flags);
594#undef BNAD_GET_REG
595 return num;
596}

597static int
598bnad_get_regs_len(struct net_device *netdev)
599{
600 int ret = get_regs(netdev_priv(netdev), NULL) * sizeof(u32);
601 return ret;
602}
603
604static void
605bnad_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
606{
607 memset(buf, 0, bnad_get_regs_len(netdev));
608 get_regs(netdev_priv(netdev), buf);
609}
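/*
 * The ethtool core sizes the user buffer with ->get_regs_len() before
 * calling ->get_regs(), and both go through get_regs() above, so the
 * counting and fill passes cannot drift apart.  From user space this
 * is reached via "ethtool -d ethX" (ethX being a placeholder name).
 */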
610
611static void
612bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
613{
614 wolinfo->supported = 0;
615 wolinfo->wolopts = 0;
616}
617
618static int
619bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
620{
621 struct bnad *bnad = netdev_priv(netdev);
622 unsigned long flags;
623
624	/* bna_lock is needed here to read cfg_flags */
625 spin_lock_irqsave(&bnad->bna_lock, flags);
626	coalesce->use_adaptive_rx_coalesce =
627		!!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED);
628 spin_unlock_irqrestore(&bnad->bna_lock, flags);
629
630 coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
631 BFI_COALESCING_TIMER_UNIT;
632 coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
633 BFI_COALESCING_TIMER_UNIT;
634 coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
635
636 return 0;
637}
638
639static int
640bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
641{
642 struct bnad *bnad = netdev_priv(netdev);
643 unsigned long flags;
644 int dim_timer_del = 0;
645
646 if (coalesce->rx_coalesce_usecs == 0 ||
647 coalesce->rx_coalesce_usecs >
648 BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
649 return -EINVAL;
650
651 if (coalesce->tx_coalesce_usecs == 0 ||
652 coalesce->tx_coalesce_usecs >
653 BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
654 return -EINVAL;
655
656 mutex_lock(&bnad->conf_mutex);
657	/*
658	 * No need to store rx_coalesce_usecs here:
659	 * whenever DIM is disabled, the value can be
660	 * read back from the stack.
661	 */
662 spin_lock_irqsave(&bnad->bna_lock, flags);
663 if (coalesce->use_adaptive_rx_coalesce) {
664 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
665 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
666 bnad_dim_timer_start(bnad);
667 }
668 } else {
669 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
670 bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
671 dim_timer_del = bnad_dim_timer_running(bnad);
672 if (dim_timer_del) {
673 clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
674 &bnad->run_flags);
675 spin_unlock_irqrestore(&bnad->bna_lock, flags);
676 del_timer_sync(&bnad->dim_timer);
677 spin_lock_irqsave(&bnad->bna_lock, flags);
678 }
679 bnad_rx_coalescing_timeo_set(bnad);
680 }
681 }
682 if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
683 BFI_COALESCING_TIMER_UNIT) {
684 bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
685 BFI_COALESCING_TIMER_UNIT;
686 bnad_tx_coalescing_timeo_set(bnad);
687 }
688
689 if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
690 BFI_COALESCING_TIMER_UNIT) {
691 bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
692 BFI_COALESCING_TIMER_UNIT;
693
694 if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
695 bnad_rx_coalescing_timeo_set(bnad);
696
697 }
698
699 /* Add Tx Inter-pkt DMA count? */
700
701 spin_unlock_irqrestore(&bnad->bna_lock, flags);
702
703 mutex_unlock(&bnad->conf_mutex);
704 return 0;
705}
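/*
 * Coalescing values are kept in units of BFI_COALESCING_TIMER_UNIT
 * microseconds, so user input is rounded down by the integer divide.
 * Illustrative only: if BFI_COALESCING_TIMER_UNIT were 5, then
 *
 *	ethtool -C ethX rx-usecs 18
 *
 * would program 18 / 5 = 3 units, i.e. an effective 15 us (ethX is a
 * placeholder name).
 */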
706
707static void
708bnad_get_ringparam(struct net_device *netdev,
709 struct ethtool_ringparam *ringparam)
710{
711 struct bnad *bnad = netdev_priv(netdev);
712
713 ringparam->rx_max_pending = BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq;
714 ringparam->rx_mini_max_pending = 0;
715 ringparam->rx_jumbo_max_pending = 0;
716 ringparam->tx_max_pending = BNAD_MAX_Q_DEPTH;
717
718 ringparam->rx_pending = bnad->rxq_depth;
719	ringparam->rx_mini_pending = 0;
720	ringparam->rx_jumbo_pending = 0;
721 ringparam->tx_pending = bnad->txq_depth;
722}
723
724static int
725bnad_set_ringparam(struct net_device *netdev,
726 struct ethtool_ringparam *ringparam)
727{
728 int i, current_err, err = 0;
729 struct bnad *bnad = netdev_priv(netdev);
730
731 mutex_lock(&bnad->conf_mutex);
732 if (ringparam->rx_pending == bnad->rxq_depth &&
733 ringparam->tx_pending == bnad->txq_depth) {
734 mutex_unlock(&bnad->conf_mutex);
735 return 0;
736 }
737
738 if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
739 ringparam->rx_pending > BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq ||
740 !BNA_POWER_OF_2(ringparam->rx_pending)) {
741 mutex_unlock(&bnad->conf_mutex);
742 return -EINVAL;
743 }
744 if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
745 ringparam->tx_pending > BNAD_MAX_Q_DEPTH ||
746 !BNA_POWER_OF_2(ringparam->tx_pending)) {
747 mutex_unlock(&bnad->conf_mutex);
748 return -EINVAL;
749 }
750
751 if (ringparam->rx_pending != bnad->rxq_depth) {
752 bnad->rxq_depth = ringparam->rx_pending;
753 for (i = 0; i < bnad->num_rx; i++) {
754 if (!bnad->rx_info[i].rx)
755 continue;
756 bnad_cleanup_rx(bnad, i);
757 current_err = bnad_setup_rx(bnad, i);
758 if (current_err && !err)
759 err = current_err;
760 }
761 }
762 if (ringparam->tx_pending != bnad->txq_depth) {
763 bnad->txq_depth = ringparam->tx_pending;
764 for (i = 0; i < bnad->num_tx; i++) {
765 if (!bnad->tx_info[i].tx)
766 continue;
767 bnad_cleanup_tx(bnad, i);
768 current_err = bnad_setup_tx(bnad, i);
769 if (current_err && !err)
770 err = current_err;
771 }
772 }
773
774 mutex_unlock(&bnad->conf_mutex);
775 return err;
776}
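/*
 * Ring sizes must be powers of two within [BNAD_MIN_Q_DEPTH,
 * BNAD_MAX_Q_DEPTH] (the Rx limit scaled down by bnad_rxqs_per_cq),
 * and changing either side tears down and rebuilds the affected Tx/Rx
 * objects.  Illustrative only, with ethX and the sizes as placeholders
 * that must pass the checks above:
 *
 *	ethtool -G ethX rx 1024 tx 1024
 */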
777
778static void
779bnad_get_pauseparam(struct net_device *netdev,
780 struct ethtool_pauseparam *pauseparam)
781{
782 struct bnad *bnad = netdev_priv(netdev);
783
784 pauseparam->autoneg = 0;
785 pauseparam->rx_pause = bnad->bna.port.pause_config.rx_pause;
786 pauseparam->tx_pause = bnad->bna.port.pause_config.tx_pause;
787}
788
789static int
790bnad_set_pauseparam(struct net_device *netdev,
791 struct ethtool_pauseparam *pauseparam)
792{
793 struct bnad *bnad = netdev_priv(netdev);
794 struct bna_pause_config pause_config;
795 unsigned long flags;
796
797 if (pauseparam->autoneg == AUTONEG_ENABLE)
798 return -EINVAL;
799
800 mutex_lock(&bnad->conf_mutex);
801 if (pauseparam->rx_pause != bnad->bna.port.pause_config.rx_pause ||
802 pauseparam->tx_pause != bnad->bna.port.pause_config.tx_pause) {
803 pause_config.rx_pause = pauseparam->rx_pause;
804 pause_config.tx_pause = pauseparam->tx_pause;
805 spin_lock_irqsave(&bnad->bna_lock, flags);
806 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
807 spin_unlock_irqrestore(&bnad->bna_lock, flags);
808 }
809 mutex_unlock(&bnad->conf_mutex);
810 return 0;
811}
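/*
 * Pause autonegotiation is not supported; only the rx/tx pause bits
 * themselves can be toggled, e.g. (ethX is a placeholder):
 *
 *	ethtool -A ethX autoneg off rx on tx on
 */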
812
813static u32
814bnad_get_rx_csum(struct net_device *netdev)
815{
816 u32 rx_csum;
817 struct bnad *bnad = netdev_priv(netdev);
818
819 rx_csum = bnad->rx_csum;
820 return rx_csum;
821}
822
823static int
824bnad_set_rx_csum(struct net_device *netdev, u32 rx_csum)
825{
826 struct bnad *bnad = netdev_priv(netdev);
827
828 mutex_lock(&bnad->conf_mutex);
829 bnad->rx_csum = rx_csum;
830 mutex_unlock(&bnad->conf_mutex);
831 return 0;
832}
833
834static int
835bnad_set_tx_csum(struct net_device *netdev, u32 tx_csum)
836{
837 struct bnad *bnad = netdev_priv(netdev);
838
839 mutex_lock(&bnad->conf_mutex);
840 if (tx_csum) {
841 netdev->features |= NETIF_F_IP_CSUM;
842 netdev->features |= NETIF_F_IPV6_CSUM;
843 } else {
844 netdev->features &= ~NETIF_F_IP_CSUM;
845 netdev->features &= ~NETIF_F_IPV6_CSUM;
846 }
847 mutex_unlock(&bnad->conf_mutex);
848 return 0;
849}
850
851static int
852bnad_set_tso(struct net_device *netdev, u32 tso)
853{
854 struct bnad *bnad = netdev_priv(netdev);
855
856 mutex_lock(&bnad->conf_mutex);
857 if (tso) {
858 netdev->features |= NETIF_F_TSO;
859 netdev->features |= NETIF_F_TSO6;
860 } else {
861 netdev->features &= ~NETIF_F_TSO;
862 netdev->features &= ~NETIF_F_TSO6;
863 }
864 mutex_unlock(&bnad->conf_mutex);
865 return 0;
866}
867
868static void
869bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
870{
871 struct bnad *bnad = netdev_priv(netdev);
872 int i, j, q_num;
873 u64 bmap;
874
875 mutex_lock(&bnad->conf_mutex);
876
877 switch (stringset) {
878 case ETH_SS_STATS:
879 for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
880			BUG_ON(strlen(bnad_net_stats_strings[i]) >=
881				ETH_GSTRING_LEN);
882 memcpy(string, bnad_net_stats_strings[i],
883 ETH_GSTRING_LEN);
884 string += ETH_GSTRING_LEN;
885 }
886 bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
887 ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
888 for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
889 if (bmap & 1) {
890 sprintf(string, "txf%d_ucast_octets", i);
891 string += ETH_GSTRING_LEN;
892 sprintf(string, "txf%d_ucast", i);
893 string += ETH_GSTRING_LEN;
894 sprintf(string, "txf%d_ucast_vlan", i);
895 string += ETH_GSTRING_LEN;
896 sprintf(string, "txf%d_mcast_octets", i);
897 string += ETH_GSTRING_LEN;
898 sprintf(string, "txf%d_mcast", i);
899 string += ETH_GSTRING_LEN;
900 sprintf(string, "txf%d_mcast_vlan", i);
901 string += ETH_GSTRING_LEN;
902 sprintf(string, "txf%d_bcast_octets", i);
903 string += ETH_GSTRING_LEN;
904 sprintf(string, "txf%d_bcast", i);
905 string += ETH_GSTRING_LEN;
906 sprintf(string, "txf%d_bcast_vlan", i);
907 string += ETH_GSTRING_LEN;
908 sprintf(string, "txf%d_errors", i);
909 string += ETH_GSTRING_LEN;
910 sprintf(string, "txf%d_filter_vlan", i);
911 string += ETH_GSTRING_LEN;
912 sprintf(string, "txf%d_filter_mac_sa", i);
913 string += ETH_GSTRING_LEN;
914 }
915 bmap >>= 1;
916 }
917
918 bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
919 ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
920 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
921 if (bmap & 1) {
922 sprintf(string, "rxf%d_ucast_octets", i);
923 string += ETH_GSTRING_LEN;
924 sprintf(string, "rxf%d_ucast", i);
925 string += ETH_GSTRING_LEN;
926 sprintf(string, "rxf%d_ucast_vlan", i);
927 string += ETH_GSTRING_LEN;
928 sprintf(string, "rxf%d_mcast_octets", i);
929 string += ETH_GSTRING_LEN;
930 sprintf(string, "rxf%d_mcast", i);
931 string += ETH_GSTRING_LEN;
932 sprintf(string, "rxf%d_mcast_vlan", i);
933 string += ETH_GSTRING_LEN;
934 sprintf(string, "rxf%d_bcast_octets", i);
935 string += ETH_GSTRING_LEN;
936 sprintf(string, "rxf%d_bcast", i);
937 string += ETH_GSTRING_LEN;
938 sprintf(string, "rxf%d_bcast_vlan", i);
939 string += ETH_GSTRING_LEN;
940 sprintf(string, "rxf%d_frame_drops", i);
941 string += ETH_GSTRING_LEN;
942 }
943 bmap >>= 1;
944 }
945
946 q_num = 0;
947 for (i = 0; i < bnad->num_rx; i++) {
948 if (!bnad->rx_info[i].rx)
949 continue;
950 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
951 sprintf(string, "cq%d_producer_index", q_num);
952 string += ETH_GSTRING_LEN;
953 sprintf(string, "cq%d_consumer_index", q_num);
954 string += ETH_GSTRING_LEN;
955 sprintf(string, "cq%d_hw_producer_index",
956 q_num);
957 string += ETH_GSTRING_LEN;
958 q_num++;
959 }
960 }
961
962 q_num = 0;
963 for (i = 0; i < bnad->num_rx; i++) {
964 if (!bnad->rx_info[i].rx)
965 continue;
966 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
967 sprintf(string, "rxq%d_packets", q_num);
968 string += ETH_GSTRING_LEN;
969 sprintf(string, "rxq%d_bytes", q_num);
970 string += ETH_GSTRING_LEN;
971 sprintf(string, "rxq%d_packets_with_error",
972 q_num);
973 string += ETH_GSTRING_LEN;
974 sprintf(string, "rxq%d_allocbuf_failed", q_num);
975 string += ETH_GSTRING_LEN;
976 sprintf(string, "rxq%d_producer_index", q_num);
977 string += ETH_GSTRING_LEN;
978 sprintf(string, "rxq%d_consumer_index", q_num);
979 string += ETH_GSTRING_LEN;
980 q_num++;
981 if (bnad->rx_info[i].rx_ctrl[j].ccb &&
982 bnad->rx_info[i].rx_ctrl[j].ccb->
983 rcb[1] &&
984 bnad->rx_info[i].rx_ctrl[j].ccb->
985 rcb[1]->rxq) {
986 sprintf(string, "rxq%d_packets", q_num);
987 string += ETH_GSTRING_LEN;
988 sprintf(string, "rxq%d_bytes", q_num);
989 string += ETH_GSTRING_LEN;
990 sprintf(string,
991 "rxq%d_packets_with_error", q_num);
992 string += ETH_GSTRING_LEN;
993 sprintf(string, "rxq%d_allocbuf_failed",
994 q_num);
995 string += ETH_GSTRING_LEN;
996 sprintf(string, "rxq%d_producer_index",
997 q_num);
998 string += ETH_GSTRING_LEN;
999 sprintf(string, "rxq%d_consumer_index",
1000 q_num);
1001 string += ETH_GSTRING_LEN;
1002 q_num++;
1003 }
1004 }
1005 }
1006
1007 q_num = 0;
1008 for (i = 0; i < bnad->num_tx; i++) {
1009 if (!bnad->tx_info[i].tx)
1010 continue;
1011 for (j = 0; j < bnad->num_txq_per_tx; j++) {
1012 sprintf(string, "txq%d_packets", q_num);
1013 string += ETH_GSTRING_LEN;
1014 sprintf(string, "txq%d_bytes", q_num);
1015 string += ETH_GSTRING_LEN;
1016 sprintf(string, "txq%d_producer_index", q_num);
1017 string += ETH_GSTRING_LEN;
1018 sprintf(string, "txq%d_consumer_index", q_num);
1019 string += ETH_GSTRING_LEN;
1020 sprintf(string, "txq%d_hw_consumer_index",
1021 q_num);
1022 string += ETH_GSTRING_LEN;
1023 q_num++;
1024 }
1025 }
1026
1027 break;
1028
1029 default:
1030 break;
1031 }
1032
1033 mutex_unlock(&bnad->conf_mutex);
1034}
1035
1036static int
1037bnad_get_stats_count_locked(struct net_device *netdev)
1038{
1039 struct bnad *bnad = netdev_priv(netdev);
1040 int i, j, count, rxf_active_num = 0, txf_active_num = 0;
1041 u64 bmap;
1042
1043 bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
1044 ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
1045 for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
1046 if (bmap & 1)
1047 txf_active_num++;
1048 bmap >>= 1;
1049 }
1050 bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
1051 ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
1052 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
1053 if (bmap & 1)
1054 rxf_active_num++;
1055 bmap >>= 1;
1056 }
1057 count = BNAD_ETHTOOL_STATS_NUM +
1058 txf_active_num * BNAD_NUM_TXF_COUNTERS +
1059 rxf_active_num * BNAD_NUM_RXF_COUNTERS;
1060
1061 for (i = 0; i < bnad->num_rx; i++) {
1062 if (!bnad->rx_info[i].rx)
1063 continue;
1064 count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
1065 count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
1066 for (j = 0; j < bnad->num_rxp_per_rx; j++)
1067 if (bnad->rx_info[i].rx_ctrl[j].ccb &&
1068 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
1069 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
1070 count += BNAD_NUM_RXQ_COUNTERS;
1071 }
1072
1073 for (i = 0; i < bnad->num_tx; i++) {
1074 if (!bnad->tx_info[i].tx)
1075 continue;
1076 count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
1077 }
1078 return count;
1079}
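/*
 * The total is therefore:
 *
 *	BNAD_ETHTOOL_STATS_NUM
 *	+ active TxFs * BNAD_NUM_TXF_COUNTERS
 *	+ active RxFs * BNAD_NUM_RXF_COUNTERS
 *	+ per-CQ, per-RxQ and per-TxQ counters for each configured
 *	  queue, with an extra RxQ block when a small-buffer rcb[1]
 *	  ring is present.
 */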
1080
1081static int
1082bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
1083{
1084 int i, j;
1085 struct bna_rcb *rcb = NULL;
1086 struct bna_tcb *tcb = NULL;
1087
1088 for (i = 0; i < bnad->num_rx; i++) {
1089 if (!bnad->rx_info[i].rx)
1090 continue;
1091 for (j = 0; j < bnad->num_rxp_per_rx; j++)
1092 if (bnad->rx_info[i].rx_ctrl[j].ccb &&
1093 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
1094 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
1095 buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
1096 ccb->producer_index;
1097 buf[bi++] = 0; /* ccb->consumer_index */
1098 buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
1099 ccb->hw_producer_index);
1100 }
1101 }
1102 for (i = 0; i < bnad->num_rx; i++) {
1103 if (!bnad->rx_info[i].rx)
1104 continue;
1105 for (j = 0; j < bnad->num_rxp_per_rx; j++)
1106 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
1107 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
1108 bnad->rx_info[i].rx_ctrl[j].ccb->
1109 rcb[0]->rxq) {
1110 rcb = bnad->rx_info[i].rx_ctrl[j].
1111 ccb->rcb[0];
1112 buf[bi++] = rcb->rxq->rx_packets;
1113 buf[bi++] = rcb->rxq->rx_bytes;
1114 buf[bi++] = rcb->rxq->
1115 rx_packets_with_error;
1116 buf[bi++] = rcb->rxq->
1117 rxbuf_alloc_failed;
1118 buf[bi++] = rcb->producer_index;
1119 buf[bi++] = rcb->consumer_index;
1120 }
1121 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
1122 bnad->rx_info[i].rx_ctrl[j].ccb->
1123 rcb[1]->rxq) {
1124 rcb = bnad->rx_info[i].rx_ctrl[j].
1125 ccb->rcb[1];
1126 buf[bi++] = rcb->rxq->rx_packets;
1127 buf[bi++] = rcb->rxq->rx_bytes;
1128 buf[bi++] = rcb->rxq->
1129 rx_packets_with_error;
1130 buf[bi++] = rcb->rxq->
1131 rxbuf_alloc_failed;
1132 buf[bi++] = rcb->producer_index;
1133 buf[bi++] = rcb->consumer_index;
1134 }
1135 }
1136 }
1137
1138 for (i = 0; i < bnad->num_tx; i++) {
1139 if (!bnad->tx_info[i].tx)
1140 continue;
1141 for (j = 0; j < bnad->num_txq_per_tx; j++)
1142 if (bnad->tx_info[i].tcb[j] &&
1143 bnad->tx_info[i].tcb[j]->txq) {
1144 tcb = bnad->tx_info[i].tcb[j];
1145 buf[bi++] = tcb->txq->tx_packets;
1146 buf[bi++] = tcb->txq->tx_bytes;
1147 buf[bi++] = tcb->producer_index;
1148 buf[bi++] = tcb->consumer_index;
1149 buf[bi++] = *(tcb->hw_consumer_index);
1150 }
1151 }
1152
1153 return bi;
1154}
1155
1156static void
1157bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
1158 u64 *buf)
1159{
1160 struct bnad *bnad = netdev_priv(netdev);
1161 int i, j, bi;
1162 unsigned long flags;
1163 struct rtnl_link_stats64 *net_stats64;
1164 u64 *stats64;
1165 u64 bmap;
1166
1167 mutex_lock(&bnad->conf_mutex);
1168 if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
1169 mutex_unlock(&bnad->conf_mutex);
1170 return;
1171 }
1172
1173	/*
1174	 * Use bna_lock to sync reads from bna_stats, which is written
1175	 * under the same lock.
1176	 */
1177 spin_lock_irqsave(&bnad->bna_lock, flags);
1178 bi = 0;
1179 memset(buf, 0, stats->n_stats * sizeof(u64));
1180
1181 net_stats64 = (struct rtnl_link_stats64 *)buf;
1182 bnad_netdev_qstats_fill(bnad, net_stats64);
1183 bnad_netdev_hwstats_fill(bnad, net_stats64);
1184
1185 bi = sizeof(*net_stats64) / sizeof(u64);
1186
1187 /* Fill driver stats into ethtool buffers */
1188 stats64 = (u64 *)&bnad->stats.drv_stats;
1189 for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
1190 buf[bi++] = stats64[i];
1191
1192 /* Fill hardware stats excluding the rxf/txf into ethtool bufs */
1193 stats64 = (u64 *) bnad->stats.bna_stats->hw_stats;
1194 for (i = 0;
1195 i < offsetof(struct bfi_ll_stats, rxf_stats[0]) / sizeof(u64);
1196 i++)
1197 buf[bi++] = stats64[i];
1198
1199 /* Fill txf stats into ethtool buffers */
1200 bmap = (u64)bnad->bna.tx_mod.txf_bmap[0] |
1201 ((u64)bnad->bna.tx_mod.txf_bmap[1] << 32);
1202 for (i = 0; bmap && (i < BFI_LL_TXF_ID_MAX); i++) {
1203 if (bmap & 1) {
1204 stats64 = (u64 *)&bnad->stats.bna_stats->
1205 hw_stats->txf_stats[i];
1206 for (j = 0; j < sizeof(struct bfi_ll_stats_txf) /
1207 sizeof(u64); j++)
1208 buf[bi++] = stats64[j];
1209 }
1210 bmap >>= 1;
1211 }
1212
1213 /* Fill rxf stats into ethtool buffers */
1214 bmap = (u64)bnad->bna.rx_mod.rxf_bmap[0] |
1215 ((u64)bnad->bna.rx_mod.rxf_bmap[1] << 32);
1216 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
1217 if (bmap & 1) {
1218 stats64 = (u64 *)&bnad->stats.bna_stats->
1219 hw_stats->rxf_stats[i];
1220 for (j = 0; j < sizeof(struct bfi_ll_stats_rxf) /
1221 sizeof(u64); j++)
1222 buf[bi++] = stats64[j];
1223 }
1224 bmap >>= 1;
1225 }
1226
1227 /* Fill per Q stats into ethtool buffers */
1228 bi = bnad_per_q_stats_fill(bnad, buf, bi);
1229
1230 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1231
1232 mutex_unlock(&bnad->conf_mutex);
1233}
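/*
 * The fill order here -- netdev counters, driver counters, hardware
 * counters up to rxf_stats[0], then the per-TxF, per-RxF and per-queue
 * blocks -- mirrors bnad_get_strings(), so buf[i] always lines up with
 * string i.
 */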
1234
1235static int
1236bnad_get_sset_count(struct net_device *netdev, int sset)
1237{
1238 switch (sset) {
1239 case ETH_SS_STATS:
1240 return bnad_get_stats_count_locked(netdev);
1241 default:
1242 return -EOPNOTSUPP;
1243 }
1244}
1245
1246static const struct ethtool_ops bnad_ethtool_ops = {
1247 .get_settings = bnad_get_settings,
1248 .set_settings = bnad_set_settings,
1249 .get_drvinfo = bnad_get_drvinfo,
1250 .get_regs_len = bnad_get_regs_len,
1251 .get_regs = bnad_get_regs,
1252 .get_wol = bnad_get_wol,
1253 .get_link = ethtool_op_get_link,
1254 .get_coalesce = bnad_get_coalesce,
1255 .set_coalesce = bnad_set_coalesce,
1256 .get_ringparam = bnad_get_ringparam,
1257 .set_ringparam = bnad_set_ringparam,
1258 .get_pauseparam = bnad_get_pauseparam,
1259 .set_pauseparam = bnad_set_pauseparam,
1260 .get_rx_csum = bnad_get_rx_csum,
1261 .set_rx_csum = bnad_set_rx_csum,
1262 .get_tx_csum = ethtool_op_get_tx_csum,
1263 .set_tx_csum = bnad_set_tx_csum,
1264 .get_sg = ethtool_op_get_sg,
1265 .set_sg = ethtool_op_set_sg,
1266 .get_tso = ethtool_op_get_tso,
1267 .set_tso = bnad_set_tso,
1268 .get_strings = bnad_get_strings,
1269 .get_ethtool_stats = bnad_get_ethtool_stats,
1270 .get_sset_count = bnad_get_sset_count
1271};
1272
1273void
1274bnad_set_ethtool_ops(struct net_device *netdev)
1275{
1276 SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
1277}
diff --git a/drivers/net/bna/cna.h b/drivers/net/bna/cna.h
new file mode 100644
index 000000000000..bbd39dc65972
--- /dev/null
+++ b/drivers/net/bna/cna.h
@@ -0,0 +1,81 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2006-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#ifndef __CNA_H__
20#define __CNA_H__
21
22#include <linux/version.h>
23#include <linux/kernel.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/bitops.h>
28#include <linux/timer.h>
29#include <linux/interrupt.h>
30#include <linux/if_ether.h>
31#include <asm/page.h>
32#include <linux/io.h>
33#include <linux/string.h>
34
35#include <linux/list.h>
36
37#define bfa_sm_fault(__mod, __event) do { \
38 pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \
39 __event); \
40} while (0)
41
42extern char bfa_version[];
43
44#define CNA_FW_FILE_CT "ctfw_cna.bin"
45#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
46
47#pragma pack(1)
48
49#define MAC_ADDRLEN (6)
50typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
51
52#pragma pack()
53
54#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
55#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
56#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
57
58/*
59 * bfa_q_qe_init - to initialize a queue element
60 */
61#define bfa_q_qe_init(_qe) { \
62 bfa_q_next(_qe) = (struct list_head *) NULL; \
63 bfa_q_prev(_qe) = (struct list_head *) NULL; \
64}
65
66/*
67 * bfa_q_deq - dequeue an element from head of the queue
68 */
69#define bfa_q_deq(_q, _qe) { \
70 if (!list_empty(_q)) { \
71 (*((struct list_head **) (_qe))) = bfa_q_next(_q); \
72 bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \
73 (struct list_head *) (_q); \
74 bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe)); \
75 bfa_q_qe_init(*((struct list_head **) _qe)); \
76 } else { \
77 *((struct list_head **) (_qe)) = (struct list_head *) NULL; \
78 } \
79}
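/*
 * A minimal usage sketch (illustrative only; some_q and handle() are
 * hypothetical names): dequeue the head element, or get NULL when the
 * list is empty.
 *
 *	struct list_head *qe;
 *
 *	bfa_q_deq(&some_q, &qe);
 *	if (qe)
 *		handle(qe);
 */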
80
81#endif /* __CNA_H__ */
diff --git a/drivers/net/bna/cna_fwimg.c b/drivers/net/bna/cna_fwimg.c
new file mode 100644
index 000000000000..0bd1d3790a27
--- /dev/null
+++ b/drivers/net/bna/cna_fwimg.c
@@ -0,0 +1,64 @@
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include <linux/firmware.h>
19#include "cna.h"
20
21const struct firmware *bfi_fw;
22static u32 *bfi_image_ct_cna;
23static u32 bfi_image_ct_cna_size;
24
25u32 *
26cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
27 u32 *bfi_image_size, char *fw_name)
28{
29 const struct firmware *fw;
30
31 if (request_firmware(&fw, fw_name, &pdev->dev)) {
32 pr_alert("Can't locate firmware %s\n", fw_name);
33 goto error;
34 }
35
36 *bfi_image = (u32 *)fw->data;
37 *bfi_image_size = fw->size/sizeof(u32);
38 bfi_fw = fw;
39
40 return *bfi_image;
41error:
42 return NULL;
43}
44
45u32 *
46cna_get_firmware_buf(struct pci_dev *pdev)
47{
48 if (bfi_image_ct_cna_size == 0)
49 cna_read_firmware(pdev, &bfi_image_ct_cna,
50 &bfi_image_ct_cna_size, CNA_FW_FILE_CT);
51 return bfi_image_ct_cna;
52}
53
54u32 *
55bfa_cb_image_get_chunk(int type, u32 off)
56{
57	return bfi_image_ct_cna + off;
58}
59
60u32
61bfa_cb_image_get_size(int type)
62{
63 return bfi_image_ct_cna_size;
64}
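/*
 * bfi_fw is held for the lifetime of the module so the chunk/size
 * callbacks above stay valid; a matching release is expected at unload
 * time, e.g. (illustrative sketch):
 *
 *	if (bfi_fw)
 *		release_firmware(bfi_fw);
 */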
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index e6a803f1c507..3d1a5da98622 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -49,6 +49,7 @@
49#include <linux/cache.h> 49#include <linux/cache.h>
50#include <linux/firmware.h> 50#include <linux/firmware.h>
51#include <linux/log2.h> 51#include <linux/log2.h>
52#include <linux/aer.h>
52 53
53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 54#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54#define BCM_CNIC 1 55#define BCM_CNIC 1
@@ -265,7 +266,7 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
265 if (diff == TX_DESC_CNT) 266 if (diff == TX_DESC_CNT)
266 diff = MAX_TX_DESC_CNT; 267 diff = MAX_TX_DESC_CNT;
267 } 268 }
268 return (bp->tx_ring_size - diff); 269 return bp->tx_ring_size - diff;
269} 270}
270 271
271static u32 272static u32
@@ -298,7 +299,7 @@ bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
298static u32 299static u32
299bnx2_shmem_rd(struct bnx2 *bp, u32 offset) 300bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
300{ 301{
301 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset)); 302 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
302} 303}
303 304
304static void 305static void
@@ -976,9 +977,9 @@ bnx2_report_fw_link(struct bnx2 *bp)
976static char * 977static char *
977bnx2_xceiver_str(struct bnx2 *bp) 978bnx2_xceiver_str(struct bnx2 *bp)
978{ 979{
979 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" : 980 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
980 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" : 981 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
981 "Copper")); 982 "Copper");
982} 983}
983 984
984static void 985static void
@@ -1757,7 +1758,7 @@ __acquires(&bp->phy_lock)
1757 u32 new_adv = 0; 1758 u32 new_adv = 0;
1758 1759
1759 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) 1760 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1760 return (bnx2_setup_remote_phy(bp, port)); 1761 return bnx2_setup_remote_phy(bp, port);
1761 1762
1762 if (!(bp->autoneg & AUTONEG_SPEED)) { 1763 if (!(bp->autoneg & AUTONEG_SPEED)) {
1763 u32 new_bmcr; 1764 u32 new_bmcr;
@@ -2170,10 +2171,10 @@ __acquires(&bp->phy_lock)
2170 return 0; 2171 return 0;
2171 2172
2172 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { 2173 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2173 return (bnx2_setup_serdes_phy(bp, port)); 2174 return bnx2_setup_serdes_phy(bp, port);
2174 } 2175 }
2175 else { 2176 else {
2176 return (bnx2_setup_copper_phy(bp)); 2177 return bnx2_setup_copper_phy(bp);
2177 } 2178 }
2178} 2179}
2179 2180
@@ -3217,7 +3218,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3217 3218
3218 } 3219 }
3219 3220
3220 skb->ip_summed = CHECKSUM_NONE; 3221 skb_checksum_none_assert(skb);
3221 if (bp->rx_csum && 3222 if (bp->rx_csum &&
3222 (status & (L2_FHDR_STATUS_TCP_SEGMENT | 3223 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3223 L2_FHDR_STATUS_UDP_DATAGRAM))) { 3224 L2_FHDR_STATUS_UDP_DATAGRAM))) {
@@ -7581,9 +7582,9 @@ bnx2_set_tx_csum(struct net_device *dev, u32 data)
7581 struct bnx2 *bp = netdev_priv(dev); 7582 struct bnx2 *bp = netdev_priv(dev);
7582 7583
7583 if (CHIP_NUM(bp) == CHIP_NUM_5709) 7584 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7584 return (ethtool_op_set_tx_ipv6_csum(dev, data)); 7585 return ethtool_op_set_tx_ipv6_csum(dev, data);
7585 else 7586 else
7586 return (ethtool_op_set_tx_csum(dev, data)); 7587 return ethtool_op_set_tx_csum(dev, data);
7587} 7588}
7588 7589
7589static int 7590static int
@@ -7704,7 +7705,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
7704 return -EINVAL; 7705 return -EINVAL;
7705 7706
7706 dev->mtu = new_mtu; 7707 dev->mtu = new_mtu;
7707 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size)); 7708 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7708} 7709}
7709 7710
7710#ifdef CONFIG_NET_POLL_CONTROLLER 7711#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -7890,6 +7891,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7890 int rc, i, j; 7891 int rc, i, j;
7891 u32 reg; 7892 u32 reg;
7892 u64 dma_mask, persist_dma_mask; 7893 u64 dma_mask, persist_dma_mask;
7894 int err;
7893 7895
7894 SET_NETDEV_DEV(dev, &pdev->dev); 7896 SET_NETDEV_DEV(dev, &pdev->dev);
7895 bp = netdev_priv(dev); 7897 bp = netdev_priv(dev);
@@ -7925,6 +7927,14 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7925 goto err_out_disable; 7927 goto err_out_disable;
7926 } 7928 }
7927 7929
7930 /* AER (Advanced Error Reporting) hooks */
7931 err = pci_enable_pcie_error_reporting(pdev);
7932 if (err) {
7933 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
7934 "0x%x\n", err);
7935 /* non-fatal, continue */
7936 }
7937
7928 pci_set_master(pdev); 7938 pci_set_master(pdev);
7929 pci_save_state(pdev); 7939 pci_save_state(pdev);
7930 7940
@@ -8246,6 +8256,7 @@ err_out_unmap:
8246 } 8256 }
8247 8257
8248err_out_release: 8258err_out_release:
8259 pci_disable_pcie_error_reporting(pdev);
8249 pci_release_regions(pdev); 8260 pci_release_regions(pdev);
8250 8261
8251err_out_disable: 8262err_out_disable:
@@ -8436,6 +8447,9 @@ bnx2_remove_one(struct pci_dev *pdev)
8436 kfree(bp->temp_stats_blk); 8447 kfree(bp->temp_stats_blk);
8437 8448
8438 free_netdev(dev); 8449 free_netdev(dev);
8450
8451 pci_disable_pcie_error_reporting(pdev);
8452
8439 pci_release_regions(pdev); 8453 pci_release_regions(pdev);
8440 pci_disable_device(pdev); 8454 pci_disable_device(pdev);
8441 pci_set_drvdata(pdev, NULL); 8455 pci_set_drvdata(pdev, NULL);
@@ -8527,25 +8541,35 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8527{ 8541{
8528 struct net_device *dev = pci_get_drvdata(pdev); 8542 struct net_device *dev = pci_get_drvdata(pdev);
8529 struct bnx2 *bp = netdev_priv(dev); 8543 struct bnx2 *bp = netdev_priv(dev);
8544 pci_ers_result_t result;
8545 int err;
8530 8546
8531 rtnl_lock(); 8547 rtnl_lock();
8532 if (pci_enable_device(pdev)) { 8548 if (pci_enable_device(pdev)) {
8533 dev_err(&pdev->dev, 8549 dev_err(&pdev->dev,
8534 "Cannot re-enable PCI device after reset\n"); 8550 "Cannot re-enable PCI device after reset\n");
8535 rtnl_unlock(); 8551 result = PCI_ERS_RESULT_DISCONNECT;
8536 return PCI_ERS_RESULT_DISCONNECT; 8552 } else {
8553 pci_set_master(pdev);
8554 pci_restore_state(pdev);
8555 pci_save_state(pdev);
8556
8557 if (netif_running(dev)) {
8558 bnx2_set_power_state(bp, PCI_D0);
8559 bnx2_init_nic(bp, 1);
8560 }
8561 result = PCI_ERS_RESULT_RECOVERED;
8537 } 8562 }
8538 pci_set_master(pdev); 8563 rtnl_unlock();
8539 pci_restore_state(pdev);
8540 pci_save_state(pdev);
8541 8564
8542 if (netif_running(dev)) { 8565 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8543 bnx2_set_power_state(bp, PCI_D0); 8566 if (err) {
8544 bnx2_init_nic(bp, 1); 8567 dev_err(&pdev->dev,
8568 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8569 err); /* non-fatal, continue */
8545 } 8570 }
8546 8571
8547 rtnl_unlock(); 8572 return result;
8548 return PCI_ERS_RESULT_RECOVERED;
8549} 8573}
8550 8574
8551/** 8575/**
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 0c2d96ed561c..64329c5fbdea 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
20 * (you will need to reboot afterwards) */ 20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 21/* #define BNX2X_STOP_ON_ERROR */
22 22
23#define DRV_MODULE_VERSION "1.52.53-4" 23#define DRV_MODULE_VERSION "1.52.53-7"
24#define DRV_MODULE_RELDATE "2010/16/08" 24#define DRV_MODULE_RELDATE "2010/09/12"
25#define BNX2X_BC_VER 0x040200 25#define BNX2X_BC_VER 0x040200
26 26
27#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 27#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -369,6 +369,7 @@ struct bnx2x_fastpath {
369#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS) 369#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
370#define MAX_RX_BD (NUM_RX_BD - 1) 370#define MAX_RX_BD (NUM_RX_BD - 1)
371#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) 371#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
372#define MIN_RX_AVAIL 128
372#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ 373#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
373 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) 374 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
374#define RX_BD(x) ((x) & MAX_RX_BD) 375#define RX_BD(x) ((x) & MAX_RX_BD)
@@ -566,13 +567,13 @@ struct bnx2x_common {
566struct bnx2x_port { 567struct bnx2x_port {
567 u32 pmf; 568 u32 pmf;
568 569
569 u32 link_config; 570 u32 link_config[LINK_CONFIG_SIZE];
570 571
571 u32 supported; 572 u32 supported[LINK_CONFIG_SIZE];
572/* link settings - missing defines */ 573/* link settings - missing defines */
573#define SUPPORTED_2500baseX_Full (1 << 15) 574#define SUPPORTED_2500baseX_Full (1 << 15)
574 575
575 u32 advertising; 576 u32 advertising[LINK_CONFIG_SIZE];
576/* link settings - missing defines */ 577/* link settings - missing defines */
577#define ADVERTISED_2500baseX_Full (1 << 15) 578#define ADVERTISED_2500baseX_Full (1 << 15)
578 579
@@ -931,7 +932,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
931int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); 932int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
932int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); 933int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
933int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); 934int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
934u32 bnx2x_fw_command(struct bnx2x *bp, u32 command); 935u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
935void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); 936void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
936void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 937void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
937 u32 addr, u32 len); 938 u32 addr, u32 len);
@@ -939,7 +940,7 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp);
939int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 940int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
940 u32 data_hi, u32 data_lo, int common); 941 u32 data_hi, u32 data_lo, int common);
941void bnx2x_update_coalesce(struct bnx2x *bp); 942void bnx2x_update_coalesce(struct bnx2x *bp);
942 943int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
943static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, 944static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
944 int wait) 945 int wait)
945{ 946{
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 02bf710629a3..efc7be4aefb5 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -20,6 +20,7 @@
20#include <linux/ip.h> 20#include <linux/ip.h>
21#include <linux/ipv6.h> 21#include <linux/ipv6.h>
22#include <net/ip6_checksum.h> 22#include <net/ip6_checksum.h>
23#include <linux/firmware.h>
23#include "bnx2x_cmn.h" 24#include "bnx2x_cmn.h"
24 25
25#ifdef BCM_VLAN 26#ifdef BCM_VLAN
@@ -622,7 +623,7 @@ reuse_rx:
622 /* Set Toeplitz hash for a none-LRO skb */ 623 /* Set Toeplitz hash for a none-LRO skb */
623 bnx2x_set_skb_rxhash(bp, cqe, skb); 624 bnx2x_set_skb_rxhash(bp, cqe, skb);
624 625
625 skb->ip_summed = CHECKSUM_NONE; 626 skb_checksum_none_assert(skb);
626 if (bp->rx_csum) { 627 if (bp->rx_csum) {
627 if (likely(BNX2X_RX_CSUM_OK(cqe))) 628 if (likely(BNX2X_RX_CSUM_OK(cqe)))
628 skb->ip_summed = CHECKSUM_UNNECESSARY; 629 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -780,6 +781,10 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
780 ETH_MAX_AGGREGATION_QUEUES_E1H; 781 ETH_MAX_AGGREGATION_QUEUES_E1H;
781 u16 ring_prod, cqe_ring_prod; 782 u16 ring_prod, cqe_ring_prod;
782 int i, j; 783 int i, j;
784 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
785 MAX_RX_AVAIL/bp->num_queues;
786
787 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
783 788
784 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN; 789 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
785 DP(NETIF_MSG_IFUP, 790 DP(NETIF_MSG_IFUP,
@@ -882,7 +887,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
882 /* Allocate BDs and initialize BD ring */ 887 /* Allocate BDs and initialize BD ring */
883 fp->rx_comp_cons = 0; 888 fp->rx_comp_cons = 0;
884 cqe_ring_prod = ring_prod = 0; 889 cqe_ring_prod = ring_prod = 0;
885 for (i = 0; i < bp->rx_ring_size; i++) { 890 for (i = 0; i < rx_ring_size; i++) {
886 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { 891 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
887 BNX2X_ERR("was only able to allocate " 892 BNX2X_ERR("was only able to allocate "
888 "%d rx skbs on queue[%d]\n", i, j); 893 "%d rx skbs on queue[%d]\n", i, j);
@@ -1206,12 +1211,27 @@ static int bnx2x_set_num_queues(struct bnx2x *bp)
1206 return rc; 1211 return rc;
1207} 1212}
1208 1213
1214static void bnx2x_release_firmware(struct bnx2x *bp)
1215{
1216 kfree(bp->init_ops_offsets);
1217 kfree(bp->init_ops);
1218 kfree(bp->init_data);
1219 release_firmware(bp->firmware);
1220}
1221
1209/* must be called with rtnl_lock */ 1222/* must be called with rtnl_lock */
1210int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 1223int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1211{ 1224{
1212 u32 load_code; 1225 u32 load_code;
1213 int i, rc; 1226 int i, rc;
1214 1227
1228 /* Set init arrays */
1229 rc = bnx2x_init_firmware(bp);
1230 if (rc) {
1231 BNX2X_ERR("Error loading firmware\n");
1232 return rc;
1233 }
1234
1215#ifdef BNX2X_STOP_ON_ERROR 1235#ifdef BNX2X_STOP_ON_ERROR
1216 if (unlikely(bp->panic)) 1236 if (unlikely(bp->panic))
1217 return -EPERM; 1237 return -EPERM;
@@ -1267,7 +1287,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1267 common blocks should be initialized, otherwise - not 1287 common blocks should be initialized, otherwise - not
1268 */ 1288 */
1269 if (!BP_NOMCP(bp)) { 1289 if (!BP_NOMCP(bp)) {
1270 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 1290 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1271 if (!load_code) { 1291 if (!load_code) {
1272 BNX2X_ERR("MCP response failure, aborting\n"); 1292 BNX2X_ERR("MCP response failure, aborting\n");
1273 rc = -EBUSY; 1293 rc = -EBUSY;
@@ -1306,9 +1326,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1306 rc = bnx2x_init_hw(bp, load_code); 1326 rc = bnx2x_init_hw(bp, load_code);
1307 if (rc) { 1327 if (rc) {
1308 BNX2X_ERR("HW init failed, aborting\n"); 1328 BNX2X_ERR("HW init failed, aborting\n");
1309 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 1329 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1310 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); 1330 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1311 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 1331 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1312 goto load_error2; 1332 goto load_error2;
1313 } 1333 }
1314 1334
@@ -1323,7 +1343,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1323 1343
1324 /* Send LOAD_DONE command to MCP */ 1344 /* Send LOAD_DONE command to MCP */
1325 if (!BP_NOMCP(bp)) { 1345 if (!BP_NOMCP(bp)) {
1326 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 1346 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1327 if (!load_code) { 1347 if (!load_code) {
1328 BNX2X_ERR("MCP response failure, aborting\n"); 1348 BNX2X_ERR("MCP response failure, aborting\n");
1329 rc = -EBUSY; 1349 rc = -EBUSY;
@@ -1427,6 +1447,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1427#endif 1447#endif
1428 bnx2x_inc_load_cnt(bp); 1448 bnx2x_inc_load_cnt(bp);
1429 1449
1450 bnx2x_release_firmware(bp);
1451
1430 return 0; 1452 return 0;
1431 1453
1432#ifdef BCM_CNIC 1454#ifdef BCM_CNIC
@@ -1437,8 +1459,8 @@ load_error4:
1437load_error3: 1459load_error3:
1438 bnx2x_int_disable_sync(bp, 1); 1460 bnx2x_int_disable_sync(bp, 1);
1439 if (!BP_NOMCP(bp)) { 1461 if (!BP_NOMCP(bp)) {
1440 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP); 1462 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1441 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 1463 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1442 } 1464 }
1443 bp->port.pmf = 0; 1465 bp->port.pmf = 0;
1444 /* Free SKBs, SGEs, TPA pool and driver internals */ 1466 /* Free SKBs, SGEs, TPA pool and driver internals */
@@ -1454,6 +1476,8 @@ load_error1:
1454 netif_napi_del(&bnx2x_fp(bp, i, napi)); 1476 netif_napi_del(&bnx2x_fp(bp, i, napi));
1455 bnx2x_free_mem(bp); 1477 bnx2x_free_mem(bp);
1456 1478
1479 bnx2x_release_firmware(bp);
1480
1457 return rc; 1481 return rc;
1458} 1482}
1459 1483
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index d1979b1a7ed2..1ad08e4e88f4 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -49,10 +49,11 @@ void bnx2x_link_set(struct bnx2x *bp);
49 * Query link status 49 * Query link status
50 * 50 *
51 * @param bp 51 * @param bp
52 * @param is_serdes
52 * 53 *
53 * @return 0 - link is UP 54 * @return 0 - link is UP
54 */ 55 */
55u8 bnx2x_link_test(struct bnx2x *bp); 56u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
56 57
57/** 58/**
58 * Handles link status change 59 * Handles link status change
@@ -115,6 +116,15 @@ void bnx2x_int_enable(struct bnx2x *bp);
115void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); 116void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
116 117
117/** 118/**
119 * Loads device firmware
120 *
121 * @param bp
122 *
123 * @return int
124 */
125int bnx2x_init_firmware(struct bnx2x *bp);
126
127/**
118 * Init HW blocks according to current initialization stage: 128 * Init HW blocks according to current initialization stage:
119 * COMMON, PORT or FUNCTION. 129 * COMMON, PORT or FUNCTION.
120 * 130 *
@@ -389,7 +399,7 @@ static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
389{ 399{
390 /* Tell compiler that consumer and producer can change */ 400 /* Tell compiler that consumer and producer can change */
391 barrier(); 401 barrier();
392 return (fp->tx_pkt_prod != fp->tx_pkt_cons); 402 return fp->tx_pkt_prod != fp->tx_pkt_cons;
393} 403}
394 404
395static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) 405static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
@@ -622,7 +632,7 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
622 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); 632 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
623 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) 633 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
624 rx_cons_sb++; 634 rx_cons_sb++;
625 return (fp->rx_comp_cons != rx_cons_sb); 635 return fp->rx_comp_cons != rx_cons_sb;
626} 636}
627 637
628/* HW Lock for shared dual port PHYs */ 638/* HW Lock for shared dual port PHYs */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 8b75b05e34c5..d9748e97fad3 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -29,9 +29,12 @@
29static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 29static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
30{ 30{
31 struct bnx2x *bp = netdev_priv(dev); 31 struct bnx2x *bp = netdev_priv(dev);
32 32 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
33 cmd->supported = bp->port.supported; 33 /* Dual Media boards present all available port types */
34 cmd->advertising = bp->port.advertising; 34 cmd->supported = bp->port.supported[cfg_idx] |
35 (bp->port.supported[cfg_idx ^ 1] &
36 (SUPPORTED_TP | SUPPORTED_FIBRE));
37 cmd->advertising = bp->port.advertising[cfg_idx];
35 38
36 if ((bp->state == BNX2X_STATE_OPEN) && 39 if ((bp->state == BNX2X_STATE_OPEN) &&
37 !(bp->flags & MF_FUNC_DIS) && 40 !(bp->flags & MF_FUNC_DIS) &&
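On dual-media boards the hunk above reports the active PHY's full capability mask plus only the connector-type bits (TP/FIBRE) of the other PHY, so ethtool shows both port types without advertising the inactive PHY's speeds. A standalone sketch of that mask composition, with invented capability bits (the real SUPPORTED_* values live in ethtool headers):

#include <stdint.h>
#include <stdio.h>

#define SUP_TP     (1u << 0)
#define SUP_FIBRE  (1u << 1)
#define SUP_1G     (1u << 2)
#define SUP_10G    (1u << 3)

int main(void)
{
    /* hypothetical per-PHY capability masks, indexed like port.supported[] */
    uint32_t supported[2] = { SUP_FIBRE | SUP_10G, SUP_TP | SUP_1G };
    unsigned cfg_idx = 0;    /* index of the currently active PHY */

    /* active PHY in full, the other PHY reduced to its connector type */
    uint32_t cmd_supported = supported[cfg_idx] |
        (supported[cfg_idx ^ 1] & (SUP_TP | SUP_FIBRE));

    printf("supported mask: 0x%x\n", cmd_supported);    /* 0xb: FIBRE|10G|TP */
    return 0;
}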
@@ -48,47 +51,21 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
48 cmd->speed = vn_max_rate; 51 cmd->speed = vn_max_rate;
49 } 52 }
50 } else { 53 } else {
51 cmd->speed = -1; 54 cmd->speed = bp->link_params.req_line_speed[cfg_idx];
52 cmd->duplex = -1; 55 cmd->duplex = bp->link_params.req_duplex[cfg_idx];
53 } 56 }
54 57
55 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { 58 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
56 u32 ext_phy_type =
57 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
58
59 switch (ext_phy_type) {
60 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
61 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
62 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
63 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
64 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
65 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
66 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
67 cmd->port = PORT_FIBRE;
68 break;
69
70 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
71 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
72 cmd->port = PORT_TP;
73 break;
74
75 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
76 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
77 bp->link_params.ext_phy_config);
78 break;
79
80 default:
81 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
82 bp->link_params.ext_phy_config);
83 break;
84 }
85 } else
86 cmd->port = PORT_TP; 59 cmd->port = PORT_TP;
60 else if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
61 cmd->port = PORT_FIBRE;
62 else
63 BNX2X_ERR("XGXS PHY Failure detected\n");
87 64
88 cmd->phy_address = bp->mdio.prtad; 65 cmd->phy_address = bp->mdio.prtad;
89 cmd->transceiver = XCVR_INTERNAL; 66 cmd->transceiver = XCVR_INTERNAL;
90 67
91 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) 68 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
92 cmd->autoneg = AUTONEG_ENABLE; 69 cmd->autoneg = AUTONEG_ENABLE;
93 else 70 else
94 cmd->autoneg = AUTONEG_DISABLE; 71 cmd->autoneg = AUTONEG_DISABLE;
@@ -110,7 +87,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
110static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 87static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
111{ 88{
112 struct bnx2x *bp = netdev_priv(dev); 89 struct bnx2x *bp = netdev_priv(dev);
113 u32 advertising; 90 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
114 91
115 if (IS_E1HMF(bp)) 92 if (IS_E1HMF(bp))
116 return 0; 93 return 0;
@@ -123,26 +100,81 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
123 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, 100 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
124 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); 101 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
125 102
103 cfg_idx = bnx2x_get_link_cfg_idx(bp);
104 old_multi_phy_config = bp->link_params.multi_phy_config;
105 switch (cmd->port) {
106 case PORT_TP:
107 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
108 break; /* no port change */
109
110 if (!(bp->port.supported[0] & SUPPORTED_TP ||
111 bp->port.supported[1] & SUPPORTED_TP)) {
112 DP(NETIF_MSG_LINK, "Unsupported port type\n");
113 return -EINVAL;
114 }
115 bp->link_params.multi_phy_config &=
116 ~PORT_HW_CFG_PHY_SELECTION_MASK;
117 if (bp->link_params.multi_phy_config &
118 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
119 bp->link_params.multi_phy_config |=
120 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
121 else
122 bp->link_params.multi_phy_config |=
123 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
124 break;
125 case PORT_FIBRE:
126 if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
127 break; /* no port change */
128
129 if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
130 bp->port.supported[1] & SUPPORTED_FIBRE)) {
131 DP(NETIF_MSG_LINK, "Unsupported port type\n");
132 return -EINVAL;
133 }
134 bp->link_params.multi_phy_config &=
135 ~PORT_HW_CFG_PHY_SELECTION_MASK;
136 if (bp->link_params.multi_phy_config &
137 PORT_HW_CFG_PHY_SWAPPED_ENABLED)
138 bp->link_params.multi_phy_config |=
139 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
140 else
141 bp->link_params.multi_phy_config |=
142 PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
143 break;
144 default:
145 DP(NETIF_MSG_LINK, "Unsupported port type\n");
146 return -EINVAL;
147 }
148 /* Save new config in case command completes successfully */
149 new_multi_phy_config = bp->link_params.multi_phy_config;
150 /* Get the new cfg_idx */
151 cfg_idx = bnx2x_get_link_cfg_idx(bp);
152 /* Restore old config in case command failed */
153 bp->link_params.multi_phy_config = old_multi_phy_config;
154 DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
155
126 if (cmd->autoneg == AUTONEG_ENABLE) { 156 if (cmd->autoneg == AUTONEG_ENABLE) {
127 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 157 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
128 DP(NETIF_MSG_LINK, "Autoneg not supported\n"); 158 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
129 return -EINVAL; 159 return -EINVAL;
130 } 160 }
131 161
132 /* advertise the requested speed and duplex if supported */ 162 /* advertise the requested speed and duplex if supported */
133 cmd->advertising &= bp->port.supported; 163 cmd->advertising &= bp->port.supported[cfg_idx];
134 164
135 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 165 bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
136 bp->link_params.req_duplex = DUPLEX_FULL; 166 bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
137 bp->port.advertising |= (ADVERTISED_Autoneg | 167 bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
138 cmd->advertising); 168 cmd->advertising);
139 169
140 } else { /* forced speed */ 170 } else { /* forced speed */
141 /* advertise the requested speed and duplex if supported */ 171 /* advertise the requested speed and duplex if supported */
142 switch (cmd->speed) { 172 u32 speed = cmd->speed;
173 speed |= (cmd->speed_hi << 16);
174 switch (speed) {
143 case SPEED_10: 175 case SPEED_10:
144 if (cmd->duplex == DUPLEX_FULL) { 176 if (cmd->duplex == DUPLEX_FULL) {
145 if (!(bp->port.supported & 177 if (!(bp->port.supported[cfg_idx] &
146 SUPPORTED_10baseT_Full)) { 178 SUPPORTED_10baseT_Full)) {
147 DP(NETIF_MSG_LINK, 179 DP(NETIF_MSG_LINK,
148 "10M full not supported\n"); 180 "10M full not supported\n");
@@ -152,7 +184,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
152 advertising = (ADVERTISED_10baseT_Full | 184 advertising = (ADVERTISED_10baseT_Full |
153 ADVERTISED_TP); 185 ADVERTISED_TP);
154 } else { 186 } else {
155 if (!(bp->port.supported & 187 if (!(bp->port.supported[cfg_idx] &
156 SUPPORTED_10baseT_Half)) { 188 SUPPORTED_10baseT_Half)) {
157 DP(NETIF_MSG_LINK, 189 DP(NETIF_MSG_LINK,
158 "10M half not supported\n"); 190 "10M half not supported\n");
@@ -166,7 +198,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
166 198
167 case SPEED_100: 199 case SPEED_100:
168 if (cmd->duplex == DUPLEX_FULL) { 200 if (cmd->duplex == DUPLEX_FULL) {
169 if (!(bp->port.supported & 201 if (!(bp->port.supported[cfg_idx] &
170 SUPPORTED_100baseT_Full)) { 202 SUPPORTED_100baseT_Full)) {
171 DP(NETIF_MSG_LINK, 203 DP(NETIF_MSG_LINK,
172 "100M full not supported\n"); 204 "100M full not supported\n");
@@ -176,7 +208,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
176 advertising = (ADVERTISED_100baseT_Full | 208 advertising = (ADVERTISED_100baseT_Full |
177 ADVERTISED_TP); 209 ADVERTISED_TP);
178 } else { 210 } else {
179 if (!(bp->port.supported & 211 if (!(bp->port.supported[cfg_idx] &
180 SUPPORTED_100baseT_Half)) { 212 SUPPORTED_100baseT_Half)) {
181 DP(NETIF_MSG_LINK, 213 DP(NETIF_MSG_LINK,
182 "100M half not supported\n"); 214 "100M half not supported\n");
@@ -194,7 +226,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
194 return -EINVAL; 226 return -EINVAL;
195 } 227 }
196 228
197 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) { 229 if (!(bp->port.supported[cfg_idx] &
230 SUPPORTED_1000baseT_Full)) {
198 DP(NETIF_MSG_LINK, "1G full not supported\n"); 231 DP(NETIF_MSG_LINK, "1G full not supported\n");
199 return -EINVAL; 232 return -EINVAL;
200 } 233 }
@@ -210,7 +243,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
210 return -EINVAL; 243 return -EINVAL;
211 } 244 }
212 245
213 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) { 246 if (!(bp->port.supported[cfg_idx]
247 & SUPPORTED_2500baseX_Full)) {
214 DP(NETIF_MSG_LINK, 248 DP(NETIF_MSG_LINK,
215 "2.5G full not supported\n"); 249 "2.5G full not supported\n");
216 return -EINVAL; 250 return -EINVAL;
@@ -226,7 +260,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
226 return -EINVAL; 260 return -EINVAL;
227 } 261 }
228 262
229 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) { 263 if (!(bp->port.supported[cfg_idx]
264 & SUPPORTED_10000baseT_Full)) {
230 DP(NETIF_MSG_LINK, "10G full not supported\n"); 265 DP(NETIF_MSG_LINK, "10G full not supported\n");
231 return -EINVAL; 266 return -EINVAL;
232 } 267 }
@@ -236,20 +271,23 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
236 break; 271 break;
237 272
238 default: 273 default:
239 DP(NETIF_MSG_LINK, "Unsupported speed\n"); 274 DP(NETIF_MSG_LINK, "Unsupported speed %d\n", speed);
240 return -EINVAL; 275 return -EINVAL;
241 } 276 }
242 277
243 bp->link_params.req_line_speed = cmd->speed; 278 bp->link_params.req_line_speed[cfg_idx] = speed;
244 bp->link_params.req_duplex = cmd->duplex; 279 bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
245 bp->port.advertising = advertising; 280 bp->port.advertising[cfg_idx] = advertising;
246 } 281 }
247 282
248 DP(NETIF_MSG_LINK, "req_line_speed %d\n" 283 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
249 DP_LEVEL " req_duplex %d advertising 0x%x\n", 284 DP_LEVEL " req_duplex %d advertising 0x%x\n",
250 bp->link_params.req_line_speed, bp->link_params.req_duplex, 285 bp->link_params.req_line_speed[cfg_idx],
251 bp->port.advertising); 286 bp->link_params.req_duplex[cfg_idx],
287 bp->port.advertising[cfg_idx]);
252 288
289 /* Set new config */
290 bp->link_params.multi_phy_config = new_multi_phy_config;
253 if (netif_running(dev)) { 291 if (netif_running(dev)) {
254 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 292 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
255 bnx2x_link_set(bp); 293 bnx2x_link_set(bp);
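Two details in the hunk above are worth spelling out: the requested speed is reassembled from ethtool's split speed/speed_hi fields (speed_hi carries bits 16-31, so requests above 65535 Mb/s survive), and the new multi_phy_config is staged up front but only written back once every validation has passed, so any -EINVAL path leaves the old configuration untouched. A standalone sketch of both, with hypothetical field values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* ethtool_cmd carries the speed in two 16-bit halves; reassemble it */
    uint32_t req = 100000;                  /* a hypothetical 100G request */
    uint16_t speed_lo = req & 0xffff;
    uint16_t speed_hi = req >> 16;
    uint32_t speed = speed_lo | ((uint32_t)speed_hi << 16);
    printf("reassembled speed: %u Mb/s\n", speed);

    /* validate-then-commit: stage the new selection, keep the old value
     * live while the speed/duplex checks run, and store the staged value
     * only once nothing has bailed out with -EINVAL */
    uint32_t multi_phy_config = 0x1;        /* current value */
    uint32_t staged = 0x2;                  /* computed new value */
    int valid = (speed != 0);               /* stand-in for the checks */
    if (valid)
        multi_phy_config = staged;          /* commit */
    printf("multi_phy_config: 0x%x\n", multi_phy_config);
    return 0;
}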
@@ -811,7 +849,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
811 struct bnx2x *bp = netdev_priv(dev); 849 struct bnx2x *bp = netdev_priv(dev);
812 int port = BP_PORT(bp); 850 int port = BP_PORT(bp);
813 int rc = 0; 851 int rc = 0;
814 852 u32 ext_phy_config;
815 if (!netif_running(dev)) 853 if (!netif_running(dev))
816 return -EAGAIN; 854 return -EAGAIN;
817 855
@@ -827,6 +865,10 @@ static int bnx2x_set_eeprom(struct net_device *dev,
827 !bp->port.pmf) 865 !bp->port.pmf)
828 return -EINVAL; 866 return -EINVAL;
829 867
868 ext_phy_config =
869 SHMEM_RD(bp,
870 dev_info.port_hw_config[port].external_phy_config);
871
830 if (eeprom->magic == 0x50485950) { 872 if (eeprom->magic == 0x50485950) {
831 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */ 873 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
832 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 874 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -834,7 +876,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
834 bnx2x_acquire_phy_lock(bp); 876 bnx2x_acquire_phy_lock(bp);
835 rc |= bnx2x_link_reset(&bp->link_params, 877 rc |= bnx2x_link_reset(&bp->link_params,
836 &bp->link_vars, 0); 878 &bp->link_vars, 0);
837 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == 879 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
838 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) 880 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
839 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 881 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
840 MISC_REGISTERS_GPIO_HIGH, port); 882 MISC_REGISTERS_GPIO_HIGH, port);
@@ -855,10 +897,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
855 } 897 }
856 } else if (eeprom->magic == 0x53985943) { 898 } else if (eeprom->magic == 0x53985943) {
857 /* 'PHYC' (0x53985943): PHY FW upgrade completed */ 899 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
858 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) == 900 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
859 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) { 901 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
860 u8 ext_phy_addr =
861 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
862 902
863 /* DSP Remove Download Mode */ 903 /* DSP Remove Download Mode */
864 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 904 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
@@ -866,7 +906,8 @@ static int bnx2x_set_eeprom(struct net_device *dev,
866 906
867 bnx2x_acquire_phy_lock(bp); 907 bnx2x_acquire_phy_lock(bp);
868 908
869 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); 909 bnx2x_sfx7101_sp_sw_reset(bp,
910 &bp->link_params.phy[EXT_PHY1]);
870 911
871 /* wait 0.5 sec to allow it to run */ 912 /* wait 0.5 sec to allow it to run */
872 msleep(500); 913 msleep(500);
@@ -920,7 +961,14 @@ static void bnx2x_get_ringparam(struct net_device *dev,
920 ering->rx_mini_max_pending = 0; 961 ering->rx_mini_max_pending = 0;
921 ering->rx_jumbo_max_pending = 0; 962 ering->rx_jumbo_max_pending = 0;
922 963
923 ering->rx_pending = bp->rx_ring_size; 964 if (bp->rx_ring_size)
965 ering->rx_pending = bp->rx_ring_size;
966 else
967 if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
968 ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
969 else
970 ering->rx_pending = MAX_RX_AVAIL;
971
924 ering->rx_mini_pending = 0; 972 ering->rx_mini_pending = 0;
925 ering->rx_jumbo_pending = 0; 973 ering->rx_jumbo_pending = 0;
926 974
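When no explicit ring size has been configured, the hunk above reports a per-queue default: the total RX budget split evenly across the active queues. A one-line worked example, assuming MAX_RX_AVAIL were 4096 (an illustrative value only; the real constant lives in the driver headers):

#include <stdio.h>

int main(void)
{
    unsigned max_rx_avail = 4096;    /* assumed value, for illustration */
    unsigned num_queues = 8;

    /* mirrors: ering->rx_pending = MAX_RX_AVAIL / bp->num_queues */
    printf("default rx_pending per queue: %u\n", max_rx_avail / num_queues);
    return 0;    /* prints 512 */
}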
@@ -940,6 +988,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
940 } 988 }
941 989
942 if ((ering->rx_pending > MAX_RX_AVAIL) || 990 if ((ering->rx_pending > MAX_RX_AVAIL) ||
991 (ering->rx_pending < MIN_RX_AVAIL) ||
943 (ering->tx_pending > MAX_TX_AVAIL) || 992 (ering->tx_pending > MAX_TX_AVAIL) ||
944 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) 993 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
945 return -EINVAL; 994 return -EINVAL;
@@ -959,10 +1008,9 @@ static void bnx2x_get_pauseparam(struct net_device *dev,
959 struct ethtool_pauseparam *epause) 1008 struct ethtool_pauseparam *epause)
960{ 1009{
961 struct bnx2x *bp = netdev_priv(dev); 1010 struct bnx2x *bp = netdev_priv(dev);
962 1011 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
963 epause->autoneg = (bp->link_params.req_flow_ctrl == 1012 epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
964 BNX2X_FLOW_CTRL_AUTO) && 1013 BNX2X_FLOW_CTRL_AUTO);
965 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
966 1014
967 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) == 1015 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
968 BNX2X_FLOW_CTRL_RX); 1016 BNX2X_FLOW_CTRL_RX);
@@ -978,7 +1026,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
978 struct ethtool_pauseparam *epause) 1026 struct ethtool_pauseparam *epause)
979{ 1027{
980 struct bnx2x *bp = netdev_priv(dev); 1028 struct bnx2x *bp = netdev_priv(dev);
981 1029 u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
982 if (IS_E1HMF(bp)) 1030 if (IS_E1HMF(bp))
983 return 0; 1031 return 0;
984 1032
@@ -986,29 +1034,31 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
986 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", 1034 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
987 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); 1035 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
988 1036
989 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; 1037 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
990 1038
991 if (epause->rx_pause) 1039 if (epause->rx_pause)
992 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX; 1040 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
993 1041
994 if (epause->tx_pause) 1042 if (epause->tx_pause)
995 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX; 1043 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
996 1044
997 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) 1045 if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
998 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; 1046 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
999 1047
1000 if (epause->autoneg) { 1048 if (epause->autoneg) {
1001 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 1049 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
1002 DP(NETIF_MSG_LINK, "autoneg not supported\n"); 1050 DP(NETIF_MSG_LINK, "autoneg not supported\n");
1003 return -EINVAL; 1051 return -EINVAL;
1004 } 1052 }
1005 1053
1006 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) 1054 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
1007 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO; 1055 bp->link_params.req_flow_ctrl[cfg_idx] =
1056 BNX2X_FLOW_CTRL_AUTO;
1057 }
1008 } 1058 }
1009 1059
1010 DP(NETIF_MSG_LINK, 1060 DP(NETIF_MSG_LINK,
1011 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl); 1061 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
1012 1062
1013 if (netif_running(dev)) { 1063 if (netif_running(dev)) {
1014 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 1064 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -1272,12 +1322,12 @@ test_mem_exit:
1272 return rc; 1322 return rc;
1273} 1323}
1274 1324
1275static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up) 1325static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
1276{ 1326{
1277 int cnt = 1000; 1327 int cnt = 1000;
1278 1328
1279 if (link_up) 1329 if (link_up)
1280 while (bnx2x_link_test(bp) && cnt--) 1330 while (bnx2x_link_test(bp, is_serdes) && cnt--)
1281 msleep(10); 1331 msleep(10);
1282} 1332}
1283 1333
@@ -1304,7 +1354,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1304 /* check the loopback mode */ 1354 /* check the loopback mode */
1305 switch (loopback_mode) { 1355 switch (loopback_mode) {
1306 case BNX2X_PHY_LOOPBACK: 1356 case BNX2X_PHY_LOOPBACK:
1307 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10) 1357 if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
1308 return -EINVAL; 1358 return -EINVAL;
1309 break; 1359 break;
1310 case BNX2X_MAC_LOOPBACK: 1360 case BNX2X_MAC_LOOPBACK:
@@ -1549,7 +1599,7 @@ static void bnx2x_self_test(struct net_device *dev,
1549 struct ethtool_test *etest, u64 *buf) 1599 struct ethtool_test *etest, u64 *buf)
1550{ 1600{
1551 struct bnx2x *bp = netdev_priv(dev); 1601 struct bnx2x *bp = netdev_priv(dev);
1552 1602 u8 is_serdes;
1553 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 1603 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1554 printk(KERN_ERR "Handling parity error recovery. Try again later\n"); 1604 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1555 etest->flags |= ETH_TEST_FL_FAILED; 1605 etest->flags |= ETH_TEST_FL_FAILED;
@@ -1564,6 +1614,7 @@ static void bnx2x_self_test(struct net_device *dev,
1564 /* offline tests are not supported in MF mode */ 1614 /* offline tests are not supported in MF mode */
1565 if (IS_E1HMF(bp)) 1615 if (IS_E1HMF(bp))
1566 etest->flags &= ~ETH_TEST_FL_OFFLINE; 1616 etest->flags &= ~ETH_TEST_FL_OFFLINE;
1617 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
1567 1618
1568 if (etest->flags & ETH_TEST_FL_OFFLINE) { 1619 if (etest->flags & ETH_TEST_FL_OFFLINE) {
1569 int port = BP_PORT(bp); 1620 int port = BP_PORT(bp);
@@ -1575,11 +1626,12 @@ static void bnx2x_self_test(struct net_device *dev,
1575 /* disable input for TX port IF */ 1626 /* disable input for TX port IF */
1576 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); 1627 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
1577 1628
1578 link_up = (bnx2x_link_test(bp) == 0); 1629 link_up = bp->link_vars.link_up;
1630
1579 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 1631 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
1580 bnx2x_nic_load(bp, LOAD_DIAG); 1632 bnx2x_nic_load(bp, LOAD_DIAG);
1581 /* wait until link state is restored */ 1633 /* wait until link state is restored */
1582 bnx2x_wait_for_link(bp, link_up); 1634 bnx2x_wait_for_link(bp, link_up, is_serdes);
1583 1635
1584 if (bnx2x_test_registers(bp) != 0) { 1636 if (bnx2x_test_registers(bp) != 0) {
1585 buf[0] = 1; 1637 buf[0] = 1;
@@ -1600,7 +1652,7 @@ static void bnx2x_self_test(struct net_device *dev,
1600 1652
1601 bnx2x_nic_load(bp, LOAD_NORMAL); 1653 bnx2x_nic_load(bp, LOAD_NORMAL);
1602 /* wait until link state is restored */ 1654 /* wait until link state is restored */
1603 bnx2x_wait_for_link(bp, link_up); 1655 bnx2x_wait_for_link(bp, link_up, is_serdes);
1604 } 1656 }
1605 if (bnx2x_test_nvram(bp) != 0) { 1657 if (bnx2x_test_nvram(bp) != 0) {
1606 buf[3] = 1; 1658 buf[3] = 1;
@@ -1611,7 +1663,7 @@ static void bnx2x_self_test(struct net_device *dev,
1611 etest->flags |= ETH_TEST_FL_FAILED; 1663 etest->flags |= ETH_TEST_FL_FAILED;
1612 } 1664 }
1613 if (bp->port.pmf) 1665 if (bp->port.pmf)
1614 if (bnx2x_link_test(bp) != 0) { 1666 if (bnx2x_link_test(bp, is_serdes) != 0) {
1615 buf[5] = 1; 1667 buf[5] = 1;
1616 etest->flags |= ETH_TEST_FL_FAILED; 1668 etest->flags |= ETH_TEST_FL_FAILED;
1617 } 1669 }
@@ -1910,10 +1962,11 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
1910 1962
1911 for (i = 0; i < (data * 2); i++) { 1963 for (i = 0; i < (data * 2); i++) {
1912 if ((i % 2) == 0) 1964 if ((i % 2) == 0)
1913 bnx2x_set_led(&bp->link_params, LED_MODE_OPER, 1965 bnx2x_set_led(&bp->link_params, &bp->link_vars,
1914 SPEED_1000); 1966 LED_MODE_OPER, SPEED_1000);
1915 else 1967 else
1916 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0); 1968 bnx2x_set_led(&bp->link_params, &bp->link_vars,
1969 LED_MODE_OFF, 0);
1917 1970
1918 msleep_interruptible(500); 1971 msleep_interruptible(500);
1919 if (signal_pending(current)) 1972 if (signal_pending(current))
@@ -1921,7 +1974,7 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
1921 } 1974 }
1922 1975
1923 if (bp->link_vars.link_up) 1976 if (bp->link_vars.link_up)
1924 bnx2x_set_led(&bp->link_params, LED_MODE_OPER, 1977 bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
1925 bp->link_vars.line_speed); 1978 bp->link_vars.line_speed);
1926 1979
1927 return 0; 1980 return 0;
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index fd1f29e0317d..60d141cd9950 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -78,6 +78,8 @@ struct shared_hw_cfg { /* NVRAM Offset */
78#define SHARED_HW_CFG_LED_PHY11 0x000b0000 78#define SHARED_HW_CFG_LED_PHY11 0x000b0000
79#define SHARED_HW_CFG_LED_MAC4 0x000c0000 79#define SHARED_HW_CFG_LED_MAC4 0x000c0000
80#define SHARED_HW_CFG_LED_PHY8 0x000d0000 80#define SHARED_HW_CFG_LED_PHY8 0x000d0000
81#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000
82
81 83
82#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 84#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000
83#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 85#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24
@@ -120,6 +122,23 @@ struct shared_hw_cfg { /* NVRAM Offset */
120#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 122#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000
121#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 123#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000
122 124
125 /* Set the MDC/MDIO access for the first external phy */
126#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000
127#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26
128#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000
129#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000
130#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000
131#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000
132#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000
133
134 /* Set the MDC/MDIO access for the second external phy */
135#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000
136#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29
137#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000
138#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000
139#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000
140#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000
141#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000
123 u32 power_dissipated; /* 0x11c */ 142 u32 power_dissipated; /* 0x11c */
124#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 143#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
125#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 144#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24
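The ACCESS1/ACCESS2 values above are 3-bit fields packed into the top of the shared_hw_cfg word; a field is pulled out with the usual (value & MASK) >> SHIFT idiom, and the enumerated values compare against the still-shifted constants. A standalone decode sketch reusing the same masks (the NVRAM word itself is invented):

#include <stdint.h>
#include <stdio.h>

#define MDC_MDIO_ACCESS1_MASK   0x1C000000
#define MDC_MDIO_ACCESS1_SHIFT  26
#define MDC_MDIO_ACCESS1_EMAC1  0x08000000

int main(void)
{
    uint32_t shared_cfg = 0x08000000;    /* hypothetical NVRAM word */

    /* masked-but-unshifted compare, as bnx2x_get_emac_base() consumes it */
    if ((shared_cfg & MDC_MDIO_ACCESS1_MASK) == MDC_MDIO_ACCESS1_EMAC1)
        printf("first external PHY is wired to EMAC1\n");

    /* or extract the field as a small integer */
    printf("access1 field = %u\n",
           (shared_cfg & MDC_MDIO_ACCESS1_MASK) >> MDC_MDIO_ACCESS1_SHIFT);
    return 0;    /* field = 2 */
}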
@@ -221,7 +240,88 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
221 240
222 u16 xgxs_config_tx[4]; /* 0x1A0 */ 241 u16 xgxs_config_tx[4]; /* 0x1A0 */
223 242
224 u32 Reserved1[64]; /* 0x1A8 */ 243 u32 Reserved1[57]; /* 0x1A8 */
244 u32 speed_capability_mask2; /* 0x28C */
245#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
246#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
247#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001
248#define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002
249#define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004
250#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008
251#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010
252#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020
253#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040
254#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12G 0x00000080
255#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12_DOT_5G 0x00000100
256#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_13G 0x00000200
257#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_15G 0x00000400
258#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_16G 0x00000800
259
260#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000
261#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16
262#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000
263#define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000
264#define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000
265#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000
266#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000
267#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000
268#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000
269#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12G 0x00800000
270#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12_DOT_5G 0x01000000
271#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_13G 0x02000000
272#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_15G 0x04000000
273#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_16G 0x08000000
274
275 /* In the case where two media types (e.g. copper and fiber) are
276 present and electrically active at the same time, PHY Selection
277 will determine which of the two PHYs will be designated as the
278 Active PHY and used for a connection to the network. */
279 u32 multi_phy_config; /* 0x290 */
280#define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007
281#define PORT_HW_CFG_PHY_SELECTION_SHIFT 0
282#define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000
283#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001
284#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002
285#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
286#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
287
288 /* When enabled, all second phy nvram parameters will be swapped
289 with the first phy parameters */
290#define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008
291#define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3
292#define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000
293#define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008
294
295
296 /* Address of the second external phy */
297 u32 external_phy_config2; /* 0x294 */
298#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF
299#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0
300
301 /* The second XGXS external PHY type */
302#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00
303#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8
304#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000
305#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100
306#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200
307#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300
308#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400
309#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500
310#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600
311#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700
312#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800
313#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900
314#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00
315#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00
316#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00
317#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00
318#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
319#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
320
321 /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as
322 8706, 8726 and 8727) not all 4 values are needed. */
323 u16 xgxs_config2_rx[4]; /* 0x296 */
324 u16 xgxs_config2_tx[4]; /* 0x2A0 */
225 325
226 u32 lane_config; 326 u32 lane_config;
227#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff 327#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
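The ethtool code earlier combines PHY_SELECTION with the PHY_SWAPPED bit: when the NVRAM swap flag is set, "first" and "second" trade places, so selecting a given media type may mean programming SECOND_PHY. A standalone sketch of that decode, using shortened names for the values defined above:

#include <stdint.h>
#include <stdio.h>

#define PHY_SELECTION_MASK        0x00000007
#define PHY_SELECTION_FIRST_PHY   0x00000001
#define PHY_SELECTION_SECOND_PHY  0x00000002
#define PHY_SWAPPED_ENABLED       0x00000008

/* Pick the selection value that lands on the physically-first PHY,
 * honouring the swap bit -- the same dance bnx2x_set_settings() does. */
static uint32_t select_first_phy(uint32_t multi_phy_config)
{
    multi_phy_config &= ~PHY_SELECTION_MASK;
    if (multi_phy_config & PHY_SWAPPED_ENABLED)
        multi_phy_config |= PHY_SELECTION_SECOND_PHY;
    else
        multi_phy_config |= PHY_SELECTION_FIRST_PHY;
    return multi_phy_config;
}

int main(void)
{
    printf("no swap: 0x%x\n", select_first_phy(0));                   /* 0x1 */
    printf("swapped: 0x%x\n", select_first_phy(PHY_SWAPPED_ENABLED)); /* 0xa */
    return 0;
}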
@@ -515,10 +615,17 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
515#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 615#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400
516 616
517 /* The default for MCP link configuration, 617 /* The default for MCP link configuration,
518 uses the same defines as link_config */ 618 uses the same defines as link_config */
519 u32 mfw_wol_link_cfg; 619 u32 mfw_wol_link_cfg;
620 /* The default for the driver of the second external phy,
621 uses the same defines as link_config */
622 u32 link_config2; /* 0x47C */
623
624 /* The default for MCP of the second external phy,
625 uses the same defines as link_config */
626 u32 mfw_wol_link_cfg2; /* 0x480 */
520 627
521 u32 reserved[19]; 628 u32 Reserved2[17]; /* 0x484 */
522 629
523}; 630};
524 631
@@ -686,8 +793,14 @@ struct drv_func_mb {
686 * The optic module verification commands require bootcode 793 * The optic module verification commands require bootcode
687 * v5.0.6 or later 794 * v5.0.6 or later
688 */ 795 */
689#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000 796#define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
690#define REQ_BC_VER_4_VRFY_OPT_MDL 0x00050006 797#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
798 /*
799 * The specific optic module verification command requires bootcode
800 * v5.2.12 or later
801 */
802#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
803#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
691 804
692#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 805#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
693#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 806#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
@@ -922,7 +1035,12 @@ struct shmem2_region {
922#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 1035#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040
923#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 1036#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
924#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE 1037#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE
925 1038 u32 ext_phy_fw_version2[PORT_MAX];
1039 /*
1040 * For backwards compatibility, if the mf_cfg_addr does not exist
1041 * (the size field is smaller than 0xc) the mf_cfg resides at the
1042 * end of struct shmem_region
1043 */
926}; 1044};
927 1045
928 1046
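The comment in the last hunk describes how older bootcodes are detected: shmem2_region grew over time, so a consumer checks the region's size field before trusting a newer member such as mf_cfg_addr. A minimal sketch of that guard, with the size value and threshold treated as illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical: size field read from the shmem2 header */
    uint32_t shmem2_size = 0x8;

    /* members past the old layout only exist if the region is big enough */
    if (shmem2_size < 0xc)
        printf("old bootcode: mf_cfg at end of shmem_region\n");
    else
        printf("new bootcode: read mf_cfg_addr from shmem2\n");
    return 0;
}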
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 0383e3066313..a07a3a6abd40 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -168,50 +168,19 @@
168/**********************************************************/ 168/**********************************************************/
169/* INTERFACE */ 169/* INTERFACE */
170/**********************************************************/ 170/**********************************************************/
171#define CL45_WR_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \ 171
172 bnx2x_cl45_write(_bp, _port, 0, _phy_addr, \ 172#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
173 DEFAULT_PHY_DEV_ADDR, \ 173 bnx2x_cl45_write(_bp, _phy, \
174 (_phy)->def_md_devad, \
174 (_bank + (_addr & 0xf)), \ 175 (_bank + (_addr & 0xf)), \
175 _val) 176 _val)
176 177
177#define CL45_RD_OVER_CL22(_bp, _port, _phy_addr, _bank, _addr, _val) \ 178#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
178 bnx2x_cl45_read(_bp, _port, 0, _phy_addr, \ 179 bnx2x_cl45_read(_bp, _phy, \
179 DEFAULT_PHY_DEV_ADDR, \ 180 (_phy)->def_md_devad, \
180 (_bank + (_addr & 0xf)), \ 181 (_bank + (_addr & 0xf)), \
181 _val) 182 _val)
182 183
183static void bnx2x_set_serdes_access(struct link_params *params)
184{
185 struct bnx2x *bp = params->bp;
186 u32 emac_base = (params->port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
187
188 /* Set Clause 22 */
189 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 1);
190 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
191 udelay(500);
192 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
193 udelay(500);
194 /* Set Clause 45 */
195 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + params->port*0x10, 0);
196}
197static void bnx2x_set_phy_mdio(struct link_params *params, u8 phy_flags)
198{
199 struct bnx2x *bp = params->bp;
200
201 if (phy_flags & PHY_XGXS_FLAG) {
202 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
203 params->port*0x18, 0);
204 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
205 DEFAULT_PHY_DEV_ADDR);
206 } else {
207 bnx2x_set_serdes_access(params);
208
209 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
210 params->port*0x10,
211 DEFAULT_PHY_DEV_ADDR);
212 }
213}
214
215static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) 184static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
216{ 185{
217 u32 val = REG_RD(bp, reg); 186 u32 val = REG_RD(bp, reg);
@@ -527,162 +496,6 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
527 return 0; 496 return 0;
528} 497}
529 498
530static void bnx2x_phy_deassert(struct link_params *params, u8 phy_flags)
531{
532 struct bnx2x *bp = params->bp;
533 u32 val;
534
535 if (phy_flags & PHY_XGXS_FLAG) {
536 DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:XGXS\n");
537 val = XGXS_RESET_BITS;
538
539 } else { /* SerDes */
540 DP(NETIF_MSG_LINK, "bnx2x_phy_deassert:SerDes\n");
541 val = SERDES_RESET_BITS;
542 }
543
544 val = val << (params->port*16);
545
546 /* reset and unreset the SerDes/XGXS */
547 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
548 val);
549 udelay(500);
550 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET,
551 val);
552 bnx2x_set_phy_mdio(params, phy_flags);
553}
554
555void bnx2x_link_status_update(struct link_params *params,
556 struct link_vars *vars)
557{
558 struct bnx2x *bp = params->bp;
559 u8 link_10g;
560 u8 port = params->port;
561
562 if (params->switch_cfg == SWITCH_CFG_1G)
563 vars->phy_flags = PHY_SERDES_FLAG;
564 else
565 vars->phy_flags = PHY_XGXS_FLAG;
566 vars->link_status = REG_RD(bp, params->shmem_base +
567 offsetof(struct shmem_region,
568 port_mb[port].link_status));
569
570 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
571
572 if (vars->link_up) {
573 DP(NETIF_MSG_LINK, "phy link up\n");
574
575 vars->phy_link_up = 1;
576 vars->duplex = DUPLEX_FULL;
577 switch (vars->link_status &
578 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
579 case LINK_10THD:
580 vars->duplex = DUPLEX_HALF;
581 /* fall thru */
582 case LINK_10TFD:
583 vars->line_speed = SPEED_10;
584 break;
585
586 case LINK_100TXHD:
587 vars->duplex = DUPLEX_HALF;
588 /* fall thru */
589 case LINK_100T4:
590 case LINK_100TXFD:
591 vars->line_speed = SPEED_100;
592 break;
593
594 case LINK_1000THD:
595 vars->duplex = DUPLEX_HALF;
596 /* fall thru */
597 case LINK_1000TFD:
598 vars->line_speed = SPEED_1000;
599 break;
600
601 case LINK_2500THD:
602 vars->duplex = DUPLEX_HALF;
603 /* fall thru */
604 case LINK_2500TFD:
605 vars->line_speed = SPEED_2500;
606 break;
607
608 case LINK_10GTFD:
609 vars->line_speed = SPEED_10000;
610 break;
611
612 case LINK_12GTFD:
613 vars->line_speed = SPEED_12000;
614 break;
615
616 case LINK_12_5GTFD:
617 vars->line_speed = SPEED_12500;
618 break;
619
620 case LINK_13GTFD:
621 vars->line_speed = SPEED_13000;
622 break;
623
624 case LINK_15GTFD:
625 vars->line_speed = SPEED_15000;
626 break;
627
628 case LINK_16GTFD:
629 vars->line_speed = SPEED_16000;
630 break;
631
632 default:
633 break;
634 }
635
636 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
637 vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
638 else
639 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_TX;
640
641 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
642 vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
643 else
644 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_RX;
645
646 if (vars->phy_flags & PHY_XGXS_FLAG) {
647 if (vars->line_speed &&
648 ((vars->line_speed == SPEED_10) ||
649 (vars->line_speed == SPEED_100))) {
650 vars->phy_flags |= PHY_SGMII_FLAG;
651 } else {
652 vars->phy_flags &= ~PHY_SGMII_FLAG;
653 }
654 }
655
656 /* anything 10 and over uses the bmac */
657 link_10g = ((vars->line_speed == SPEED_10000) ||
658 (vars->line_speed == SPEED_12000) ||
659 (vars->line_speed == SPEED_12500) ||
660 (vars->line_speed == SPEED_13000) ||
661 (vars->line_speed == SPEED_15000) ||
662 (vars->line_speed == SPEED_16000));
663 if (link_10g)
664 vars->mac_type = MAC_TYPE_BMAC;
665 else
666 vars->mac_type = MAC_TYPE_EMAC;
667
668 } else { /* link down */
669 DP(NETIF_MSG_LINK, "phy link down\n");
670
671 vars->phy_link_up = 0;
672
673 vars->line_speed = 0;
674 vars->duplex = DUPLEX_FULL;
675 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
676
677 /* indicate no mac active */
678 vars->mac_type = MAC_TYPE_NONE;
679 }
680
681 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
682 vars->link_status, vars->phy_link_up);
683 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
684 vars->line_speed, vars->duplex, vars->flow_ctrl);
685}
686 499
687static void bnx2x_update_mng(struct link_params *params, u32 link_status) 500static void bnx2x_update_mng(struct link_params *params, u32 link_status)
688{ 501{
@@ -800,62 +613,69 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
800 return 0; 613 return 0;
801} 614}
802 615
803static u32 bnx2x_get_emac_base(struct bnx2x *bp, u32 ext_phy_type, u8 port) 616static u32 bnx2x_get_emac_base(struct bnx2x *bp,
617 u32 mdc_mdio_access, u8 port)
804{ 618{
805 u32 emac_base; 619 u32 emac_base = 0;
806 620 switch (mdc_mdio_access) {
807 switch (ext_phy_type) { 621 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
808 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 622 break;
809 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 623 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
810 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 624 if (REG_RD(bp, NIG_REG_PORT_SWAP))
811 /* All MDC/MDIO is directed through single EMAC */ 625 emac_base = GRCBASE_EMAC1;
626 else
627 emac_base = GRCBASE_EMAC0;
628 break;
629 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
812 if (REG_RD(bp, NIG_REG_PORT_SWAP)) 630 if (REG_RD(bp, NIG_REG_PORT_SWAP))
813 emac_base = GRCBASE_EMAC0; 631 emac_base = GRCBASE_EMAC0;
814 else 632 else
815 emac_base = GRCBASE_EMAC1; 633 emac_base = GRCBASE_EMAC1;
816 break; 634 break;
817 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 635 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
636 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
637 break;
638 case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
818 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1; 639 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
819 break; 640 break;
820 default: 641 default:
821 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
822 break; 642 break;
823 } 643 }
824 return emac_base; 644 return emac_base;
825 645
826} 646}
827 647
828u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type, 648u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
829 u8 phy_addr, u8 devad, u16 reg, u16 val) 649 u8 devad, u16 reg, u16 val)
830{ 650{
831 u32 tmp, saved_mode; 651 u32 tmp, saved_mode;
832 u8 i, rc = 0; 652 u8 i, rc = 0;
833 u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
834 653
835 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 654 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
836 * (a value of 49==0x31) and make sure that the AUTO poll is off 655 * (a value of 49==0x31) and make sure that the AUTO poll is off
837 */ 656 */
838 657
839 saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 658 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
840 tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL | 659 tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL |
841 EMAC_MDIO_MODE_CLOCK_CNT); 660 EMAC_MDIO_MODE_CLOCK_CNT);
842 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 | 661 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
843 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); 662 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
844 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp); 663 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
845 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 664 REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
846 udelay(40); 665 udelay(40);
847 666
848 /* address */ 667 /* address */
849 668
850 tmp = ((phy_addr << 21) | (devad << 16) | reg | 669 tmp = ((phy->addr << 21) | (devad << 16) | reg |
851 EMAC_MDIO_COMM_COMMAND_ADDRESS | 670 EMAC_MDIO_COMM_COMMAND_ADDRESS |
852 EMAC_MDIO_COMM_START_BUSY); 671 EMAC_MDIO_COMM_START_BUSY);
853 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); 672 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
854 673
855 for (i = 0; i < 50; i++) { 674 for (i = 0; i < 50; i++) {
856 udelay(10); 675 udelay(10);
857 676
858 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); 677 tmp = REG_RD(bp, phy->mdio_ctrl +
678 EMAC_REG_EMAC_MDIO_COMM);
859 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 679 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
860 udelay(5); 680 udelay(5);
861 break; 681 break;
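bnx2x_get_emac_base() above now derives the MDIO controller from the NVRAM wiring description rather than the PHY type; the EMAC0/EMAC1 cases additionally consult the NIG port-swap strap, so a swapped board still talks to the physically wired MAC. A standalone sketch of the mapping, with the register read stubbed out and invented GRC base values:

#include <stdint.h>
#include <stdio.h>

#define ACCESS_EMAC0    0
#define ACCESS_EMAC1    1
#define ACCESS_BOTH     2
#define ACCESS_SWAPPED  3

#define BASE_EMAC0  0x8000    /* invented bases, for illustration only */
#define BASE_EMAC1  0x8400

static int port_swap = 1;     /* stand-in for REG_RD(bp, NIG_REG_PORT_SWAP) */

static uint32_t emac_base(int access, int port)
{
    switch (access) {
    case ACCESS_EMAC0:   return port_swap ? BASE_EMAC1 : BASE_EMAC0;
    case ACCESS_EMAC1:   return port_swap ? BASE_EMAC0 : BASE_EMAC1;
    case ACCESS_BOTH:    return port ? BASE_EMAC1 : BASE_EMAC0;
    case ACCESS_SWAPPED: return port ? BASE_EMAC0 : BASE_EMAC1;
    }
    return 0;
}

int main(void)
{
    printf("EMAC0 access on swapped board -> 0x%x\n",
           emac_base(ACCESS_EMAC0, 0));    /* 0x8400 */
    return 0;
}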
@@ -866,15 +686,15 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
866 rc = -EFAULT; 686 rc = -EFAULT;
867 } else { 687 } else {
868 /* data */ 688 /* data */
869 tmp = ((phy_addr << 21) | (devad << 16) | val | 689 tmp = ((phy->addr << 21) | (devad << 16) | val |
870 EMAC_MDIO_COMM_COMMAND_WRITE_45 | 690 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
871 EMAC_MDIO_COMM_START_BUSY); 691 EMAC_MDIO_COMM_START_BUSY);
872 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); 692 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
873 693
874 for (i = 0; i < 50; i++) { 694 for (i = 0; i < 50; i++) {
875 udelay(10); 695 udelay(10);
876 696
877 tmp = REG_RD(bp, mdio_ctrl + 697 tmp = REG_RD(bp, phy->mdio_ctrl +
878 EMAC_REG_EMAC_MDIO_COMM); 698 EMAC_REG_EMAC_MDIO_COMM);
879 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 699 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
880 udelay(5); 700 udelay(5);
@@ -888,42 +708,41 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
888 } 708 }
889 709
890 /* Restore the saved mode */ 710 /* Restore the saved mode */
891 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode); 711 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
892 712
893 return rc; 713 return rc;
894} 714}
895 715
896u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type, 716u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
897 u8 phy_addr, u8 devad, u16 reg, u16 *ret_val) 717 u8 devad, u16 reg, u16 *ret_val)
898{ 718{
899 u32 val, saved_mode; 719 u32 val, saved_mode;
900 u16 i; 720 u16 i;
901 u8 rc = 0; 721 u8 rc = 0;
902 722
903 u32 mdio_ctrl = bnx2x_get_emac_base(bp, ext_phy_type, port);
904 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz 723 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
905 * (a value of 49==0x31) and make sure that the AUTO poll is off 724 * (a value of 49==0x31) and make sure that the AUTO poll is off
906 */ 725 */
907 726
908 saved_mode = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 727 saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
909 val = saved_mode & ((EMAC_MDIO_MODE_AUTO_POLL | 728 val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
910 EMAC_MDIO_MODE_CLOCK_CNT)); 729 EMAC_MDIO_MODE_CLOCK_CNT));
911 val |= (EMAC_MDIO_MODE_CLAUSE_45 | 730 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
912 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); 731 (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
913 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); 732 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
914 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); 733 REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
915 udelay(40); 734 udelay(40);
916 735
917 /* address */ 736 /* address */
918 val = ((phy_addr << 21) | (devad << 16) | reg | 737 val = ((phy->addr << 21) | (devad << 16) | reg |
919 EMAC_MDIO_COMM_COMMAND_ADDRESS | 738 EMAC_MDIO_COMM_COMMAND_ADDRESS |
920 EMAC_MDIO_COMM_START_BUSY); 739 EMAC_MDIO_COMM_START_BUSY);
921 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); 740 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
922 741
923 for (i = 0; i < 50; i++) { 742 for (i = 0; i < 50; i++) {
924 udelay(10); 743 udelay(10);
925 744
926 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); 745 val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
927 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 746 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
928 udelay(5); 747 udelay(5);
929 break; 748 break;
@@ -937,15 +756,15 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
937 756
938 } else { 757 } else {
939 /* data */ 758 /* data */
940 val = ((phy_addr << 21) | (devad << 16) | 759 val = ((phy->addr << 21) | (devad << 16) |
941 EMAC_MDIO_COMM_COMMAND_READ_45 | 760 EMAC_MDIO_COMM_COMMAND_READ_45 |
942 EMAC_MDIO_COMM_START_BUSY); 761 EMAC_MDIO_COMM_START_BUSY);
943 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); 762 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
944 763
945 for (i = 0; i < 50; i++) { 764 for (i = 0; i < 50; i++) {
946 udelay(10); 765 udelay(10);
947 766
948 val = REG_RD(bp, mdio_ctrl + 767 val = REG_RD(bp, phy->mdio_ctrl +
949 EMAC_REG_EMAC_MDIO_COMM); 768 EMAC_REG_EMAC_MDIO_COMM);
950 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 769 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
951 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); 770 *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
@@ -961,13 +780,49 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type,
961 } 780 }
962 781
963 /* Restore the saved mode */ 782 /* Restore the saved mode */
964 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode); 783 REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode);
965 784
966 return rc; 785 return rc;
967} 786}
968 787
788u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
789 u8 devad, u16 reg, u16 *ret_val)
790{
791 u8 phy_index;
792 /**
793 * Probe for the phy according to the given phy_addr, and execute
794 * the read request on it
795 */
796 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
797 if (params->phy[phy_index].addr == phy_addr) {
798 return bnx2x_cl45_read(params->bp,
799 &params->phy[phy_index], devad,
800 reg, ret_val);
801 }
802 }
803 return -EINVAL;
804}
805
806u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
807 u8 devad, u16 reg, u16 val)
808{
809 u8 phy_index;
810 /**
811 * Probe for the phy according to the given phy_addr, and execute
812 * the write request on it
813 */
814 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
815 if (params->phy[phy_index].addr == phy_addr) {
816 return bnx2x_cl45_write(params->bp,
817 &params->phy[phy_index], devad,
818 reg, val);
819 }
820 }
821 return -EINVAL;
822}
823
969static void bnx2x_set_aer_mmd(struct link_params *params, 824static void bnx2x_set_aer_mmd(struct link_params *params,
970 struct link_vars *vars) 825 struct bnx2x_phy *phy)
971{ 826{
972 struct bnx2x *bp = params->bp; 827 struct bnx2x *bp = params->bp;
973 u32 ser_lane; 828 u32 ser_lane;
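The cl45 helpers above follow the standard Clause 45 two-step: write the register address with COMMAND_ADDRESS, poll START_BUSY until the controller clears it, then issue the read or write data command and poll again; the new bnx2x_phy_read()/bnx2x_phy_write() wrappers simply locate the bnx2x_phy whose MDIO address matches before delegating. A self-contained, simulated sketch of the busy-poll pattern (no hardware access; the fake "register" clears itself after a few polls):

#include <stdint.h>
#include <stdio.h>

#define START_BUSY  (1u << 31)

static uint32_t mdio_comm;    /* simulated EMAC_REG_EMAC_MDIO_COMM */
static int self_clear;        /* polls remaining until BUSY drops */

static void reg_wr(uint32_t val) { mdio_comm = val; self_clear = 3; }

static uint32_t reg_rd(void)
{
    if (self_clear && --self_clear == 0)
        mdio_comm &= ~START_BUSY;    /* "hardware" finished */
    return mdio_comm;
}

/* one CL45 phase: kick a command, then poll START_BUSY with a bounded loop */
static int mdio_phase(uint32_t cmd)
{
    int i;
    reg_wr(cmd | START_BUSY);
    for (i = 0; i < 50; i++)
        if (!(reg_rd() & START_BUSY))
            return 0;
    return -1;    /* timed out, as the driver's -EFAULT path does */
}

int main(void)
{
    /* address phase, then data phase -- mirrors bnx2x_cl45_read() */
    if (mdio_phase(0x1000) == 0 && mdio_phase(0x2000) == 0)
        printf("cl45 transaction completed\n");
    return 0;
}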
@@ -977,16 +832,202 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
977 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 832 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
978 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 833 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
979 834
980 offset = (vars->phy_flags & PHY_XGXS_FLAG) ? 835 offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
981 (params->phy_addr + ser_lane) : 0; 836 (phy->addr + ser_lane) : 0;
982 837
983 CL45_WR_OVER_CL22(bp, params->port, 838 CL45_WR_OVER_CL22(bp, phy,
984 params->phy_addr,
985 MDIO_REG_BANK_AER_BLOCK, 839 MDIO_REG_BANK_AER_BLOCK,
986 MDIO_AER_BLOCK_AER_REG, 0x3800 + offset); 840 MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
987} 841}
988 842
989static void bnx2x_set_master_ln(struct link_params *params) 843/******************************************************************/
844/* Internal phy section */
845/******************************************************************/
846
847static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
848{
849 u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
850
851 /* Set Clause 22 */
852 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
853 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
854 udelay(500);
855 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
856 udelay(500);
857 /* Set Clause 45 */
858 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
859}
860
861static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
862{
863 u32 val;
864
865 DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
866
867 val = SERDES_RESET_BITS << (port*16);
868
869 /* reset and unreset the SerDes/XGXS */
870 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
871 udelay(500);
872 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
873
874 bnx2x_set_serdes_access(bp, port);
875
876 REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
877 port*0x10,
878 DEFAULT_PHY_DEV_ADDR);
879}
880
881static void bnx2x_xgxs_deassert(struct link_params *params)
882{
883 struct bnx2x *bp = params->bp;
884 u8 port;
885 u32 val;
886 DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
887 port = params->port;
888
889 val = XGXS_RESET_BITS << (port*16);
890
891 /* reset and unreset the SerDes/XGXS */
892 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
893 udelay(500);
894 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
895
896 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
897 port*0x18, 0);
898 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
899 params->phy[INT_PHY].def_md_devad);
900}
901
902
903void bnx2x_link_status_update(struct link_params *params,
904 struct link_vars *vars)
905{
906 struct bnx2x *bp = params->bp;
907 u8 link_10g;
908 u8 port = params->port;
909
910 vars->link_status = REG_RD(bp, params->shmem_base +
911 offsetof(struct shmem_region,
912 port_mb[port].link_status));
913
914 vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
915
916 if (vars->link_up) {
917 DP(NETIF_MSG_LINK, "phy link up\n");
918
919 vars->phy_link_up = 1;
920 vars->duplex = DUPLEX_FULL;
921 switch (vars->link_status &
922 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
923 case LINK_10THD:
924 vars->duplex = DUPLEX_HALF;
925 /* fall thru */
926 case LINK_10TFD:
927 vars->line_speed = SPEED_10;
928 break;
929
930 case LINK_100TXHD:
931 vars->duplex = DUPLEX_HALF;
932 /* fall thru */
933 case LINK_100T4:
934 case LINK_100TXFD:
935 vars->line_speed = SPEED_100;
936 break;
937
938 case LINK_1000THD:
939 vars->duplex = DUPLEX_HALF;
940 /* fall thru */
941 case LINK_1000TFD:
942 vars->line_speed = SPEED_1000;
943 break;
944
945 case LINK_2500THD:
946 vars->duplex = DUPLEX_HALF;
947 /* fall thru */
948 case LINK_2500TFD:
949 vars->line_speed = SPEED_2500;
950 break;
951
952 case LINK_10GTFD:
953 vars->line_speed = SPEED_10000;
954 break;
955
956 case LINK_12GTFD:
957 vars->line_speed = SPEED_12000;
958 break;
959
960 case LINK_12_5GTFD:
961 vars->line_speed = SPEED_12500;
962 break;
963
964 case LINK_13GTFD:
965 vars->line_speed = SPEED_13000;
966 break;
967
968 case LINK_15GTFD:
969 vars->line_speed = SPEED_15000;
970 break;
971
972 case LINK_16GTFD:
973 vars->line_speed = SPEED_16000;
974 break;
975
976 default:
977 break;
978 }
979 vars->flow_ctrl = 0;
980 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
981 vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
982
983 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
984 vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
985
986 if (!vars->flow_ctrl)
987 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
988
989 if (vars->line_speed &&
990 ((vars->line_speed == SPEED_10) ||
991 (vars->line_speed == SPEED_100))) {
992 vars->phy_flags |= PHY_SGMII_FLAG;
993 } else {
994 vars->phy_flags &= ~PHY_SGMII_FLAG;
995 }
996
997 /* anything 10 and over uses the bmac */
998 link_10g = ((vars->line_speed == SPEED_10000) ||
999 (vars->line_speed == SPEED_12000) ||
1000 (vars->line_speed == SPEED_12500) ||
1001 (vars->line_speed == SPEED_13000) ||
1002 (vars->line_speed == SPEED_15000) ||
1003 (vars->line_speed == SPEED_16000));
1004 if (link_10g)
1005 vars->mac_type = MAC_TYPE_BMAC;
1006 else
1007 vars->mac_type = MAC_TYPE_EMAC;
1008
1009 } else { /* link down */
1010 DP(NETIF_MSG_LINK, "phy link down\n");
1011
1012 vars->phy_link_up = 0;
1013
1014 vars->line_speed = 0;
1015 vars->duplex = DUPLEX_FULL;
1016 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1017
1018 /* indicate no mac active */
1019 vars->mac_type = MAC_TYPE_NONE;
1020 }
1021
1022 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n",
1023 vars->link_status, vars->phy_link_up);
1024 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
1025 vars->line_speed, vars->duplex, vars->flow_ctrl);
1026}
1027
1028
1029static void bnx2x_set_master_ln(struct link_params *params,
1030 struct bnx2x_phy *phy)
990{ 1031{
991 struct bnx2x *bp = params->bp; 1032 struct bnx2x *bp = params->bp;
992 u16 new_master_ln, ser_lane; 1033 u16 new_master_ln, ser_lane;
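bnx2x_link_status_update() above decodes a packed link_status word: each half-duplex case deliberately falls through into the matching full-duplex case to set the speed, flow control is rebuilt from the TX/RX enable bits, and anything 10G or faster is steered to the BMAC. A standalone sketch of the same decode style, with invented status codes in place of the LINK_* values:

#include <stdio.h>

enum { LNK_1000THD = 1, LNK_1000TFD = 2, LNK_10GTFD = 3 };  /* invented codes */

int main(void)
{
    unsigned status = LNK_1000THD;
    unsigned speed = 0, duplex_full = 1;

    switch (status) {
    case LNK_1000THD:
        duplex_full = 0;
        /* fall thru -- half duplex shares the speed assignment */
    case LNK_1000TFD:
        speed = 1000;
        break;
    case LNK_10GTFD:
        speed = 10000;
        break;
    }

    /* 10G and above would pick the BMAC, everything else the EMAC */
    printf("%u Mb/s, %s duplex, %s\n", speed,
           duplex_full ? "full" : "half",
           speed >= 10000 ? "BMAC" : "EMAC");
    return 0;
}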
@@ -995,47 +1036,44 @@ static void bnx2x_set_master_ln(struct link_params *params)
995 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 1036 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
996 1037
997 /* set the master_ln for AN */ 1038 /* set the master_ln for AN */
998 CL45_RD_OVER_CL22(bp, params->port, 1039 CL45_RD_OVER_CL22(bp, phy,
999 params->phy_addr,
1000 MDIO_REG_BANK_XGXS_BLOCK2, 1040 MDIO_REG_BANK_XGXS_BLOCK2,
1001 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1041 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1002 &new_master_ln); 1042 &new_master_ln);
1003 1043
1004 CL45_WR_OVER_CL22(bp, params->port, 1044 CL45_WR_OVER_CL22(bp, phy,
1005 params->phy_addr,
1006 MDIO_REG_BANK_XGXS_BLOCK2 , 1045 MDIO_REG_BANK_XGXS_BLOCK2 ,
1007 MDIO_XGXS_BLOCK2_TEST_MODE_LANE, 1046 MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
1008 (new_master_ln | ser_lane)); 1047 (new_master_ln | ser_lane));
1009} 1048}
1010 1049
1011static u8 bnx2x_reset_unicore(struct link_params *params) 1050static u8 bnx2x_reset_unicore(struct link_params *params,
1051 struct bnx2x_phy *phy,
1052 u8 set_serdes)
1012{ 1053{
1013 struct bnx2x *bp = params->bp; 1054 struct bnx2x *bp = params->bp;
1014 u16 mii_control; 1055 u16 mii_control;
1015 u16 i; 1056 u16 i;
1016 1057
1017 CL45_RD_OVER_CL22(bp, params->port, 1058 CL45_RD_OVER_CL22(bp, phy,
1018 params->phy_addr,
1019 MDIO_REG_BANK_COMBO_IEEE0, 1059 MDIO_REG_BANK_COMBO_IEEE0,
1020 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); 1060 MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
1021 1061
1022 /* reset the unicore */ 1062 /* reset the unicore */
1023 CL45_WR_OVER_CL22(bp, params->port, 1063 CL45_WR_OVER_CL22(bp, phy,
1024 params->phy_addr,
1025 MDIO_REG_BANK_COMBO_IEEE0, 1064 MDIO_REG_BANK_COMBO_IEEE0,
1026 MDIO_COMBO_IEEE0_MII_CONTROL, 1065 MDIO_COMBO_IEEE0_MII_CONTROL,
1027 (mii_control | 1066 (mii_control |
1028 MDIO_COMBO_IEEO_MII_CONTROL_RESET)); 1067 MDIO_COMBO_IEEO_MII_CONTROL_RESET));
1029 if (params->switch_cfg == SWITCH_CFG_1G) 1068 if (set_serdes)
1030 bnx2x_set_serdes_access(params); 1069 bnx2x_set_serdes_access(bp, params->port);
1031 1070
1032 /* wait for the reset to self clear */ 1071 /* wait for the reset to self clear */
1033 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) { 1072 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
1034 udelay(5); 1073 udelay(5);
1035 1074
1036 /* the reset erased the previous bank value */ 1075 /* the reset erased the previous bank value */
1037 CL45_RD_OVER_CL22(bp, params->port, 1076 CL45_RD_OVER_CL22(bp, phy,
1038 params->phy_addr,
1039 MDIO_REG_BANK_COMBO_IEEE0, 1077 MDIO_REG_BANK_COMBO_IEEE0,
1040 MDIO_COMBO_IEEE0_MII_CONTROL, 1078 MDIO_COMBO_IEEE0_MII_CONTROL,
1041 &mii_control); 1079 &mii_control);
@@ -1051,7 +1089,8 @@ static u8 bnx2x_reset_unicore(struct link_params *params)
1051 1089
1052} 1090}
1053 1091
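The reset loop above follows a common poll-until-self-clear pattern: write the self-clearing reset bit, then re-read with a bounded number of fixed delays. A self-contained sketch with a simulated register; all names and the timeout value are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define RESET_BIT (1u << 15)
#define MDIO_ACCESS_TIMEOUT 50	/* iterations; the driver udelay(5)s each */

/* simulated MII control register that self-clears after a few polls */
static uint16_t read_mii_control(void)
{
	static int polls;
	return (++polls > 3) ? 0 : RESET_BIT;
}

static int wait_reset_self_clear(void)
{
	for (uint16_t i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
		/* the driver waits 5us here, then re-reads the bank */
		if (!(read_mii_control() & RESET_BIT))
			return 0;	/* reset self-cleared */
	}
	return -1;			/* timed out; caller reports failure */
}

int main(void)
{
	printf("reset %s\n", wait_reset_self_clear() ? "stuck" : "cleared");
	return 0;
}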
1054static void bnx2x_set_swap_lanes(struct link_params *params) 1092static void bnx2x_set_swap_lanes(struct link_params *params,
1093 struct bnx2x_phy *phy)
1055{ 1094{
1056 struct bnx2x *bp = params->bp; 1095 struct bnx2x *bp = params->bp;
1057	/* Each two bits represents a lane number: 1096	/* Each two bits represents a lane number:
@@ -1069,71 +1108,62 @@ static void bnx2x_set_swap_lanes(struct link_params *params)
1069 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); 1108 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
1070 1109
1071 if (rx_lane_swap != 0x1b) { 1110 if (rx_lane_swap != 0x1b) {
1072 CL45_WR_OVER_CL22(bp, params->port, 1111 CL45_WR_OVER_CL22(bp, phy,
1073 params->phy_addr,
1074 MDIO_REG_BANK_XGXS_BLOCK2, 1112 MDIO_REG_BANK_XGXS_BLOCK2,
1075 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 1113 MDIO_XGXS_BLOCK2_RX_LN_SWAP,
1076 (rx_lane_swap | 1114 (rx_lane_swap |
1077 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | 1115 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
1078 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); 1116 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
1079 } else { 1117 } else {
1080 CL45_WR_OVER_CL22(bp, params->port, 1118 CL45_WR_OVER_CL22(bp, phy,
1081 params->phy_addr,
1082 MDIO_REG_BANK_XGXS_BLOCK2, 1119 MDIO_REG_BANK_XGXS_BLOCK2,
1083 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); 1120 MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
1084 } 1121 }
1085 1122
1086 if (tx_lane_swap != 0x1b) { 1123 if (tx_lane_swap != 0x1b) {
1087 CL45_WR_OVER_CL22(bp, params->port, 1124 CL45_WR_OVER_CL22(bp, phy,
1088 params->phy_addr,
1089 MDIO_REG_BANK_XGXS_BLOCK2, 1125 MDIO_REG_BANK_XGXS_BLOCK2,
1090 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 1126 MDIO_XGXS_BLOCK2_TX_LN_SWAP,
1091 (tx_lane_swap | 1127 (tx_lane_swap |
1092 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); 1128 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
1093 } else { 1129 } else {
1094 CL45_WR_OVER_CL22(bp, params->port, 1130 CL45_WR_OVER_CL22(bp, phy,
1095 params->phy_addr,
1096 MDIO_REG_BANK_XGXS_BLOCK2, 1131 MDIO_REG_BANK_XGXS_BLOCK2,
1097 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); 1132 MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
1098 } 1133 }
1099} 1134}
1100 1135
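The 0x1b sentinel tested above packs the identity lane permutation, two bits per lane, which is why the driver leaves the swap disabled when the config equals it. A sketch of one plausible decoding; the field order is an assumption here, not taken from the hardware manual:

#include <stdint.h>
#include <stdio.h>

static void decode_lane_swap(uint16_t cfg)
{
	/* four 2-bit fields, read here from the top pair down */
	for (int i = 0; i < 4; i++)
		printf("field %d = %u\n", i, (cfg >> (2 * (3 - i))) & 0x3);
}

int main(void)
{
	/* 0x1b = 00 01 10 11 -> 0,1,2,3: the identity mapping */
	decode_lane_swap(0x1b);
	return 0;
}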
1101static void bnx2x_set_parallel_detection(struct link_params *params, 1136static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
1102 u8 phy_flags) 1137 struct link_params *params)
1103{ 1138{
1104 struct bnx2x *bp = params->bp; 1139 struct bnx2x *bp = params->bp;
1105 u16 control2; 1140 u16 control2;
1106 1141 CL45_RD_OVER_CL22(bp, phy,
1107 CL45_RD_OVER_CL22(bp, params->port,
1108 params->phy_addr,
1109 MDIO_REG_BANK_SERDES_DIGITAL, 1142 MDIO_REG_BANK_SERDES_DIGITAL,
1110 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1143 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1111 &control2); 1144 &control2);
1112 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1145 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1113 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1146 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1114 else 1147 else
1115 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1148 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1116 DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n", 1149 DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1117 params->speed_cap_mask, control2); 1150 phy->speed_cap_mask, control2);
1118 CL45_WR_OVER_CL22(bp, params->port, 1151 CL45_WR_OVER_CL22(bp, phy,
1119 params->phy_addr,
1120 MDIO_REG_BANK_SERDES_DIGITAL, 1152 MDIO_REG_BANK_SERDES_DIGITAL,
1121 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1153 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1122 control2); 1154 control2);
1123 1155
1124 if ((phy_flags & PHY_XGXS_FLAG) && 1156 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
1125 (params->speed_cap_mask & 1157 (phy->speed_cap_mask &
1126 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { 1158 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1127 DP(NETIF_MSG_LINK, "XGXS\n"); 1159 DP(NETIF_MSG_LINK, "XGXS\n");
1128 1160
1129 CL45_WR_OVER_CL22(bp, params->port, 1161 CL45_WR_OVER_CL22(bp, phy,
1130 params->phy_addr,
1131 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1162 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1132 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, 1163 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
1133 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); 1164 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
1134 1165
1135 CL45_RD_OVER_CL22(bp, params->port, 1166 CL45_RD_OVER_CL22(bp, phy,
1136 params->phy_addr,
1137 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1167 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1138 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1168 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1139 &control2); 1169 &control2);
@@ -1142,15 +1172,13 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
1142 control2 |= 1172 control2 |=
1143 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; 1173 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
1144 1174
1145 CL45_WR_OVER_CL22(bp, params->port, 1175 CL45_WR_OVER_CL22(bp, phy,
1146 params->phy_addr,
1147 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1176 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1148 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 1177 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
1149 control2); 1178 control2);
1150 1179
1151 /* Disable parallel detection of HiG */ 1180 /* Disable parallel detection of HiG */
1152 CL45_WR_OVER_CL22(bp, params->port, 1181 CL45_WR_OVER_CL22(bp, phy,
1153 params->phy_addr,
1154 MDIO_REG_BANK_XGXS_BLOCK2, 1182 MDIO_REG_BANK_XGXS_BLOCK2,
1155 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, 1183 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
1156 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | 1184 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
@@ -1158,7 +1186,8 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
1158 } 1186 }
1159} 1187}
1160 1188
1161static void bnx2x_set_autoneg(struct link_params *params, 1189static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
1190 struct link_params *params,
1162 struct link_vars *vars, 1191 struct link_vars *vars,
1163 u8 enable_cl73) 1192 u8 enable_cl73)
1164{ 1193{
@@ -1166,9 +1195,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1166 u16 reg_val; 1195 u16 reg_val;
1167 1196
1168 /* CL37 Autoneg */ 1197 /* CL37 Autoneg */
1169 1198 CL45_RD_OVER_CL22(bp, phy,
1170 CL45_RD_OVER_CL22(bp, params->port,
1171 params->phy_addr,
1172 MDIO_REG_BANK_COMBO_IEEE0, 1199 MDIO_REG_BANK_COMBO_IEEE0,
1173 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 1200 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1174 1201
@@ -1179,15 +1206,13 @@ static void bnx2x_set_autoneg(struct link_params *params,
1179 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1206 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1180 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); 1207 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
1181 1208
1182 CL45_WR_OVER_CL22(bp, params->port, 1209 CL45_WR_OVER_CL22(bp, phy,
1183 params->phy_addr,
1184 MDIO_REG_BANK_COMBO_IEEE0, 1210 MDIO_REG_BANK_COMBO_IEEE0,
1185 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 1211 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1186 1212
1187 /* Enable/Disable Autodetection */ 1213 /* Enable/Disable Autodetection */
1188 1214
1189 CL45_RD_OVER_CL22(bp, params->port, 1215 CL45_RD_OVER_CL22(bp, phy,
1190 params->phy_addr,
1191 MDIO_REG_BANK_SERDES_DIGITAL, 1216 MDIO_REG_BANK_SERDES_DIGITAL,
1192 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); 1217 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1193 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | 1218 reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
@@ -1198,14 +1223,12 @@ static void bnx2x_set_autoneg(struct link_params *params,
1198 else 1223 else
1199 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1224 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1200 1225
1201 CL45_WR_OVER_CL22(bp, params->port, 1226 CL45_WR_OVER_CL22(bp, phy,
1202 params->phy_addr,
1203 MDIO_REG_BANK_SERDES_DIGITAL, 1227 MDIO_REG_BANK_SERDES_DIGITAL,
1204 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); 1228 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
1205 1229
1206 /* Enable TetonII and BAM autoneg */ 1230 /* Enable TetonII and BAM autoneg */
1207 CL45_RD_OVER_CL22(bp, params->port, 1231 CL45_RD_OVER_CL22(bp, phy,
1208 params->phy_addr,
1209 MDIO_REG_BANK_BAM_NEXT_PAGE, 1232 MDIO_REG_BANK_BAM_NEXT_PAGE,
1210 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1233 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1211 &reg_val); 1234 &reg_val);
@@ -1218,23 +1241,20 @@ static void bnx2x_set_autoneg(struct link_params *params,
1218 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | 1241 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1219 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); 1242 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
1220 } 1243 }
1221 CL45_WR_OVER_CL22(bp, params->port, 1244 CL45_WR_OVER_CL22(bp, phy,
1222 params->phy_addr,
1223 MDIO_REG_BANK_BAM_NEXT_PAGE, 1245 MDIO_REG_BANK_BAM_NEXT_PAGE,
1224 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1246 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1225 reg_val); 1247 reg_val);
1226 1248
1227 if (enable_cl73) { 1249 if (enable_cl73) {
1228 /* Enable Cl73 FSM status bits */ 1250 /* Enable Cl73 FSM status bits */
1229 CL45_WR_OVER_CL22(bp, params->port, 1251 CL45_WR_OVER_CL22(bp, phy,
1230 params->phy_addr,
1231 MDIO_REG_BANK_CL73_USERB0, 1252 MDIO_REG_BANK_CL73_USERB0,
1232 MDIO_CL73_USERB0_CL73_UCTRL, 1253 MDIO_CL73_USERB0_CL73_UCTRL,
1233 0xe); 1254 0xe);
1234 1255
1235 /* Enable BAM Station Manager*/ 1256 /* Enable BAM Station Manager*/
1236 CL45_WR_OVER_CL22(bp, params->port, 1257 CL45_WR_OVER_CL22(bp, phy,
1237 params->phy_addr,
1238 MDIO_REG_BANK_CL73_USERB0, 1258 MDIO_REG_BANK_CL73_USERB0,
1239 MDIO_CL73_USERB0_CL73_BAM_CTRL1, 1259 MDIO_CL73_USERB0_CL73_BAM_CTRL1,
1240 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | 1260 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -1242,20 +1262,18 @@ static void bnx2x_set_autoneg(struct link_params *params,
1242 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); 1262 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
1243 1263
1244 /* Advertise CL73 link speeds */ 1264 /* Advertise CL73 link speeds */
1245 CL45_RD_OVER_CL22(bp, params->port, 1265 CL45_RD_OVER_CL22(bp, phy,
1246 params->phy_addr,
1247 MDIO_REG_BANK_CL73_IEEEB1, 1266 MDIO_REG_BANK_CL73_IEEEB1,
1248 MDIO_CL73_IEEEB1_AN_ADV2, 1267 MDIO_CL73_IEEEB1_AN_ADV2,
1249 &reg_val); 1268 &reg_val);
1250 if (params->speed_cap_mask & 1269 if (phy->speed_cap_mask &
1251 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 1270 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1252 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; 1271 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
1253 if (params->speed_cap_mask & 1272 if (phy->speed_cap_mask &
1254 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 1273 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1255 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; 1274 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
1256 1275
1257 CL45_WR_OVER_CL22(bp, params->port, 1276 CL45_WR_OVER_CL22(bp, phy,
1258 params->phy_addr,
1259 MDIO_REG_BANK_CL73_IEEEB1, 1277 MDIO_REG_BANK_CL73_IEEEB1,
1260 MDIO_CL73_IEEEB1_AN_ADV2, 1278 MDIO_CL73_IEEEB1_AN_ADV2,
1261 reg_val); 1279 reg_val);
@@ -1266,38 +1284,35 @@ static void bnx2x_set_autoneg(struct link_params *params,
1266 } else /* CL73 Autoneg Disabled */ 1284 } else /* CL73 Autoneg Disabled */
1267 reg_val = 0; 1285 reg_val = 0;
1268 1286
1269 CL45_WR_OVER_CL22(bp, params->port, 1287 CL45_WR_OVER_CL22(bp, phy,
1270 params->phy_addr,
1271 MDIO_REG_BANK_CL73_IEEEB0, 1288 MDIO_REG_BANK_CL73_IEEEB0,
1272 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); 1289 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
1273} 1290}
1274 1291
1275/* program SerDes, forced speed */ 1292/* program SerDes, forced speed */
1276static void bnx2x_program_serdes(struct link_params *params, 1293static void bnx2x_program_serdes(struct bnx2x_phy *phy,
1294 struct link_params *params,
1277 struct link_vars *vars) 1295 struct link_vars *vars)
1278{ 1296{
1279 struct bnx2x *bp = params->bp; 1297 struct bnx2x *bp = params->bp;
1280 u16 reg_val; 1298 u16 reg_val;
1281 1299
1282	/* program duplex, disable autoneg and sgmii */ 1300	/* program duplex, disable autoneg and sgmii */
1283 CL45_RD_OVER_CL22(bp, params->port, 1301 CL45_RD_OVER_CL22(bp, phy,
1284 params->phy_addr,
1285 MDIO_REG_BANK_COMBO_IEEE0, 1302 MDIO_REG_BANK_COMBO_IEEE0,
1286 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 1303 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1287 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | 1304 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
1288 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1305 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
1289 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); 1306 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
1290 if (params->req_duplex == DUPLEX_FULL) 1307 if (phy->req_duplex == DUPLEX_FULL)
1291 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 1308 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
1292 CL45_WR_OVER_CL22(bp, params->port, 1309 CL45_WR_OVER_CL22(bp, phy,
1293 params->phy_addr,
1294 MDIO_REG_BANK_COMBO_IEEE0, 1310 MDIO_REG_BANK_COMBO_IEEE0,
1295 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 1311 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
1296 1312
1297 /* program speed 1313 /* program speed
1298 - needed only if the speed is greater than 1G (2.5G or 10G) */ 1314 - needed only if the speed is greater than 1G (2.5G or 10G) */
1299 CL45_RD_OVER_CL22(bp, params->port, 1315 CL45_RD_OVER_CL22(bp, phy,
1300 params->phy_addr,
1301 MDIO_REG_BANK_SERDES_DIGITAL, 1316 MDIO_REG_BANK_SERDES_DIGITAL,
1302 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 1317 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
1303 /* clearing the speed value before setting the right speed */ 1318 /* clearing the speed value before setting the right speed */
@@ -1320,14 +1335,14 @@ static void bnx2x_program_serdes(struct link_params *params,
1320 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; 1335 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
1321 } 1336 }
1322 1337
1323 CL45_WR_OVER_CL22(bp, params->port, 1338 CL45_WR_OVER_CL22(bp, phy,
1324 params->phy_addr,
1325 MDIO_REG_BANK_SERDES_DIGITAL, 1339 MDIO_REG_BANK_SERDES_DIGITAL,
1326 MDIO_SERDES_DIGITAL_MISC1, reg_val); 1340 MDIO_SERDES_DIGITAL_MISC1, reg_val);
1327 1341
1328} 1342}
1329 1343
1330static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params) 1344static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
1345 struct link_params *params)
1331{ 1346{
1332 struct bnx2x *bp = params->bp; 1347 struct bnx2x *bp = params->bp;
1333 u16 val = 0; 1348 u16 val = 0;
@@ -1335,29 +1350,28 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
1335 /* configure the 48 bits for BAM AN */ 1350 /* configure the 48 bits for BAM AN */
1336 1351
1337 /* set extended capabilities */ 1352 /* set extended capabilities */
1338 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) 1353 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
1339 val |= MDIO_OVER_1G_UP1_2_5G; 1354 val |= MDIO_OVER_1G_UP1_2_5G;
1340 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 1355 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1341 val |= MDIO_OVER_1G_UP1_10G; 1356 val |= MDIO_OVER_1G_UP1_10G;
1342 CL45_WR_OVER_CL22(bp, params->port, 1357 CL45_WR_OVER_CL22(bp, phy,
1343 params->phy_addr,
1344 MDIO_REG_BANK_OVER_1G, 1358 MDIO_REG_BANK_OVER_1G,
1345 MDIO_OVER_1G_UP1, val); 1359 MDIO_OVER_1G_UP1, val);
1346 1360
1347 CL45_WR_OVER_CL22(bp, params->port, 1361 CL45_WR_OVER_CL22(bp, phy,
1348 params->phy_addr,
1349 MDIO_REG_BANK_OVER_1G, 1362 MDIO_REG_BANK_OVER_1G,
1350 MDIO_OVER_1G_UP3, 0x400); 1363 MDIO_OVER_1G_UP3, 0x400);
1351} 1364}
1352 1365
1353static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc) 1366static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
1367 struct link_params *params, u16 *ieee_fc)
1354{ 1368{
1355 struct bnx2x *bp = params->bp; 1369 struct bnx2x *bp = params->bp;
1356 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 1370 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1357 /* resolve pause mode and advertisement 1371 /* resolve pause mode and advertisement
1358 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 1372 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
1359 1373
1360 switch (params->req_flow_ctrl) { 1374 switch (phy->req_flow_ctrl) {
1361 case BNX2X_FLOW_CTRL_AUTO: 1375 case BNX2X_FLOW_CTRL_AUTO:
1362 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) { 1376 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
1363 *ieee_fc |= 1377 *ieee_fc |=
@@ -1385,30 +1399,30 @@ static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
1385 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc); 1399 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
1386} 1400}
1387 1401
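The requested flow-control mode maps onto the two CL37 advertisement bits, PAUSE (symmetric) and ASM_DIR (asymmetric), per the 802.3 Table 28B-3 reference in the comment above. A sketch with stand-in bit positions; the driver uses its MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_* masks:

#include <stdint.h>
#include <stdio.h>

enum fc { FC_NONE, FC_RX, FC_TX, FC_BOTH };

#define ADV_PAUSE	(1u << 0)	/* symmetric pause (stand-in position) */
#define ADV_ASM_DIR	(1u << 1)	/* asymmetric pause direction */

static uint16_t pause_adv(enum fc req)
{
	switch (req) {
	case FC_BOTH:	return ADV_PAUSE | ADV_ASM_DIR;
	case FC_RX:	return ADV_PAUSE | ADV_ASM_DIR;	/* willing to receive */
	case FC_TX:	return ADV_ASM_DIR;		/* send pause only */
	default:	return 0;
	}
}

int main(void)
{
	for (int r = FC_NONE; r <= FC_BOTH; r++)
		printf("req %d -> adv 0x%x\n", r, pause_adv(r));
	return 0;
}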
1388static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, 1402static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
1403 struct link_params *params,
1389 u16 ieee_fc) 1404 u16 ieee_fc)
1390{ 1405{
1391 struct bnx2x *bp = params->bp; 1406 struct bnx2x *bp = params->bp;
1392 u16 val; 1407 u16 val;
1393 /* for AN, we are always publishing full duplex */ 1408 /* for AN, we are always publishing full duplex */
1394 1409
1395 CL45_WR_OVER_CL22(bp, params->port, 1410 CL45_WR_OVER_CL22(bp, phy,
1396 params->phy_addr,
1397 MDIO_REG_BANK_COMBO_IEEE0, 1411 MDIO_REG_BANK_COMBO_IEEE0,
1398 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); 1412 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
1399 CL45_RD_OVER_CL22(bp, params->port, 1413 CL45_RD_OVER_CL22(bp, phy,
1400 params->phy_addr,
1401 MDIO_REG_BANK_CL73_IEEEB1, 1414 MDIO_REG_BANK_CL73_IEEEB1,
1402 MDIO_CL73_IEEEB1_AN_ADV1, &val); 1415 MDIO_CL73_IEEEB1_AN_ADV1, &val);
1403 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; 1416 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
1404 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); 1417 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
1405 CL45_WR_OVER_CL22(bp, params->port, 1418 CL45_WR_OVER_CL22(bp, phy,
1406 params->phy_addr,
1407 MDIO_REG_BANK_CL73_IEEEB1, 1419 MDIO_REG_BANK_CL73_IEEEB1,
1408 MDIO_CL73_IEEEB1_AN_ADV1, val); 1420 MDIO_CL73_IEEEB1_AN_ADV1, val);
1409} 1421}
1410 1422
1411static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73) 1423static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
1424 struct link_params *params,
1425 u8 enable_cl73)
1412{ 1426{
1413 struct bnx2x *bp = params->bp; 1427 struct bnx2x *bp = params->bp;
1414 u16 mii_control; 1428 u16 mii_control;
@@ -1417,14 +1431,12 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
1417 /* Enable and restart BAM/CL37 aneg */ 1431 /* Enable and restart BAM/CL37 aneg */
1418 1432
1419 if (enable_cl73) { 1433 if (enable_cl73) {
1420 CL45_RD_OVER_CL22(bp, params->port, 1434 CL45_RD_OVER_CL22(bp, phy,
1421 params->phy_addr,
1422 MDIO_REG_BANK_CL73_IEEEB0, 1435 MDIO_REG_BANK_CL73_IEEEB0,
1423 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 1436 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1424 &mii_control); 1437 &mii_control);
1425 1438
1426 CL45_WR_OVER_CL22(bp, params->port, 1439 CL45_WR_OVER_CL22(bp, phy,
1427 params->phy_addr,
1428 MDIO_REG_BANK_CL73_IEEEB0, 1440 MDIO_REG_BANK_CL73_IEEEB0,
1429 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 1441 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1430 (mii_control | 1442 (mii_control |
@@ -1432,16 +1444,14 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
1432 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); 1444 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
1433 } else { 1445 } else {
1434 1446
1435 CL45_RD_OVER_CL22(bp, params->port, 1447 CL45_RD_OVER_CL22(bp, phy,
1436 params->phy_addr,
1437 MDIO_REG_BANK_COMBO_IEEE0, 1448 MDIO_REG_BANK_COMBO_IEEE0,
1438 MDIO_COMBO_IEEE0_MII_CONTROL, 1449 MDIO_COMBO_IEEE0_MII_CONTROL,
1439 &mii_control); 1450 &mii_control);
1440 DP(NETIF_MSG_LINK, 1451 DP(NETIF_MSG_LINK,
1441 "bnx2x_restart_autoneg mii_control before = 0x%x\n", 1452 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
1442 mii_control); 1453 mii_control);
1443 CL45_WR_OVER_CL22(bp, params->port, 1454 CL45_WR_OVER_CL22(bp, phy,
1444 params->phy_addr,
1445 MDIO_REG_BANK_COMBO_IEEE0, 1455 MDIO_REG_BANK_COMBO_IEEE0,
1446 MDIO_COMBO_IEEE0_MII_CONTROL, 1456 MDIO_COMBO_IEEE0_MII_CONTROL,
1447 (mii_control | 1457 (mii_control |
@@ -1450,7 +1460,8 @@ static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
1450 } 1460 }
1451} 1461}
1452 1462
1453static void bnx2x_initialize_sgmii_process(struct link_params *params, 1463static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
1464 struct link_params *params,
1454 struct link_vars *vars) 1465 struct link_vars *vars)
1455{ 1466{
1456 struct bnx2x *bp = params->bp; 1467 struct bnx2x *bp = params->bp;
@@ -1458,8 +1469,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
1458 1469
1459 /* in SGMII mode, the unicore is always slave */ 1470 /* in SGMII mode, the unicore is always slave */
1460 1471
1461 CL45_RD_OVER_CL22(bp, params->port, 1472 CL45_RD_OVER_CL22(bp, phy,
1462 params->phy_addr,
1463 MDIO_REG_BANK_SERDES_DIGITAL, 1473 MDIO_REG_BANK_SERDES_DIGITAL,
1464 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 1474 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
1465 &control1); 1475 &control1);
@@ -1468,8 +1478,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
1468 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | 1478 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
1469 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | 1479 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
1470 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); 1480 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
1471 CL45_WR_OVER_CL22(bp, params->port, 1481 CL45_WR_OVER_CL22(bp, phy,
1472 params->phy_addr,
1473 MDIO_REG_BANK_SERDES_DIGITAL, 1482 MDIO_REG_BANK_SERDES_DIGITAL,
1474 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, 1483 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
1475 control1); 1484 control1);
@@ -1479,8 +1488,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
1479 /* set speed, disable autoneg */ 1488 /* set speed, disable autoneg */
1480 u16 mii_control; 1489 u16 mii_control;
1481 1490
1482 CL45_RD_OVER_CL22(bp, params->port, 1491 CL45_RD_OVER_CL22(bp, phy,
1483 params->phy_addr,
1484 MDIO_REG_BANK_COMBO_IEEE0, 1492 MDIO_REG_BANK_COMBO_IEEE0,
1485 MDIO_COMBO_IEEE0_MII_CONTROL, 1493 MDIO_COMBO_IEEE0_MII_CONTROL,
1486 &mii_control); 1494 &mii_control);
@@ -1508,18 +1516,17 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params,
1508 } 1516 }
1509 1517
1510 /* setting the full duplex */ 1518 /* setting the full duplex */
1511 if (params->req_duplex == DUPLEX_FULL) 1519 if (phy->req_duplex == DUPLEX_FULL)
1512 mii_control |= 1520 mii_control |=
1513 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; 1521 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
1514 CL45_WR_OVER_CL22(bp, params->port, 1522 CL45_WR_OVER_CL22(bp, phy,
1515 params->phy_addr,
1516 MDIO_REG_BANK_COMBO_IEEE0, 1523 MDIO_REG_BANK_COMBO_IEEE0,
1517 MDIO_COMBO_IEEE0_MII_CONTROL, 1524 MDIO_COMBO_IEEE0_MII_CONTROL,
1518 mii_control); 1525 mii_control);
1519 1526
1520 } else { /* AN mode */ 1527 } else { /* AN mode */
1521 /* enable and restart AN */ 1528 /* enable and restart AN */
1522 bnx2x_restart_autoneg(params, 0); 1529 bnx2x_restart_autoneg(phy, params, 0);
1523 } 1530 }
1524} 1531}
1525 1532
@@ -1549,91 +1556,24 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
1549 default: 1556 default:
1550 break; 1557 break;
1551 } 1558 }
1559 if (pause_result & (1<<0))
1560 vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
1561 if (pause_result & (1<<1))
1562 vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
1552} 1563}
1553 1564
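The two added lines report the link partner's advertised bits from the low nibble positions; the resolution itself (the switch elided by this hunk) follows Table 28B-3. A sketch of that table, assuming the packing implied by the shifts used elsewhere in this file, with the local PAUSE/ASM_DIR bits in positions 2-3 and the partner's in 0-1:

#include <stdio.h>

enum fc { FC_NONE, FC_RX, FC_TX, FC_BOTH };

static enum fc resolve_pause(unsigned int nib)
{
	unsigned int lp_pause = nib & 1, lp_asm = (nib >> 1) & 1;
	unsigned int ld_pause = (nib >> 2) & 1, ld_asm = (nib >> 3) & 1;

	if (ld_pause && lp_pause)
		return FC_BOTH;			/* symmetric pause */
	if (ld_asm && !ld_pause && lp_pause && lp_asm)
		return FC_TX;	/* we send pause, partner honours it */
	if (ld_pause && ld_asm && !lp_pause && lp_asm)
		return FC_RX;	/* partner sends pause, we honour it */
	return FC_NONE;
}

int main(void)
{
	printf("0xf -> %d, 0xb -> %d\n", resolve_pause(0xf),
	       resolve_pause(0xb));	/* 3 (BOTH), 2 (TX) */
	return 0;
}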
1554static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params, 1565static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
1555 struct link_vars *vars) 1566 struct link_params *params)
1556{
1557 struct bnx2x *bp = params->bp;
1558 u8 ext_phy_addr;
1559 u16 ld_pause; /* local */
1560 u16 lp_pause; /* link partner */
1561 u16 an_complete; /* AN complete */
1562 u16 pause_result;
1563 u8 ret = 0;
1564 u32 ext_phy_type;
1565 u8 port = params->port;
1566 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
1567 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
1568 /* read twice */
1569
1570 bnx2x_cl45_read(bp, port,
1571 ext_phy_type,
1572 ext_phy_addr,
1573 MDIO_AN_DEVAD,
1574 MDIO_AN_REG_STATUS, &an_complete);
1575 bnx2x_cl45_read(bp, port,
1576 ext_phy_type,
1577 ext_phy_addr,
1578 MDIO_AN_DEVAD,
1579 MDIO_AN_REG_STATUS, &an_complete);
1580
1581 if (an_complete & MDIO_AN_REG_STATUS_AN_COMPLETE) {
1582 ret = 1;
1583 bnx2x_cl45_read(bp, port,
1584 ext_phy_type,
1585 ext_phy_addr,
1586 MDIO_AN_DEVAD,
1587 MDIO_AN_REG_ADV_PAUSE, &ld_pause);
1588 bnx2x_cl45_read(bp, port,
1589 ext_phy_type,
1590 ext_phy_addr,
1591 MDIO_AN_DEVAD,
1592 MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
1593 pause_result = (ld_pause &
1594 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
1595 pause_result |= (lp_pause &
1596 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
1597 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
1598 pause_result);
1599 bnx2x_pause_resolve(vars, pause_result);
1600 if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
1601 ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
1602 bnx2x_cl45_read(bp, port,
1603 ext_phy_type,
1604 ext_phy_addr,
1605 MDIO_AN_DEVAD,
1606 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
1607
1608 bnx2x_cl45_read(bp, port,
1609 ext_phy_type,
1610 ext_phy_addr,
1611 MDIO_AN_DEVAD,
1612 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
1613 pause_result = (ld_pause &
1614 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
1615 pause_result |= (lp_pause &
1616 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
1617
1618 bnx2x_pause_resolve(vars, pause_result);
1619 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
1620 pause_result);
1621 }
1622 }
1623 return ret;
1624}
1625
1626static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1627{ 1567{
1628 struct bnx2x *bp = params->bp; 1568 struct bnx2x *bp = params->bp;
1629 u16 pd_10g, status2_1000x; 1569 u16 pd_10g, status2_1000x;
1630 CL45_RD_OVER_CL22(bp, params->port, 1570 if (phy->req_line_speed != SPEED_AUTO_NEG)
1631 params->phy_addr, 1571 return 0;
1572 CL45_RD_OVER_CL22(bp, phy,
1632 MDIO_REG_BANK_SERDES_DIGITAL, 1573 MDIO_REG_BANK_SERDES_DIGITAL,
1633 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 1574 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1634 &status2_1000x); 1575 &status2_1000x);
1635 CL45_RD_OVER_CL22(bp, params->port, 1576 CL45_RD_OVER_CL22(bp, phy,
1636 params->phy_addr,
1637 MDIO_REG_BANK_SERDES_DIGITAL, 1577 MDIO_REG_BANK_SERDES_DIGITAL,
1638 MDIO_SERDES_DIGITAL_A_1000X_STATUS2, 1578 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1639 &status2_1000x); 1579 &status2_1000x);
@@ -1643,8 +1583,7 @@ static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1643 return 1; 1583 return 1;
1644 } 1584 }
1645 1585
1646 CL45_RD_OVER_CL22(bp, params->port, 1586 CL45_RD_OVER_CL22(bp, phy,
1647 params->phy_addr,
1648 MDIO_REG_BANK_10G_PARALLEL_DETECT, 1587 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1649 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, 1588 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
1650 &pd_10g); 1589 &pd_10g);
@@ -1657,9 +1596,10 @@ static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1657 return 0; 1596 return 0;
1658} 1597}
1659 1598
1660static void bnx2x_flow_ctrl_resolve(struct link_params *params, 1599static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
1661 struct link_vars *vars, 1600 struct link_params *params,
1662 u32 gp_status) 1601 struct link_vars *vars,
1602 u32 gp_status)
1663{ 1603{
1664 struct bnx2x *bp = params->bp; 1604 struct bnx2x *bp = params->bp;
1665 u16 ld_pause; /* local driver */ 1605 u16 ld_pause; /* local driver */
@@ -1669,12 +1609,13 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1669 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 1609 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1670 1610
1671 /* resolve from gp_status in case of AN complete and not sgmii */ 1611 /* resolve from gp_status in case of AN complete and not sgmii */
1672 if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) && 1612 if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
1673 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && 1613 vars->flow_ctrl = phy->req_flow_ctrl;
1674 (!(vars->phy_flags & PHY_SGMII_FLAG)) && 1614 else if (phy->req_line_speed != SPEED_AUTO_NEG)
1675 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1615 vars->flow_ctrl = params->req_fc_auto_adv;
1676 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) { 1616 else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1677 if (bnx2x_direct_parallel_detect_used(params)) { 1617 (!(vars->phy_flags & PHY_SGMII_FLAG))) {
1618 if (bnx2x_direct_parallel_detect_used(phy, params)) {
1678 vars->flow_ctrl = params->req_fc_auto_adv; 1619 vars->flow_ctrl = params->req_fc_auto_adv;
1679 return; 1620 return;
1680 } 1621 }
@@ -1684,13 +1625,11 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1684 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | 1625 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1685 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { 1626 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
1686 1627
1687 CL45_RD_OVER_CL22(bp, params->port, 1628 CL45_RD_OVER_CL22(bp, phy,
1688 params->phy_addr,
1689 MDIO_REG_BANK_CL73_IEEEB1, 1629 MDIO_REG_BANK_CL73_IEEEB1,
1690 MDIO_CL73_IEEEB1_AN_ADV1, 1630 MDIO_CL73_IEEEB1_AN_ADV1,
1691 &ld_pause); 1631 &ld_pause);
1692 CL45_RD_OVER_CL22(bp, params->port, 1632 CL45_RD_OVER_CL22(bp, phy,
1693 params->phy_addr,
1694 MDIO_REG_BANK_CL73_IEEEB1, 1633 MDIO_REG_BANK_CL73_IEEEB1,
1695 MDIO_CL73_IEEEB1_AN_LP_ADV1, 1634 MDIO_CL73_IEEEB1_AN_LP_ADV1,
1696 &lp_pause); 1635 &lp_pause);
@@ -1703,14 +1642,11 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1703 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", 1642 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
1704 pause_result); 1643 pause_result);
1705 } else { 1644 } else {
1706 1645 CL45_RD_OVER_CL22(bp, phy,
1707 CL45_RD_OVER_CL22(bp, params->port,
1708 params->phy_addr,
1709 MDIO_REG_BANK_COMBO_IEEE0, 1646 MDIO_REG_BANK_COMBO_IEEE0,
1710 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, 1647 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1711 &ld_pause); 1648 &ld_pause);
1712 CL45_RD_OVER_CL22(bp, params->port, 1649 CL45_RD_OVER_CL22(bp, phy,
1713 params->phy_addr,
1714 MDIO_REG_BANK_COMBO_IEEE0, 1650 MDIO_REG_BANK_COMBO_IEEE0,
1715 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, 1651 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1716 &lp_pause); 1652 &lp_pause);
@@ -1722,26 +1658,18 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1722 pause_result); 1658 pause_result);
1723 } 1659 }
1724 bnx2x_pause_resolve(vars, pause_result); 1660 bnx2x_pause_resolve(vars, pause_result);
1725 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1726 (bnx2x_ext_phy_resolve_fc(params, vars))) {
1727 return;
1728 } else {
1729 if (params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
1730 vars->flow_ctrl = params->req_fc_auto_adv;
1731 else
1732 vars->flow_ctrl = params->req_flow_ctrl;
1733 } 1661 }
1734 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); 1662 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
1735} 1663}
1736 1664
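The rewritten resolve logic above establishes a clear precedence: an explicit flow-control request wins outright, a forced line speed falls back to the preconfigured auto-advertise value, and only true autoneg consults the negotiated result. A compact sketch; types and names are stand-ins:

#include <stdio.h>

enum fc { FC_AUTO = -1, FC_NONE, FC_RX, FC_TX, FC_BOTH };

static enum fc resolve_fc(enum fc req, int autoneg_speed, int an_complete,
			  enum fc fc_auto_adv, enum fc negotiated)
{
	if (req != FC_AUTO)
		return req;		/* explicitly forced by configuration */
	if (!autoneg_speed)
		return fc_auto_adv;	/* forced speed: nothing negotiated */
	if (an_complete)
		return negotiated;	/* CL37/CL73 resolution */
	return FC_NONE;
}

int main(void)
{
	printf("%d\n", resolve_fc(FC_AUTO, 1, 1, FC_BOTH, FC_RX)); /* 1 (RX) */
	return 0;
}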
1737static void bnx2x_check_fallback_to_cl37(struct link_params *params) 1665static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
1666 struct link_params *params)
1738{ 1667{
1739 struct bnx2x *bp = params->bp; 1668 struct bnx2x *bp = params->bp;
1740 u16 rx_status, ustat_val, cl37_fsm_recieved; 1669 u16 rx_status, ustat_val, cl37_fsm_recieved;
1741 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); 1670 DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
1742 /* Step 1: Make sure signal is detected */ 1671 /* Step 1: Make sure signal is detected */
1743 CL45_RD_OVER_CL22(bp, params->port, 1672 CL45_RD_OVER_CL22(bp, phy,
1744 params->phy_addr,
1745 MDIO_REG_BANK_RX0, 1673 MDIO_REG_BANK_RX0,
1746 MDIO_RX0_RX_STATUS, 1674 MDIO_RX0_RX_STATUS,
1747 &rx_status); 1675 &rx_status);
@@ -1749,16 +1677,14 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
1749 (MDIO_RX0_RX_STATUS_SIGDET)) { 1677 (MDIO_RX0_RX_STATUS_SIGDET)) {
1750 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." 1678 DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
1751 "rx_status(0x80b0) = 0x%x\n", rx_status); 1679 "rx_status(0x80b0) = 0x%x\n", rx_status);
1752 CL45_WR_OVER_CL22(bp, params->port, 1680 CL45_WR_OVER_CL22(bp, phy,
1753 params->phy_addr,
1754 MDIO_REG_BANK_CL73_IEEEB0, 1681 MDIO_REG_BANK_CL73_IEEEB0,
1755 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 1682 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1756 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); 1683 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
1757 return; 1684 return;
1758 } 1685 }
1759 /* Step 2: Check CL73 state machine */ 1686 /* Step 2: Check CL73 state machine */
1760 CL45_RD_OVER_CL22(bp, params->port, 1687 CL45_RD_OVER_CL22(bp, phy,
1761 params->phy_addr,
1762 MDIO_REG_BANK_CL73_USERB0, 1688 MDIO_REG_BANK_CL73_USERB0,
1763 MDIO_CL73_USERB0_CL73_USTAT1, 1689 MDIO_CL73_USERB0_CL73_USTAT1,
1764 &ustat_val); 1690 &ustat_val);
@@ -1773,8 +1699,7 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
1773 } 1699 }
1774 /* Step 3: Check CL37 Message Pages received to indicate LP 1700 /* Step 3: Check CL37 Message Pages received to indicate LP
1775 supports only CL37 */ 1701 supports only CL37 */
1776 CL45_RD_OVER_CL22(bp, params->port, 1702 CL45_RD_OVER_CL22(bp, phy,
1777 params->phy_addr,
1778 MDIO_REG_BANK_REMOTE_PHY, 1703 MDIO_REG_BANK_REMOTE_PHY,
1779 MDIO_REMOTE_PHY_MISC_RX_STATUS, 1704 MDIO_REMOTE_PHY_MISC_RX_STATUS,
1780 &cl37_fsm_recieved); 1705 &cl37_fsm_recieved);
@@ -1792,25 +1717,45 @@ static void bnx2x_check_fallback_to_cl37(struct link_params *params)
1792 connected to a device which does not support cl73, but does support 1717 connected to a device which does not support cl73, but does support
1793 cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */ 1718 cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
1794 /* Disable CL73 */ 1719 /* Disable CL73 */
1795 CL45_WR_OVER_CL22(bp, params->port, 1720 CL45_WR_OVER_CL22(bp, phy,
1796 params->phy_addr,
1797 MDIO_REG_BANK_CL73_IEEEB0, 1721 MDIO_REG_BANK_CL73_IEEEB0,
1798 MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 1722 MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
1799 0); 1723 0);
1800 /* Restart CL37 autoneg */ 1724 /* Restart CL37 autoneg */
1801 bnx2x_restart_autoneg(params, 0); 1725 bnx2x_restart_autoneg(phy, params, 0);
1802 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); 1726 DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
1803} 1727}
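The function above gates the CL73-to-CL37 fallback on three checks in sequence; only when all pass does it disable CL73 and restart CL37 autoneg. The shape of the decision, with illustrative flag names:

#include <stdio.h>

static int should_fall_back_to_cl37(int sigdet, int cl73_fsm_flags_ok,
				    int cl37_msg_pages_seen)
{
	if (!sigdet)
		return 0;	/* step 1: no signal detected, keep CL73 */
	if (!cl73_fsm_flags_ok)
		return 0;	/* step 2: CL73 FSM not in the telltale state */
	return cl37_msg_pages_seen;	/* step 3: LP talks CL37 only */
}

int main(void)
{
	printf("%d %d\n", should_fall_back_to_cl37(1, 1, 1),
	       should_fall_back_to_cl37(1, 0, 1)); /* 1 0 */
	return 0;
}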
1804static u8 bnx2x_link_settings_status(struct link_params *params, 1728
1805 struct link_vars *vars, 1729static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
1806 u32 gp_status, 1730 struct link_params *params,
1807 u8 ext_phy_link_up) 1731 struct link_vars *vars,
1732 u32 gp_status)
1733{
1734 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
1735 vars->link_status |=
1736 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1737
1738 if (bnx2x_direct_parallel_detect_used(phy, params))
1739 vars->link_status |=
1740 LINK_STATUS_PARALLEL_DETECTION_USED;
1741}
1742
1743static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
1744 struct link_params *params,
1745 struct link_vars *vars)
1808{ 1746{
1809 struct bnx2x *bp = params->bp; 1747 struct bnx2x *bp = params->bp;
1810	 u16 new_line_speed; 1748	 u16 new_line_speed, gp_status;
1811 u8 rc = 0; 1749 u8 rc = 0;
1812 vars->link_status = 0;
1813 1750
1751 /* Read gp_status */
1752 CL45_RD_OVER_CL22(bp, phy,
1753 MDIO_REG_BANK_GP_STATUS,
1754 MDIO_GP_STATUS_TOP_AN_STATUS1,
1755 &gp_status);
1756
1757 if (phy->req_line_speed == SPEED_AUTO_NEG)
1758 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
1814 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { 1759 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
1815 DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n", 1760 DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n",
1816 gp_status); 1761 gp_status);
@@ -1823,7 +1768,12 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1823 else 1768 else
1824 vars->duplex = DUPLEX_HALF; 1769 vars->duplex = DUPLEX_HALF;
1825 1770
1826 bnx2x_flow_ctrl_resolve(params, vars, gp_status); 1771 if (SINGLE_MEDIA_DIRECT(params)) {
1772 bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
1773 if (phy->req_line_speed == SPEED_AUTO_NEG)
1774 bnx2x_xgxs_an_resolve(phy, params, vars,
1775 gp_status);
1776 }
1827 1777
1828 switch (gp_status & GP_STATUS_SPEED_MASK) { 1778 switch (gp_status & GP_STATUS_SPEED_MASK) {
1829 case GP_STATUS_10M: 1779 case GP_STATUS_10M:
@@ -1905,56 +1855,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1905 return -EINVAL; 1855 return -EINVAL;
1906 } 1856 }
1907 1857
1908 /* Upon link speed change set the NIG into drain mode.
1909	 This deals with a possible FIFO glitch due to a clock change
1910	 when speed is decreased without a link-down indication */
1911 if (new_line_speed != vars->line_speed) {
1912 if (XGXS_EXT_PHY_TYPE(params->ext_phy_config) !=
1913 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT &&
1914 ext_phy_link_up) {
1915 DP(NETIF_MSG_LINK, "Internal link speed %d is"
1916 " different than the external"
1917 " link speed %d\n", new_line_speed,
1918 vars->line_speed);
1919 vars->phy_link_up = 0;
1920 return 0;
1921 }
1922 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
1923 + params->port*4, 0);
1924 msleep(1);
1925 }
1926 vars->line_speed = new_line_speed; 1858 vars->line_speed = new_line_speed;
1927 vars->link_status |= LINK_STATUS_SERDES_LINK;
1928
1929 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
1930 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1931 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
1932 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1933 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
1934 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1935 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
1936 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1937 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
1938 vars->autoneg = AUTO_NEG_ENABLED;
1939
1940 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
1941 vars->autoneg |= AUTO_NEG_COMPLETE;
1942 vars->link_status |=
1943 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1944 }
1945
1946 vars->autoneg |= AUTO_NEG_PARALLEL_DETECTION_USED;
1947 vars->link_status |=
1948 LINK_STATUS_PARALLEL_DETECTION_USED;
1949
1950 }
1951 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1952 vars->link_status |=
1953 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1954
1955 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1956 vars->link_status |=
1957 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1958 1859
1959 } else { /* link_down */ 1860 } else { /* link_down */
1960 DP(NETIF_MSG_LINK, "phy link down\n"); 1861 DP(NETIF_MSG_LINK, "phy link down\n");
@@ -1963,38 +1864,32 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1963 1864
1964 vars->duplex = DUPLEX_FULL; 1865 vars->duplex = DUPLEX_FULL;
1965 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 1866 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1966 vars->autoneg = AUTO_NEG_DISABLED;
1967 vars->mac_type = MAC_TYPE_NONE; 1867 vars->mac_type = MAC_TYPE_NONE;
1968 1868
1969 if ((params->req_line_speed == SPEED_AUTO_NEG) && 1869 if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
1970 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1870 SINGLE_MEDIA_DIRECT(params)) {
1971 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT))) {
1972 /* Check signal is detected */ 1871 /* Check signal is detected */
1973 bnx2x_check_fallback_to_cl37(params); 1872 bnx2x_check_fallback_to_cl37(phy, params);
1974 } 1873 }
1975 } 1874 }
1976 1875
1977 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n", 1876 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
1978 gp_status, vars->phy_link_up, vars->line_speed); 1877 gp_status, vars->phy_link_up, vars->line_speed);
1979 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x" 1878 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
1980 " autoneg 0x%x\n", 1879 vars->duplex, vars->flow_ctrl, vars->link_status);
1981 vars->duplex,
1982 vars->flow_ctrl, vars->autoneg);
1983 DP(NETIF_MSG_LINK, "link_status 0x%x\n", vars->link_status);
1984
1985 return rc; 1880 return rc;
1986} 1881}
1987 1882
1988static void bnx2x_set_gmii_tx_driver(struct link_params *params) 1883static void bnx2x_set_gmii_tx_driver(struct link_params *params)
1989{ 1884{
1990 struct bnx2x *bp = params->bp; 1885 struct bnx2x *bp = params->bp;
1886 struct bnx2x_phy *phy = &params->phy[INT_PHY];
1991 u16 lp_up2; 1887 u16 lp_up2;
1992 u16 tx_driver; 1888 u16 tx_driver;
1993 u16 bank; 1889 u16 bank;
1994 1890
1995 /* read precomp */ 1891 /* read precomp */
1996 CL45_RD_OVER_CL22(bp, params->port, 1892 CL45_RD_OVER_CL22(bp, phy,
1997 params->phy_addr,
1998 MDIO_REG_BANK_OVER_1G, 1893 MDIO_REG_BANK_OVER_1G,
1999 MDIO_OVER_1G_LP_UP2, &lp_up2); 1894 MDIO_OVER_1G_LP_UP2, &lp_up2);
2000 1895
@@ -2008,8 +1903,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2008 1903
2009 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; 1904 for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
2010 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { 1905 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
2011 CL45_RD_OVER_CL22(bp, params->port, 1906 CL45_RD_OVER_CL22(bp, phy,
2012 params->phy_addr,
2013 bank, 1907 bank,
2014 MDIO_TX0_TX_DRIVER, &tx_driver); 1908 MDIO_TX0_TX_DRIVER, &tx_driver);
2015 1909
@@ -2018,8 +1912,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2018 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { 1912 (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
2019 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; 1913 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2020 tx_driver |= lp_up2; 1914 tx_driver |= lp_up2;
2021 CL45_WR_OVER_CL22(bp, params->port, 1915 CL45_WR_OVER_CL22(bp, phy,
2022 params->phy_addr,
2023 bank, 1916 bank,
2024 MDIO_TX0_TX_DRIVER, tx_driver); 1917 MDIO_TX0_TX_DRIVER, tx_driver);
2025 } 1918 }
@@ -2027,7 +1920,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
2027} 1920}
2028 1921
2029static u8 bnx2x_emac_program(struct link_params *params, 1922static u8 bnx2x_emac_program(struct link_params *params,
2030 u32 line_speed, u32 duplex) 1923 struct link_vars *vars)
2031{ 1924{
2032 struct bnx2x *bp = params->bp; 1925 struct bnx2x *bp = params->bp;
2033 u8 port = params->port; 1926 u8 port = params->port;
@@ -2039,7 +1932,7 @@ static u8 bnx2x_emac_program(struct link_params *params,
2039 (EMAC_MODE_25G_MODE | 1932 (EMAC_MODE_25G_MODE |
2040 EMAC_MODE_PORT_MII_10M | 1933 EMAC_MODE_PORT_MII_10M |
2041 EMAC_MODE_HALF_DUPLEX)); 1934 EMAC_MODE_HALF_DUPLEX));
2042 switch (line_speed) { 1935 switch (vars->line_speed) {
2043 case SPEED_10: 1936 case SPEED_10:
2044 mode |= EMAC_MODE_PORT_MII_10M; 1937 mode |= EMAC_MODE_PORT_MII_10M;
2045 break; 1938 break;
@@ -2058,371 +1951,1369 @@ static u8 bnx2x_emac_program(struct link_params *params,
2058 1951
2059 default: 1952 default:
2060 /* 10G not valid for EMAC */ 1953 /* 10G not valid for EMAC */
2061 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", line_speed); 1954 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
1955 vars->line_speed);
2062 return -EINVAL; 1956 return -EINVAL;
2063 } 1957 }
2064 1958
2065 if (duplex == DUPLEX_HALF) 1959 if (vars->duplex == DUPLEX_HALF)
2066 mode |= EMAC_MODE_HALF_DUPLEX; 1960 mode |= EMAC_MODE_HALF_DUPLEX;
2067 bnx2x_bits_en(bp, 1961 bnx2x_bits_en(bp,
2068 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, 1962 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2069 mode); 1963 mode);
2070 1964
2071 bnx2x_set_led(params, LED_MODE_OPER, line_speed); 1965 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
2072 return 0; 1966 return 0;
2073} 1967}
2074 1968
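The switch in bnx2x_emac_program first clears the speed and duplex bits, then ors in the one matching the resolved speed, rejecting 10G because the EMAC cannot drive it. A sketch with stand-in bit values:

#include <stdint.h>
#include <stdio.h>

#define MODE_MII_10M	 (1u << 0)	/* stand-in bit positions */
#define MODE_25G	 (1u << 1)
#define MODE_HALF_DUPLEX (1u << 2)

static int emac_mode(unsigned int speed_mbps, int half_duplex, uint32_t *mode)
{
	*mode &= ~(MODE_MII_10M | MODE_25G | MODE_HALF_DUPLEX);
	switch (speed_mbps) {
	case 10:   *mode |= MODE_MII_10M; break;
	case 100:  break;		/* plain MII */
	case 1000: break;		/* GMII */
	case 2500: *mode |= MODE_25G; break;
	default:   return -1;		/* 10G and up: not valid for EMAC */
	}
	if (half_duplex)
		*mode |= MODE_HALF_DUPLEX;
	return 0;
}

int main(void)
{
	uint32_t mode = 0;
	printf("%d 0x%x\n", emac_mode(2500, 0, &mode), mode); /* 0 0x2 */
	return 0;
}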
2075/*****************************************************************************/ 1969static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
2076/* External Phy section */ 1970 struct link_params *params)
2077/*****************************************************************************/
2078void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
2079{ 1971{
2080 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1972
2081 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 1973 u16 bank, i = 0;
2082 msleep(1); 1974 struct bnx2x *bp = params->bp;
2083 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1975
2084 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 1976 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
1977 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
1978 CL45_WR_OVER_CL22(bp, phy,
1979 bank,
1980 MDIO_RX0_RX_EQ_BOOST,
1981 phy->rx_preemphasis[i]);
1982 }
1983
1984 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
1985 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
1986 CL45_WR_OVER_CL22(bp, phy,
1987 bank,
1988 MDIO_TX0_TX_DRIVER,
1989 phy->tx_preemphasis[i]);
1990 }
2085} 1991}
2086 1992
2087static void bnx2x_ext_phy_reset(struct link_params *params, 1993static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
2088 struct link_vars *vars) 1994 struct link_params *params,
1995 struct link_vars *vars)
2089{ 1996{
2090 struct bnx2x *bp = params->bp; 1997 struct bnx2x *bp = params->bp;
2091 u32 ext_phy_type; 1998 u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
2092 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 1999 (params->loopback_mode == LOOPBACK_XGXS));
2000 if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
2001 if (SINGLE_MEDIA_DIRECT(params) &&
2002 (params->feature_config_flags &
2003 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
2004 bnx2x_set_preemphasis(phy, params);
2093 2005
2094 DP(NETIF_MSG_LINK, "Port %x: bnx2x_ext_phy_reset\n", params->port); 2006 /* forced speed requested? */
2095 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 2007 if (vars->line_speed != SPEED_AUTO_NEG ||
2096 /* The PHY reset is controlled by GPIO 1 2008 (SINGLE_MEDIA_DIRECT(params) &&
2097 * Give it 1ms of reset pulse 2009 params->loopback_mode == LOOPBACK_EXT)) {
2098 */ 2010 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2099 if (vars->phy_flags & PHY_XGXS_FLAG) {
2100 2011
2101 switch (ext_phy_type) { 2012 /* disable autoneg */
2102 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 2013 bnx2x_set_autoneg(phy, params, vars, 0);
2103 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2104 break;
2105 2014
2106 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 2015 /* program speed and duplex */
2107 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 2016 bnx2x_program_serdes(phy, params, vars);
2108 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
2109 2017
2110 /* Restore normal power mode*/ 2018 } else { /* AN_mode */
2111 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2019 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
2112 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2113 params->port);
2114 2020
2115 /* HW reset */ 2021 /* AN enabled */
2116 bnx2x_ext_phy_hw_reset(bp, params->port); 2022 bnx2x_set_brcm_cl37_advertisment(phy, params);
2117 2023
2118 bnx2x_cl45_write(bp, params->port, 2024 /* program duplex & pause advertisement (for aneg) */
2119 ext_phy_type, 2025 bnx2x_set_ieee_aneg_advertisment(phy, params,
2120 ext_phy_addr, 2026 vars->ieee_fc);
2121 MDIO_PMA_DEVAD,
2122 MDIO_PMA_REG_CTRL, 0xa040);
2123 break;
2124 2027
2125 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 2028 /* enable autoneg */
2126 break; 2029 bnx2x_set_autoneg(phy, params, vars, enable_cl73);
2127 2030
2128 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 2031 /* enable and restart AN */
2032 bnx2x_restart_autoneg(phy, params, enable_cl73);
2033 }
2129 2034
2130 /* Restore normal power mode*/ 2035 } else { /* SGMII mode */
2131 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2036 DP(NETIF_MSG_LINK, "SGMII\n");
2132 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2133 params->port);
2134 2037
2135 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2038 bnx2x_initialize_sgmii_process(phy, params, vars);
2136 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2039 }
2137 params->port); 2040}
2138 2041
2139 bnx2x_cl45_write(bp, params->port, 2042static u8 bnx2x_init_serdes(struct bnx2x_phy *phy,
2140 ext_phy_type, 2043 struct link_params *params,
2141 ext_phy_addr, 2044 struct link_vars *vars)
2142 MDIO_PMA_DEVAD, 2045{
2143 MDIO_PMA_REG_CTRL, 2046 u8 rc;
2144 1<<15); 2047 vars->phy_flags |= PHY_SGMII_FLAG;
2145 break; 2048 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2049 bnx2x_set_aer_mmd(params, phy);
2050 rc = bnx2x_reset_unicore(params, phy, 1);
2051 /* reset the SerDes and wait for reset bit return low */
2052 if (rc != 0)
2053 return rc;
2054 bnx2x_set_aer_mmd(params, phy);
2146 2055
2147 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 2056 return rc;
2148 DP(NETIF_MSG_LINK, "XGXS 8072\n"); 2057}
2149 2058
2150 /* Unset Low Power Mode and SW reset */ 2059static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
2151 /* Restore normal power mode*/ 2060 struct link_params *params,
2152 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2061 struct link_vars *vars)
2153 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2062{
2154 params->port); 2063 u8 rc;
2064 vars->phy_flags = PHY_XGXS_FLAG;
2065 if ((phy->req_line_speed &&
2066 ((phy->req_line_speed == SPEED_100) ||
2067 (phy->req_line_speed == SPEED_10))) ||
2068 (!phy->req_line_speed &&
2069 (phy->speed_cap_mask >=
2070 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
2071 (phy->speed_cap_mask <
2072 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
2073 ))
2074 vars->phy_flags |= PHY_SGMII_FLAG;
2075 else
2076 vars->phy_flags &= ~PHY_SGMII_FLAG;
2155 2077
2156 bnx2x_cl45_write(bp, params->port, 2078 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2157 ext_phy_type, 2079 bnx2x_set_aer_mmd(params, phy);
2158 ext_phy_addr, 2080 bnx2x_set_master_ln(params, phy);
2159 MDIO_PMA_DEVAD,
2160 MDIO_PMA_REG_CTRL,
2161 1<<15);
2162 break;
2163 2081
2164 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 2082 rc = bnx2x_reset_unicore(params, phy, 0);
2165 DP(NETIF_MSG_LINK, "XGXS 8073\n"); 2083 /* reset the SerDes and wait for reset bit return low */
2084 if (rc != 0)
2085 return rc;
2166 2086
2167 /* Restore normal power mode*/ 2087 bnx2x_set_aer_mmd(params, phy);
2168 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2169 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2170 params->port);
2171 2088
2172 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2089 /* setting the masterLn_def again after the reset */
2173 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2090 bnx2x_set_master_ln(params, phy);
2174 params->port); 2091 bnx2x_set_swap_lanes(params, phy);
2092
2093 return rc;
2094}
2095
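The phy_flags decision in bnx2x_init_xgxs above selects SGMII when the forced speed is 10/100M, or when no speed is forced and the capability mask tops out below 1G. A condensed sketch; the CAP_* values are illustrative stand-ins for the PORT_HW_CFG_SPEED_CAPABILITY_D0_* constants:

#include <stdio.h>

#define CAP_10M_FULL 0x01	/* illustrative stand-in values */
#define CAP_1G       0x08

static int use_sgmii(unsigned int forced_speed_mbps, unsigned int cap_mask)
{
	if (forced_speed_mbps == 10 || forced_speed_mbps == 100)
		return 1;
	if (!forced_speed_mbps &&
	    cap_mask >= CAP_10M_FULL && cap_mask < CAP_1G)
		return 1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", use_sgmii(100, 0), use_sgmii(0, 0x10)); /* 1 0 */
	return 0;
}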
2096static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
2097 struct bnx2x_phy *phy)
2098{
2099 u16 cnt, ctrl;
2100	u16 cnt, ctrl;	/* Wait for soft reset to get cleared, up to 1 sec */
2101 for (cnt = 0; cnt < 1000; cnt++) {
2102 bnx2x_cl45_read(bp, phy,
2103 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &ctrl);
2104 if (!(ctrl & (1<<15)))
2175 break; 2105 break;
2106 msleep(1);
2107 }
2108 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
2109 return cnt;
2110}
2176 2111
2177 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 2112static void bnx2x_link_int_enable(struct link_params *params)
2178 DP(NETIF_MSG_LINK, "XGXS SFX7101\n"); 2113{
2114 u8 port = params->port;
2115 u32 mask;
2116 struct bnx2x *bp = params->bp;
2179 2117
2180 /* Restore normal power mode*/ 2118 /* setting the status to report on link up
2181 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2119 for either XGXS or SerDes */
2182 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
2183 params->port);
2184 2120
2185 /* HW reset */ 2121 if (params->switch_cfg == SWITCH_CFG_10G) {
2186 bnx2x_ext_phy_hw_reset(bp, params->port); 2122 mask = (NIG_MASK_XGXS0_LINK10G |
2187 break; 2123 NIG_MASK_XGXS0_LINK_STATUS);
2124 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
2125 if (!(SINGLE_MEDIA_DIRECT(params)) &&
2126 params->phy[INT_PHY].type !=
2127 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
2128 mask |= NIG_MASK_MI_INT;
2129 DP(NETIF_MSG_LINK, "enabled external phy int\n");
2130 }
2188 2131
2189 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 2132 } else { /* SerDes */
2190 /* Restore normal power mode*/ 2133 mask = NIG_MASK_SERDES0_LINK_STATUS;
2191 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2134 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
2192 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 2135 if (!(SINGLE_MEDIA_DIRECT(params)) &&
2193 params->port); 2136 params->phy[INT_PHY].type !=
2137 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
2138 mask |= NIG_MASK_MI_INT;
2139 DP(NETIF_MSG_LINK, "enabled external phy int\n");
2140 }
2141 }
2142 bnx2x_bits_en(bp,
2143 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2144 mask);
2194 2145
2195 /* HW reset */ 2146 DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
2196 bnx2x_ext_phy_hw_reset(bp, params->port); 2147 (params->switch_cfg == SWITCH_CFG_10G),
2148 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
2149 DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
2150 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
2151 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2152 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
2153 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
2154 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2155 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
2156}
2197 2157
2198 bnx2x_cl45_write(bp, params->port, 2158static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
2199 ext_phy_type, 2159 u8 exp_mi_int)
2200 ext_phy_addr, 2160{
2201 MDIO_PMA_DEVAD, 2161 u32 latch_status = 0;
2202 MDIO_PMA_REG_CTRL,
2203 1<<15);
2204 break;
2205 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
2206 break;
2207 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
2208 DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
2209 break;
2210 2162
2211 default: 2163 /**
2212 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", 2164	 * Disable the MI INT (external phy int) by writing 1 to the
2213 params->ext_phy_config); 2165	 * status register. Link-down indication is an active-high signal,
2214 break; 2166	 * so in this case we need to write the status to clear the XOR
2167 */
2168 /* Read Latched signals */
2169 latch_status = REG_RD(bp,
2170 NIG_REG_LATCH_STATUS_0 + port*8);
2171 DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
2172	/* Handle only those with latched-signal=up. */
2173 if (exp_mi_int)
2174 bnx2x_bits_en(bp,
2175 NIG_REG_STATUS_INTERRUPT_PORT0
2176 + port*4,
2177 NIG_STATUS_EMAC0_MI_INT);
2178 else
2179 bnx2x_bits_dis(bp,
2180 NIG_REG_STATUS_INTERRUPT_PORT0
2181 + port*4,
2182 NIG_STATUS_EMAC0_MI_INT);
2183
2184 if (latch_status & 1) {
2185
2186 /* For all latched-signal=up : Re-Arm Latch signals */
2187 REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
2188 (latch_status & 0xfffe) | (latch_status & 1));
2189 }
2190	/* For all latched-signal=up, write original_signal to status */
2191}
2192
2193static void bnx2x_link_int_ack(struct link_params *params,
2194 struct link_vars *vars, u8 is_10g)
2195{
2196 struct bnx2x *bp = params->bp;
2197 u8 port = params->port;
2198
2199 /* first reset all status
2200	 * we assume only one line will change at a time */
2201 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2202 (NIG_STATUS_XGXS0_LINK10G |
2203 NIG_STATUS_XGXS0_LINK_STATUS |
2204 NIG_STATUS_SERDES0_LINK_STATUS));
2205 if (vars->phy_link_up) {
2206 if (is_10g) {
2207 /* Disable the 10G link interrupt
2208 * by writing 1 to the status register
2209 */
2210 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
2211 bnx2x_bits_en(bp,
2212 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2213 NIG_STATUS_XGXS0_LINK10G);
2214
2215 } else if (params->switch_cfg == SWITCH_CFG_10G) {
2216 /* Disable the link interrupt
2217 * by writing 1 to the relevant lane
2218 * in the status register
2219 */
2220 u32 ser_lane = ((params->lane_config &
2221 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
2222 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
2223
2224 DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
2225 vars->line_speed);
2226 bnx2x_bits_en(bp,
2227 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2228 ((1 << ser_lane) <<
2229 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
2230
2231 } else { /* SerDes */
2232 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
2233 /* Disable the link interrupt
2234 * by writing 1 to the status register
2235 */
2236 bnx2x_bits_en(bp,
2237 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2238 NIG_STATUS_SERDES0_LINK_STATUS);
2215 		}
2216 
2217 	} else { /* SerDes */
2218 		ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
2219 		switch (ext_phy_type) {
2220 		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2221 			DP(NETIF_MSG_LINK, "SerDes Direct\n");
2222 			break;
2239 		}
2240 
2241 	}
2242 }
2243 
2244 static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len)
2245 {
2246 	u8 *str_ptr = str;
2247 u32 mask = 0xf0000000;
2248 u8 shift = 8*4;
2249 u8 digit;
2250 u8 remove_leading_zeros = 1;
2251 if (*len < 10) {
2252 		/* Need more than 10 chars for this format */
2253 *str_ptr = '\0';
2254 (*len)--;
2255 return -EINVAL;
2256 }
2257 while (shift > 0) {
2258
2259 shift -= 4;
2260 digit = ((num & mask) >> shift);
2261 if (digit == 0 && remove_leading_zeros) {
2262 mask = mask >> 4;
2263 continue;
2264 } else if (digit < 0xa)
2265 *str_ptr = digit + '0';
2266 else
2267 *str_ptr = digit - 0xa + 'a';
2268 remove_leading_zeros = 0;
2269 str_ptr++;
2270 (*len)--;
2271 mask = mask >> 4;
2272 if (shift == 4*4) {
2273 *str_ptr = '.';
2274 str_ptr++;
2275 (*len)--;
2276 remove_leading_zeros = 1;
2277 }
2278 }
2279 return 0;
2280}
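bnx2x_format_ver renders a 32-bit word as two hex fields separated by a dot, dropping leading zeros within each field (0x0102000d becomes "102.d"). A minimal userspace re-implementation of the same loop, for illustration only:

#include <stdio.h>
#include <stdint.h>

static void format_ver(uint32_t num, char *out)
{
	uint32_t mask = 0xf0000000;
	int shift = 32, skip = 1;

	while (shift > 0) {
		int digit;

		shift -= 4;
		digit = (int)((num & mask) >> shift);
		if (digit == 0 && skip) {
			mask >>= 4;
			continue;	/* drop leading zeros of the field */
		}
		*out++ = digit < 10 ? '0' + digit : 'a' + (digit - 10);
		skip = 0;
		mask >>= 4;
		if (shift == 16) {	/* boundary between the two fields */
			*out++ = '.';
			skip = 1;
		}
	}
	*out = '\0';
}

int main(void)
{
	char buf[12];		/* the driver requires at least 10 chars */

	format_ver(0x0102000du, buf);
	printf("%s\n", buf);	/* prints "102.d" */
	return 0;
}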
2281
2282
2283static u8 bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
2284{
2285 str[0] = '\0';
2286 (*len)--;
2287 return 0;
2288}
2289
2290u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
2291 u8 *version, u16 len)
2292{
2293 struct bnx2x *bp;
2294 u32 spirom_ver = 0;
2295 u8 status = 0;
2296 u8 *ver_p = version;
2297 u16 remain_len = len;
2298 if (version == NULL || params == NULL)
2299 return -EINVAL;
2300 bp = params->bp;
2301
2302 /* Extract first external phy*/
2303 version[0] = '\0';
2304 spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
2305
2306 if (params->phy[EXT_PHY1].format_fw_ver) {
2307 status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
2308 ver_p,
2309 &remain_len);
2310 ver_p += (len - remain_len);
2311 }
2312 if ((params->num_phys == MAX_PHYS) &&
2313 (params->phy[EXT_PHY2].ver_addr != 0)) {
2314 spirom_ver = REG_RD(bp,
2315 params->phy[EXT_PHY2].ver_addr);
2316 if (params->phy[EXT_PHY2].format_fw_ver) {
2317 *ver_p = '/';
2318 ver_p++;
2319 remain_len--;
2320 status |= params->phy[EXT_PHY2].format_fw_ver(
2321 spirom_ver,
2322 ver_p,
2323 &remain_len);
2324 ver_p = version + (len - remain_len);
2325 }
2326 }
2327 *ver_p = '\0';
2328 return status;
2329}
2330
2331static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
2332 struct link_params *params)
2333{
2334 u8 port = params->port;
2335 struct bnx2x *bp = params->bp;
2336
2337 if (phy->req_line_speed != SPEED_1000) {
2338 u32 md_devad;
2339
2340 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
2341
2342 /* change the uni_phy_addr in the nig */
2343 md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
2344 port*0x18));
2345
2346 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
2347
2348 bnx2x_cl45_write(bp, phy,
2349 5,
2350 (MDIO_REG_BANK_AER_BLOCK +
2351 (MDIO_AER_BLOCK_AER_REG & 0xf)),
2352 0x2800);
2353
2354 bnx2x_cl45_write(bp, phy,
2355 5,
2356 (MDIO_REG_BANK_CL73_IEEEB0 +
2357 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
2358 0x6041);
2359 msleep(200);
2360 /* set aer mmd back */
2361 bnx2x_set_aer_mmd(params, phy);
2362
2363 /* and md_devad */
2364 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
2365 md_devad);
2366
2367 } else {
2368 u16 mii_ctrl;
2369 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
2370 bnx2x_cl45_read(bp, phy, 5,
2371 (MDIO_REG_BANK_COMBO_IEEE0 +
2372 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
2373 &mii_ctrl);
2374 bnx2x_cl45_write(bp, phy, 5,
2375 (MDIO_REG_BANK_COMBO_IEEE0 +
2376 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
2377 mii_ctrl |
2378 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
2379 }
2380}
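The 1G branch above is a plain read-modify-write of the MII control register: read, OR in the loopback bit, write back. Reduced to its essence; the bit position here is the standard MII one and is illustrative, the driver uses MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK:

#define MII_CTRL_LOOPBACK (1u << 14)	/* standard MII loopback bit */

static unsigned int enable_mii_loopback(unsigned int mii_ctrl)
{
	return mii_ctrl | MII_CTRL_LOOPBACK;	/* read-modify-write step */
}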
2381
2382/*
2383 *------------------------------------------------------------------------
2384 * bnx2x_override_led_value -
2385 *
2386 * Override the led value of the requested led
2387 *
2388 *------------------------------------------------------------------------
2389 */
2390u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
2391 u32 led_idx, u32 value)
2392{
2393 u32 reg_val;
2223 
2224 	case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2225 		DP(NETIF_MSG_LINK, "SerDes 5482\n");
2226 		bnx2x_ext_phy_hw_reset(bp, params->port);
2394 
2395 	/* If port 0 then use EMAC0, else use EMAC1*/
2396 	u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2397 
2398 DP(NETIF_MSG_LINK,
2399 "bnx2x_override_led_value() port %x led_idx %d value %d\n",
2400 port, led_idx, value);
2401
2402 switch (led_idx) {
2403 case 0: /* 10MB led */
2404 /* Read the current value of the LED register in
2405 the EMAC block */
2406 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2407 /* Set the OVERRIDE bit to 1 */
2408 reg_val |= EMAC_LED_OVERRIDE;
2409 /* If value is 1, set the 10M_OVERRIDE bit,
2410 otherwise reset it.*/
2411 reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
2412 (reg_val & ~EMAC_LED_10MB_OVERRIDE);
2413 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2414 break;
2415 case 1: /*100MB led */
2416 /*Read the current value of the LED register in
2417 the EMAC block */
2418 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2419 /* Set the OVERRIDE bit to 1 */
2420 reg_val |= EMAC_LED_OVERRIDE;
2421 /* If value is 1, set the 100M_OVERRIDE bit,
2422 otherwise reset it.*/
2423 reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
2424 (reg_val & ~EMAC_LED_100MB_OVERRIDE);
2425 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2426 break;
2427 case 2: /* 1000MB led */
2428 /* Read the current value of the LED register in the
2429 EMAC block */
2430 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2431 /* Set the OVERRIDE bit to 1 */
2432 reg_val |= EMAC_LED_OVERRIDE;
2433 /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
2434 reset it. */
2435 reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
2436 (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
2437 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2438 break;
2439 case 3: /* 2500MB led */
2440 /* Read the current value of the LED register in the
2441 EMAC block*/
2442 reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
2443 /* Set the OVERRIDE bit to 1 */
2444 reg_val |= EMAC_LED_OVERRIDE;
2445 /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
2446 reset it.*/
2447 reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
2448 (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
2449 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2450 break;
2451 case 4: /*10G led */
2452 if (port == 0) {
2453 REG_WR(bp, NIG_REG_LED_10G_P0,
2454 value);
2455 } else {
2456 REG_WR(bp, NIG_REG_LED_10G_P1,
2457 value);
2458 }
2459 break;
2460 case 5: /* TRAFFIC led */
2461 /* Find if the traffic control is via BMAC or EMAC */
2462 if (port == 0)
2463 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
2464 else
2465 reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
2466
2467 /* Override the traffic led in the EMAC:*/
2468 if (reg_val == 1) {
2469 /* Read the current value of the LED register in
2470 the EMAC block */
2471 reg_val = REG_RD(bp, emac_base +
2472 EMAC_REG_EMAC_LED);
2473 /* Set the TRAFFIC_OVERRIDE bit to 1 */
2474 reg_val |= EMAC_LED_OVERRIDE;
2475 /* If value is 1, set the TRAFFIC bit, otherwise
2476 reset it.*/
2477 reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
2478 (reg_val & ~EMAC_LED_TRAFFIC);
2479 REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
2480 } else { /* Override the traffic led in the BMAC: */
2481 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
2482 + port*4, 1);
2483 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
2484 value);
2485 }
2486 break;
2487 default:
2488 DP(NETIF_MSG_LINK,
2489 "bnx2x_override_led_value() unknown led index %d "
2490 "(should be 0-5)\n", led_idx);
2491 return -EINVAL;
2492 }
2493
2494 return 0;
2495}
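Each LED case above repeats one read-modify-write recipe: read EMAC_REG_EMAC_LED, force the OVERRIDE bit on, then set or clear the per-speed override bit according to the requested value. A condensed sketch of that recipe; the constant is an illustrative stand-in for EMAC_LED_OVERRIDE:

#include <stdint.h>

#define LED_OVERRIDE_BIT (1u << 0)	/* placeholder bit position */

static uint32_t led_override(uint32_t reg_val, uint32_t speed_bit,
			     uint32_t value)
{
	reg_val |= LED_OVERRIDE_BIT;
	return (value == 1) ? (reg_val | speed_bit) : (reg_val & ~speed_bit);
}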
2496
2497
2498u8 bnx2x_set_led(struct link_params *params,
2499 struct link_vars *vars, u8 mode, u32 speed)
2500{
2501 u8 port = params->port;
2502 u16 hw_led_mode = params->hw_led_mode;
2503 u8 rc = 0, phy_idx;
2504 u32 tmp;
2505 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2506 struct bnx2x *bp = params->bp;
2507 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
2508 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
2509 speed, hw_led_mode);
2510 	/* In case the phy defines a dedicated LED handler, call it */
2511 for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
2512 if (params->phy[phy_idx].set_link_led) {
2513 params->phy[phy_idx].set_link_led(
2514 &params->phy[phy_idx], params, mode);
2515 }
2516 }
2517
2518 switch (mode) {
2519 case LED_MODE_FRONT_PANEL_OFF:
2520 case LED_MODE_OFF:
2521 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
2522 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
2523 SHARED_HW_CFG_LED_MAC1);
2524
2525 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
2526 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
2527 break;
2528
2529 case LED_MODE_OPER:
2530 /**
2531 		 * For all other phys, OPER mode is the same as ON, so if the
2532 		 * link is down, do nothing
2533 **/
2534 if (!vars->link_up)
2227 			break;
2535 			break;
2536 case LED_MODE_ON:
2537 if (SINGLE_MEDIA_DIRECT(params)) {
2538 /**
2539 			 * This is a work-around for a HW issue found when the
2540 			 * link is up in CL73
2541 */
2542 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
2543 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
2544 } else {
2545 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
2546 hw_led_mode);
2547 }
2228 
2229 		default:
2230 			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2231 			   params->ext_phy_config);
2548 
2549 		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
2550 		       port*4, 0);
2551 		/* Set blinking rate to ~15.9Hz */
2552 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
2553 LED_BLINK_RATE_VAL);
2554 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
2555 port*4, 1);
2556 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
2557 EMAC_WR(bp, EMAC_REG_EMAC_LED,
2558 (tmp & (~EMAC_LED_OVERRIDE)));
2559
2560 if (CHIP_IS_E1(bp) &&
2561 ((speed == SPEED_2500) ||
2562 (speed == SPEED_1000) ||
2563 (speed == SPEED_100) ||
2564 (speed == SPEED_10))) {
2565 			/* On Everest 1 Ax chip versions, for speeds below
2566 			   10G the LED scheme is different */
2567 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
2568 + port*4, 1);
2569 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
2570 port*4, 0);
2571 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
2572 port*4, 1);
2573 }
2574 break;
2575
2576 default:
2577 rc = -EINVAL;
2578 DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
2579 mode);
2580 break;
2581 }
2582 return rc;
2583
2584}
2585
2586/**
2587  * This function reflects the actual link state, read directly from the
2588  * HW
2589 */
2590u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
2591 u8 is_serdes)
2592{
2593 struct bnx2x *bp = params->bp;
2594 u16 gp_status = 0, phy_index = 0;
2595 u8 ext_phy_link_up = 0, serdes_phy_type;
2596 struct link_vars temp_vars;
2597
2598 CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
2599 MDIO_REG_BANK_GP_STATUS,
2600 MDIO_GP_STATUS_TOP_AN_STATUS1,
2601 &gp_status);
2602 /* link is up only if both local phy and external phy are up */
2603 if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
2604 return -ESRCH;
2605
2606 switch (params->num_phys) {
2607 case 1:
2608 /* No external PHY */
2609 return 0;
2610 case 2:
2611 ext_phy_link_up = params->phy[EXT_PHY1].read_status(
2612 &params->phy[EXT_PHY1],
2613 params, &temp_vars);
2614 break;
2615 case 3: /* Dual Media */
2616 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
2617 phy_index++) {
2618 serdes_phy_type = ((params->phy[phy_index].media_type ==
2619 ETH_PHY_SFP_FIBER) ||
2620 (params->phy[phy_index].media_type ==
2621 ETH_PHY_XFP_FIBER));
2622
2623 if (is_serdes != serdes_phy_type)
2624 continue;
2625 if (params->phy[phy_index].read_status) {
2626 ext_phy_link_up |=
2627 params->phy[phy_index].read_status(
2628 &params->phy[phy_index],
2629 params, &temp_vars);
2630 }
2631 }
2632 break;
2633 }
2634 if (ext_phy_link_up)
2635 return 0;
2636 return -ESRCH;
2637}
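In the dual-media case above, each external PHY is matched against the query: a PHY counts as SerDes when its module is SFP+ or XFP fiber. The predicate in isolation; the enum values are placeholders for the driver's ETH_PHY_* media types:

enum media { MEDIA_SFP_FIBER, MEDIA_XFP_FIBER, MEDIA_BASE_T };

static int is_serdes_media(enum media m)
{
	return m == MEDIA_SFP_FIBER || m == MEDIA_XFP_FIBER;
}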
2638
2639static u8 bnx2x_link_initialize(struct link_params *params,
2640 struct link_vars *vars)
2641{
2642 u8 rc = 0;
2643 u8 phy_index, non_ext_phy;
2644 struct bnx2x *bp = params->bp;
2645 /**
2646 	 * When an external phy exists, the line speed is the speed the
2647 	 * external phy linked up at. When the board is direct
2648 	 * only, the line_speed during initialization is
2649 	 * equal to the req_line_speed
2650 */
2651 vars->line_speed = params->phy[INT_PHY].req_line_speed;
2652
2653 /**
2654 * Initialize the internal phy in case this is a direct board
2655 	 * (no external phys), or this board has an external phy which requires
2656 	 * the internal phy to be initialized first.
2657 */
2658
2659 if (params->phy[INT_PHY].config_init)
2660 params->phy[INT_PHY].config_init(
2661 &params->phy[INT_PHY],
2662 params, vars);
2663
2664 /* init ext phy and enable link state int */
2665 non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
2666 (params->loopback_mode == LOOPBACK_XGXS));
2667
2668 if (non_ext_phy ||
2669 (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
2670 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
2671 struct bnx2x_phy *phy = &params->phy[INT_PHY];
2672 if (vars->line_speed == SPEED_AUTO_NEG)
2673 bnx2x_set_parallel_detection(phy, params);
2674 bnx2x_init_internal_phy(phy, params, vars);
2675 }
2676
2677 /* Init external phy*/
2678 if (!non_ext_phy)
2679 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
2680 phy_index++) {
2681 /**
2682 * No need to initialize second phy in case of first
2683 * phy only selection. In case of second phy, we do
2684 * need to initialize the first phy, since they are
2685 * connected.
2686 **/
2687 if (phy_index == EXT_PHY2 &&
2688 (bnx2x_phy_selection(params) ==
2689 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
2690 				DP(NETIF_MSG_LINK, "Not initializing "
2691 				   "second phy\n");
2692 continue;
2693 }
2694 params->phy[phy_index].config_init(
2695 &params->phy[phy_index],
2696 params, vars);
2697 }
2698
2699 /* Reset the interrupt indication after phy was initialized */
2700 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
2701 params->port*4,
2702 (NIG_STATUS_XGXS0_LINK10G |
2703 NIG_STATUS_XGXS0_LINK_STATUS |
2704 NIG_STATUS_SERDES0_LINK_STATUS |
2705 NIG_MASK_MI_INT));
2706 return rc;
2707}
2708
2709static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
2710 struct link_params *params)
2711{
2712 /* reset the SerDes/XGXS */
2713 REG_WR(params->bp, GRCBASE_MISC +
2714 MISC_REGISTERS_RESET_REG_3_CLEAR,
2715 (0x1ff << (params->port*16)));
2716}
2717
2718static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
2719 struct link_params *params)
2720{
2721 struct bnx2x *bp = params->bp;
2722 u8 gpio_port;
2723 /* HW reset */
2724 gpio_port = params->port;
2725 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2726 MISC_REGISTERS_GPIO_OUTPUT_LOW,
2727 gpio_port);
2728 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2729 MISC_REGISTERS_GPIO_OUTPUT_LOW,
2730 gpio_port);
2731 DP(NETIF_MSG_LINK, "reset external PHY\n");
2732}
2733
2734static u8 bnx2x_update_link_down(struct link_params *params,
2735 struct link_vars *vars)
2736{
2737 struct bnx2x *bp = params->bp;
2738 u8 port = params->port;
2739
2740 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
2741 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
2742
2743 /* indicate no mac active */
2744 vars->mac_type = MAC_TYPE_NONE;
2745
2746 /* update shared memory */
2747 vars->link_status = 0;
2748 vars->line_speed = 0;
2749 bnx2x_update_mng(params, vars->link_status);
2750
2751 /* activate nig drain */
2752 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2753
2754 /* disable emac */
2755 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
2756
2757 msleep(10);
2758
2759 /* reset BigMac */
2760 bnx2x_bmac_rx_disable(bp, params->port);
2761 REG_WR(bp, GRCBASE_MISC +
2762 MISC_REGISTERS_RESET_REG_2_CLEAR,
2763 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2764 return 0;
2765}
2766
2767static u8 bnx2x_update_link_up(struct link_params *params,
2768 struct link_vars *vars,
2769 u8 link_10g)
2770{
2771 struct bnx2x *bp = params->bp;
2772 u8 port = params->port;
2773 u8 rc = 0;
2774
2775 vars->link_status |= LINK_STATUS_LINK_UP;
2776
2777 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
2778 vars->link_status |=
2779 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
2780
2781 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
2782 vars->link_status |=
2783 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
2784
2785 if (link_10g) {
2786 bnx2x_bmac_enable(params, vars, 0);
2787 bnx2x_set_led(params, vars,
2788 LED_MODE_OPER, SPEED_10000);
2789 } else {
2790 rc = bnx2x_emac_program(params, vars);
2791
2792 bnx2x_emac_enable(params, vars, 0);
2793
2794 /* AN complete? */
2795 if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
2796 && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
2797 SINGLE_MEDIA_DIRECT(params))
2798 bnx2x_set_gmii_tx_driver(params);
2799 }
2800
2801 /* PBF - link up */
2802 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
2803 vars->line_speed);
2804
2805 /* disable drain */
2806 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2807
2808 /* update shared memory */
2809 bnx2x_update_mng(params, vars->link_status);
2810 msleep(20);
2811 return rc;
2812}
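bnx2x_update_link_up picks the MAC by speed: 10G and above runs on the BigMAC, anything below on the EMAC (the same cut-off the link_10g test in bnx2x_link_update applies). As a standalone predicate:

static int uses_bmac(int line_speed_mbps)
{
	return line_speed_mbps >= 10000;  /* 10G, 12G, 12.5G, 13G, 15G, 16G */
}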
2813/**
2814 * The bnx2x_link_update function should be called upon link
2815 * interrupt.
2816 * Link is considered up as follows:
2817 * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
2818 * to be up
2819 * - SINGLE_MEDIA - The link between the 577xx and the external
2820 * phy (XGXS) need to up as well as the external link of the
2821 * phy (PHY_EXT1)
2822 * - DUAL_MEDIA - The link between the 577xx and the first
2823 * external phy needs to be up, and at least one of the 2
2824 * external phy link must be up.
2825 */
2826u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
2827{
2828 struct bnx2x *bp = params->bp;
2829 struct link_vars phy_vars[MAX_PHYS];
2830 u8 port = params->port;
2831 u8 link_10g, phy_index;
2832 u8 ext_phy_link_up = 0, cur_link_up, rc = 0;
2833 u8 is_mi_int = 0;
2834 u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
2835 u8 active_external_phy = INT_PHY;
2836 vars->link_status = 0;
2837 for (phy_index = INT_PHY; phy_index < params->num_phys;
2838 phy_index++) {
2839 phy_vars[phy_index].flow_ctrl = 0;
2840 phy_vars[phy_index].link_status = 0;
2841 phy_vars[phy_index].line_speed = 0;
2842 phy_vars[phy_index].duplex = DUPLEX_FULL;
2843 phy_vars[phy_index].phy_link_up = 0;
2844 phy_vars[phy_index].link_up = 0;
2845 }
2846
2847 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
2848 port, (vars->phy_flags & PHY_XGXS_FLAG),
2849 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
2850
2851 is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
2852 port*0x18) > 0);
2853 DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
2854 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
2855 is_mi_int,
2856 REG_RD(bp,
2857 NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
2858
2859 DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
2860 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2861 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
2862
2863 /* disable emac */
2864 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
2865
2866 /**
2867 * Step 1:
2868 * Check external link change only for external phys, and apply
2869 * priority selection between them in case the link on both phys
2870 	 * is up. Note that instead of the common vars, a temporary
2871 	 * vars argument is used, since each phy may have a different link/
2872 	 * speed/duplex result
2873 */
2874 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
2875 phy_index++) {
2876 struct bnx2x_phy *phy = &params->phy[phy_index];
2877 if (!phy->read_status)
2878 continue;
2879 /* Read link status and params of this ext phy */
2880 cur_link_up = phy->read_status(phy, params,
2881 &phy_vars[phy_index]);
2882 if (cur_link_up) {
2883 DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
2884 phy_index);
2885 } else {
2886 DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
2887 phy_index);
2888 continue;
2889 }
2890
2891 if (!ext_phy_link_up) {
2892 ext_phy_link_up = 1;
2893 active_external_phy = phy_index;
2894 } else {
2895 switch (bnx2x_phy_selection(params)) {
2896 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
2897 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
2898 /**
2899 * In this option, the first PHY makes sure to pass the
2900 * traffic through itself only.
2901 			 * It's not clear how to reset the link on the second phy
2902 **/
2903 active_external_phy = EXT_PHY1;
2904 break;
2905 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
2906 /**
2907 * In this option, the first PHY makes sure to pass the
2908 * traffic through the second PHY.
2909 **/
2910 active_external_phy = EXT_PHY2;
2911 break;
2912 default:
2913 /**
2914 * Link indication on both PHYs with the following cases
2915 * is invalid:
2916 * - FIRST_PHY means that second phy wasn't initialized,
2917 * hence its link is expected to be down
2918 * - SECOND_PHY means that first phy should not be able
2919 * to link up by itself (using configuration)
2920 			 * - DEFAULT should be overridden during initialization
2921 **/
2922 DP(NETIF_MSG_LINK, "Invalid link indication"
2923 "mpc=0x%x. DISABLING LINK !!!\n",
2924 params->multi_phy_config);
2925 ext_phy_link_up = 0;
2926 break;
2927 }
2928 }
2929 }
2930 prev_line_speed = vars->line_speed;
2931 /**
2932 * Step 2:
2933 * Read the status of the internal phy. In case of
2934 * DIRECT_SINGLE_MEDIA board, this link is the external link,
2935 * otherwise this is the link between the 577xx and the first
2936 * external phy
2937 */
2938 if (params->phy[INT_PHY].read_status)
2939 params->phy[INT_PHY].read_status(
2940 &params->phy[INT_PHY],
2941 params, vars);
2942 /**
2943 	 * The INT_PHY flow control resides in the vars. This includes the
2944 	 * case where the speed or flow control are not set to AUTO.
2945 	 * Otherwise, the active external phy flow control result is set
2946 	 * to the vars. The ext_phy_line_speed is needed to check whether the
2947 	 * speed differs between the internal phy and the external phy.
2948 	 * Such a case may be the result of an intermediate link speed change.
2949 */
2950 if (active_external_phy > INT_PHY) {
2951 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
2952 /**
2953 * Link speed is taken from the XGXS. AN and FC result from
2954 * the external phy.
2955 */
2956 vars->link_status |= phy_vars[active_external_phy].link_status;
2957
2958 /**
2959 	 * if active_external_phy is the first PHY and its link is up,
2960 	 * disable TX on the second external PHY
2961 */
2962 if (active_external_phy == EXT_PHY1) {
2963 if (params->phy[EXT_PHY2].phy_specific_func) {
2964 DP(NETIF_MSG_LINK, "Disabling TX on"
2965 " EXT_PHY2\n");
2966 params->phy[EXT_PHY2].phy_specific_func(
2967 &params->phy[EXT_PHY2],
2968 params, DISABLE_TX);
2969 }
2970 }
2971
2972 ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
2973 vars->duplex = phy_vars[active_external_phy].duplex;
2974 if (params->phy[active_external_phy].supported &
2975 SUPPORTED_FIBRE)
2976 vars->link_status |= LINK_STATUS_SERDES_LINK;
2977 DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
2978 active_external_phy);
2979 }
2980
2981 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
2982 phy_index++) {
2983 if (params->phy[phy_index].flags &
2984 FLAGS_REARM_LATCH_SIGNAL) {
2985 bnx2x_rearm_latch_signal(bp, port,
2986 phy_index ==
2987 active_external_phy);
2232 			break;
2233 		}
2234 	}
2988 			break;
2989 		}
2990 	}
2991 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
2992 " ext_phy_line_speed = %d\n", vars->flow_ctrl,
2993 vars->link_status, ext_phy_line_speed);
2994 /**
2995 * Upon link speed change set the NIG into drain mode. Comes to
2996 * deals with possible FIFO glitch due to clk change when speed
2997 * is decreased without link down indicator
2998 */
2999
3000 if (vars->phy_link_up) {
3001 if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
3002 (ext_phy_line_speed != vars->line_speed)) {
3003 DP(NETIF_MSG_LINK, "Internal link speed %d is"
3004 " different than the external"
3005 " link speed %d\n", vars->line_speed,
3006 ext_phy_line_speed);
3007 vars->phy_link_up = 0;
3008 } else if (prev_line_speed != vars->line_speed) {
3009 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
3010 + params->port*4, 0);
3011 msleep(1);
3012 }
3013 }
3014
3015 	/* anything 10G and over uses the BMAC */
3016 link_10g = ((vars->line_speed == SPEED_10000) ||
3017 (vars->line_speed == SPEED_12000) ||
3018 (vars->line_speed == SPEED_12500) ||
3019 (vars->line_speed == SPEED_13000) ||
3020 (vars->line_speed == SPEED_15000) ||
3021 (vars->line_speed == SPEED_16000));
3022
3023 bnx2x_link_int_ack(params, vars, link_10g);
3024
3025 /**
3026 	 * In case the external phy link is up and the internal link is down
3027 	 * (probably not initialized yet, e.g. right after link
3028 	 * initialization), the internal phy needs to be initialized.
3029 	 * Note that after a link down-up caused by a cable plug, the xgxs
3030 	 * link will probably come up again without any need to
3031 	 * initialize it
3032 */
3033 if (!(SINGLE_MEDIA_DIRECT(params))) {
3034 DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
3035 " init_preceding = %d\n", ext_phy_link_up,
3036 vars->phy_link_up,
3037 params->phy[EXT_PHY1].flags &
3038 FLAGS_INIT_XGXS_FIRST);
3039 if (!(params->phy[EXT_PHY1].flags &
3040 FLAGS_INIT_XGXS_FIRST)
3041 && ext_phy_link_up && !vars->phy_link_up) {
3042 vars->line_speed = ext_phy_line_speed;
3043 if (vars->line_speed < SPEED_1000)
3044 vars->phy_flags |= PHY_SGMII_FLAG;
3045 else
3046 vars->phy_flags &= ~PHY_SGMII_FLAG;
3047 bnx2x_init_internal_phy(&params->phy[INT_PHY],
3048 params,
3049 vars);
3050 }
3051 }
3052 /**
3053 * Link is up only if both local phy and external phy (in case of
3054 * non-direct board) are up
3055 */
3056 vars->link_up = (vars->phy_link_up &&
3057 (ext_phy_link_up ||
3058 SINGLE_MEDIA_DIRECT(params)));
3059
3060 if (vars->link_up)
3061 rc = bnx2x_update_link_up(params, vars, link_10g);
3062 else
3063 rc = bnx2x_update_link_down(params, vars);
3064
3065 return rc;
3066}
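The decision at the end of bnx2x_link_update reduces to one predicate: the internal (XGXS) link must be up, and on non-direct boards at least one external PHY link must be up as well:

static int link_is_up(int phy_link_up, int ext_phy_link_up,
		      int single_media_direct)
{
	return phy_link_up && (ext_phy_link_up || single_media_direct);
}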
3067
3068
3069/*****************************************************************************/
3070/* External Phy section */
3071/*****************************************************************************/
3072void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
3073{
3074 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3075 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3076 msleep(1);
3077 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3078 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
2235 3079 }
2236 3080 
2237 3081 static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
2238 				      u32 shmem_base, u32 spirom_ver)
3082 				      u32 spirom_ver, u32 ver_addr)
2239 3083 {
2240 3084 	DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
2241 3085 		 (u16)(spirom_ver>>16), (u16)spirom_ver, port);
2242 	REG_WR(bp, shmem_base +
2243 		   offsetof(struct shmem_region,
2244 			    port_mb[port].ext_phy_fw_version),
2245 		   spirom_ver);
3086 
3087 	if (ver_addr)
3088 		REG_WR(bp, ver_addr, spirom_ver);
2246 3089 }
2247 3090 
2248 static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, u8 port,
2249 				      u32 ext_phy_type, u8 ext_phy_addr,
2250 				      u32 shmem_base)
3091 static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
3092 				      struct bnx2x_phy *phy,
3093 				      u8 port)
2251 3094 {
2252 3095 	u16 fw_ver1, fw_ver2;
2253 3096 
2254 	bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD,
3097 	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
2255 3098 		      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
2256 	bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, MDIO_PMA_DEVAD,
3099 	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
2257 3100 		      MDIO_PMA_REG_ROM_VER2, &fw_ver2);
2258 	bnx2x_save_spirom_version(bp, port, shmem_base,
2259 				  (u32)(fw_ver1<<16 | fw_ver2));
3101 	bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
3102 				  phy->ver_addr);
2260 3103 }
2261 3104 
2262 
2263 static void bnx2x_save_8481_spirom_version(struct bnx2x *bp, u8 port,
2264 					   u8 ext_phy_addr, u32 shmem_base)
2265 {
2266 	u16 val, fw_ver1, fw_ver2, cnt;
2267 	/* For the 32 bits registers in 8481, access via MDIO2ARM interface.*/
2268 	/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
2269 	bnx2x_cl45_write(bp, port,
2270 		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2271 		       ext_phy_addr, MDIO_PMA_DEVAD,
2272 		       0xA819, 0x0014);
2273 	bnx2x_cl45_write(bp, port,
2274 		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2275 		       ext_phy_addr,
2276 		       MDIO_PMA_DEVAD,
2277 		       0xA81A,
2278 		       0xc200);
2279 	bnx2x_cl45_write(bp, port,
2280 		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2281 		       ext_phy_addr,
2282 		       MDIO_PMA_DEVAD,
2283 		       0xA81B,
2284 		       0x0000);
2285 	bnx2x_cl45_write(bp, port,
2286 		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2287 		       ext_phy_addr,
2288 		       MDIO_PMA_DEVAD,
2289 		       0xA81C,
2290 		       0x0300);
2291 	bnx2x_cl45_write(bp, port,
2292 		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2293 		       ext_phy_addr,
2294 		       MDIO_PMA_DEVAD,
2295 		       0xA817,
2296 		       0x0009);
2297 
2298 	for (cnt = 0; cnt < 100; cnt++) {
2299 		bnx2x_cl45_read(bp, port,
2300 			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2301 			      ext_phy_addr,
2302 			      MDIO_PMA_DEVAD,
2303 			      0xA818,
2304 			      &val);
2305 		if (val & 1)
2306 			break;
2307 		udelay(5);
2308 	}
2309 	if (cnt == 100) {
2310 		DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(1)\n");
2311 		bnx2x_save_spirom_version(bp, port,
2312 					  shmem_base, 0);
2313 		return;
2314 	}
3105 static void bnx2x_ext_phy_set_pause(struct link_params *params,
3106 				    struct bnx2x_phy *phy,
3107 				    struct link_vars *vars)
3108 {
3109 	u16 val;
3110 	struct bnx2x *bp = params->bp;
3111 	/* read modify write pause advertising */
3112 	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
3113 
3114 	val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
3115 
3116 	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3117 	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
3118 	if ((vars->ieee_fc &
3119 	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3120 	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3121 		val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
3122 	}
3123 	if ((vars->ieee_fc &
3124 	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
3125 	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
3126 		val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
3127 	}
3128 	DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val);
3129 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
3130 }
2315 3131
3132static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3133 struct link_params *params,
3134 struct link_vars *vars)
3135{
3136 struct bnx2x *bp = params->bp;
3137 u16 ld_pause; /* local */
3138 u16 lp_pause; /* link partner */
3139 u16 pause_result;
3140 u8 ret = 0;
3141 /* read twice */
2316 3142 
2317 	/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
2318 	bnx2x_cl45_write(bp, port,
2319 		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2320 		       ext_phy_addr, MDIO_PMA_DEVAD,
2321 		       0xA819, 0x0000);
2322 	bnx2x_cl45_write(bp, port,
2323 		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2324 		       ext_phy_addr, MDIO_PMA_DEVAD,
2325 		       0xA81A, 0xc200);
2326 	bnx2x_cl45_write(bp, port,
2327 		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2328 		       ext_phy_addr, MDIO_PMA_DEVAD,
2329 		       0xA817, 0x000A);
2330 	for (cnt = 0; cnt < 100; cnt++) {
2331 		bnx2x_cl45_read(bp, port,
2332 			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2333 			      ext_phy_addr,
2334 			      MDIO_PMA_DEVAD,
2335 			      0xA818,
2336 			      &val);
2337 		if (val & 1)
2338 			break;
2339 		udelay(5);
2340 	}
2341 	if (cnt == 100) {
2342 		DP(NETIF_MSG_LINK, "Unable to read 8481 phy fw version(2)\n");
2343 		bnx2x_save_spirom_version(bp, port,
2344 					  shmem_base, 0);
3143 	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
3144 
3145 	if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
3146 		vars->flow_ctrl = phy->req_flow_ctrl;
3147 	else if (phy->req_line_speed != SPEED_AUTO_NEG)
3148 		vars->flow_ctrl = params->req_fc_auto_adv;
3149 	else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
3150 		ret = 1;
3151 		bnx2x_cl45_read(bp, phy,
3152 			      MDIO_AN_DEVAD,
3153 			      MDIO_AN_REG_ADV_PAUSE, &ld_pause);
3154 		bnx2x_cl45_read(bp, phy,
3155 			      MDIO_AN_DEVAD,
3156 			      MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
3157 		pause_result = (ld_pause &
3158 				MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
3159 		pause_result |= (lp_pause &
3160 				MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
3161 		DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
3162 		   pause_result);
3163 		bnx2x_pause_resolve(vars, pause_result);
3164 	}
3165 	return ret;
3166 }
3167 
3168 static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
3169 struct bnx2x_phy *phy,
3170 struct link_vars *vars)
3171{
3172 u16 val;
3173 bnx2x_cl45_read(bp, phy,
3174 MDIO_AN_DEVAD,
3175 MDIO_AN_REG_STATUS, &val);
3176 bnx2x_cl45_read(bp, phy,
3177 MDIO_AN_DEVAD,
3178 MDIO_AN_REG_STATUS, &val);
3179 if (val & (1<<5))
3180 vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
3181 if ((val & (1<<0)) == 0)
3182 vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
3183}
3184
3185/******************************************************************/
3186/* common BCM8073/BCM8727 PHY SECTION */
3187/******************************************************************/
3188static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
3189 struct link_params *params,
3190 struct link_vars *vars)
3191{
3192 struct bnx2x *bp = params->bp;
3193 if (phy->req_line_speed == SPEED_10 ||
3194 phy->req_line_speed == SPEED_100) {
3195 vars->flow_ctrl = phy->req_flow_ctrl;
2345 3196 		return;
2346 3197 	}
2347 3198 
2348 	/* lower 16 bits of the register SPI_FW_STATUS */
2349 	bnx2x_cl45_read(bp, port,
2350 		      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2351 		      ext_phy_addr,
2352 		      MDIO_PMA_DEVAD,
2353 		      0xA81B,
2354 		      &fw_ver1);
2355 	/* upper 16 bits of register SPI_FW_STATUS */
2356 	bnx2x_cl45_read(bp, port,
2357 		      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
2358 		      ext_phy_addr,
2359 		      MDIO_PMA_DEVAD,
2360 		      0xA81C,
2361 		      &fw_ver2);
2362 
2363 	bnx2x_save_spirom_version(bp, port,
2364 				  shmem_base, (fw_ver2<<16) | fw_ver1);
3199 	if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
3200 	    (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
3201 		u16 pause_result;
3202 		u16 ld_pause;		/* local */
3203 		u16 lp_pause;		/* link partner */
3204 		bnx2x_cl45_read(bp, phy,
3205 			      MDIO_AN_DEVAD,
3206 			      MDIO_AN_REG_CL37_FC_LD, &ld_pause);
3207 
3208 		bnx2x_cl45_read(bp, phy,
3209 			      MDIO_AN_DEVAD,
3210 			      MDIO_AN_REG_CL37_FC_LP, &lp_pause);
3211 		pause_result = (ld_pause &
3212 				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
3213 		pause_result |= (lp_pause &
3214 				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
3215 
3216 		bnx2x_pause_resolve(vars, pause_result);
3217 		DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
3218 		   pause_result);
3219 	}
2365 3220 }
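In both resolve functions the local and partner pause advertisements are packed into one nibble and then resolved per IEEE 802.3 Table 28B-3, referenced above. A sketch of that resolution, assuming the packing used here (local {asym, sym} in bits 3:2, link partner {asym, sym} in bits 1:0); the driver's own bnx2x_pause_resolve may differ in detail:

enum fc { FC_NONE = 0, FC_RX = 1, FC_TX = 2 };

static int resolve_pause(unsigned int pause_result)
{
	int ld_sym  = (pause_result >> 2) & 1;
	int ld_asym = (pause_result >> 3) & 1;
	int lp_sym  = pause_result & 1;
	int lp_asym = (pause_result >> 1) & 1;

	if (ld_sym && lp_sym)
		return FC_RX | FC_TX;		/* symmetric pause */
	if (ld_sym && ld_asym && lp_asym)
		return FC_RX;			/* we pause, partner doesn't */
	if (ld_asym && lp_sym && lp_asym)
		return FC_TX;			/* partner pauses, we don't */
	return FC_NONE;
}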
2366 3221 
2367 static void bnx2x_bcm8072_external_rom_boot(struct link_params *params)
2368 {
2369 	struct bnx2x *bp = params->bp;
2370 	u8 port = params->port;
2371 	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2372 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2373 
2374 	/* Need to wait 200ms after reset */
2375 	msleep(200);
2376 	/* Boot port from external ROM
2377 	 * Set ser_boot_ctl bit in the MISC_CTRL1 register
2378 	 */
2379 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2380 		       MDIO_PMA_DEVAD,
2381 		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2382 
2383 	/* Reset internal microprocessor */
2384 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2385 		       MDIO_PMA_DEVAD,
2386 		       MDIO_PMA_REG_GEN_CTRL,
2387 		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2388 	/* set micro reset = 0 */
2389 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2390 		       MDIO_PMA_DEVAD,
2391 		       MDIO_PMA_REG_GEN_CTRL,
2392 		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2393 	/* Reset internal microprocessor */
2394 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2395 		       MDIO_PMA_DEVAD,
2396 		       MDIO_PMA_REG_GEN_CTRL,
2397 		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2398 	/* wait for 100ms for code download via SPI port */
2399 	msleep(100);
2400 
2401 	/* Clear ser_boot_ctl bit */
2402 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2403 		       MDIO_PMA_DEVAD,
2404 		       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2405 	/* Wait 100ms */
2406 	msleep(100);
2407 
2408 	bnx2x_save_bcm_spirom_ver(bp, port,
2409 				  ext_phy_type,
2410 				  ext_phy_addr,
2411 				  params->shmem_base);
3222 static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
3223 					      struct bnx2x_phy *phy,
3224 					      u8 port)
3225 {
3226 	/* Boot port from external ROM */
3227 	/* EDC grst */
3228 	bnx2x_cl45_write(bp, phy,
3229 		       MDIO_PMA_DEVAD,
3230 		       MDIO_PMA_REG_GEN_CTRL,
3231 		       0x0001);
3232 
3233 	/* ucode reboot and rst */
3234 	bnx2x_cl45_write(bp, phy,
3235 		       MDIO_PMA_DEVAD,
3236 		       MDIO_PMA_REG_GEN_CTRL,
3237 		       0x008c);
3238 
3239 	bnx2x_cl45_write(bp, phy,
3240 		       MDIO_PMA_DEVAD,
3241 		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
3242 
3243 	/* Reset internal microprocessor */
3244 	bnx2x_cl45_write(bp, phy,
3245 		       MDIO_PMA_DEVAD,
3246 		       MDIO_PMA_REG_GEN_CTRL,
3247 		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
3248 
3249 	/* Release srst bit */
3250 	bnx2x_cl45_write(bp, phy,
3251 		       MDIO_PMA_DEVAD,
3252 		       MDIO_PMA_REG_GEN_CTRL,
3253 		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
3254 
3255 	/* wait for 120ms for code download via SPI port */
3256 	msleep(120);
3257 
3258 	/* Clear ser_boot_ctl bit */
3259 	bnx2x_cl45_write(bp, phy,
3260 		       MDIO_PMA_DEVAD,
3261 		       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
3262 	bnx2x_save_bcm_spirom_ver(bp, phy, port);
3263 }
3264 
3265 static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
3266 					       struct bnx2x_phy *phy)
3267 {
3268 	u16 val;
3269 	bnx2x_cl45_read(bp, phy,
3270 		      MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
3271 
3272 	if (val == 0) {
3273 		/* Mustn't set low power mode in 8073 A0 */
3274 		return;
3275 	}
3276 
3277 /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
3278 bnx2x_cl45_read(bp, phy,
3279 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3280 val &= ~(1<<13);
3281 bnx2x_cl45_write(bp, phy,
3282 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3283
3284 /* PLL controls */
3285 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
3286 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
3287 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
3288 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
3289 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
3290
3291 /* Tx Controls */
3292 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
3293 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
3294 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
3295
3296 /* Rx Controls */
3297 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
3298 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
3299 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
3300
3301 /* Enable PLL sequencer (use read-modify-write to set bit 13) */
3302 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
3303 val |= (1<<13);
3304 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
2412 3305 }
2413 3306 
2414 static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
3307 /******************************************************************/
3308 /*			BCM8073 PHY SECTION			  */
3309 /******************************************************************/
3310 static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
2415 3311 {
2416 3312 	/* This is only required for 8073A1, version 102 only */
2417 
2418 	struct bnx2x *bp = params->bp;
2419 	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2420 3313 	u16 val;
2421 3314 
2422 3315 	/* Read 8073 HW revision*/
2423 	bnx2x_cl45_read(bp, params->port,
2424 		      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2425 		      ext_phy_addr,
3316 	bnx2x_cl45_read(bp, phy,
2426 3317 		      MDIO_PMA_DEVAD,
2427 3318 		      MDIO_PMA_REG_8073_CHIP_REV, &val);
2428 3319 
@@ -2431,9 +3322,7 @@ static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
2431 3322 		return 0;
2432 3323 	}
2433 3324 
2434 	bnx2x_cl45_read(bp, params->port,
2435 		      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2436 		      ext_phy_addr,
3325 	bnx2x_cl45_read(bp, phy,
2437 3326 		      MDIO_PMA_DEVAD,
2438 3327 		      MDIO_PMA_REG_ROM_VER2, &val);
2439 3328 
@@ -2444,15 +3333,11 @@ static u8 bnx2x_8073_is_snr_needed(struct link_params *params)
2444 3333 	return 1;
2445 3334 }
2446 3335 
2447 static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
3336 static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
2448 3337 {
2449 	struct bnx2x *bp = params->bp;
2450 	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2451 3338 	u16 val, cnt, cnt1 ;
2452 3339 
2453 	bnx2x_cl45_read(bp, params->port,
2454 		      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2455 		      ext_phy_addr,
3340 	bnx2x_cl45_read(bp, phy,
2456 3341 		      MDIO_PMA_DEVAD,
2457 3342 		      MDIO_PMA_REG_8073_CHIP_REV, &val);
2458 3343 
@@ -2466,9 +3351,7 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
2466 3351 	   poll Dev1, Reg $C820: */
2467 3352 
2468 3353 	for (cnt = 0; cnt < 1000; cnt++) {
2469 		bnx2x_cl45_read(bp, params->port,
2470 			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2471 			      ext_phy_addr,
3354 		bnx2x_cl45_read(bp, phy,
2472 3355 			      MDIO_PMA_DEVAD,
2473 3356 			      MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
2474 3357 			      &val);
@@ -2485,9 +3368,7 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
2485 3368 	   XAUI workaround has completed),
2486 3369 	   then continue on with system initialization.*/
2487 3370 	for (cnt1 = 0; cnt1 < 1000; cnt1++) {
2488 		bnx2x_cl45_read(bp, params->port,
2489 			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2490 			      ext_phy_addr,
3371 		bnx2x_cl45_read(bp, phy,
2491 3372 			      MDIO_PMA_DEVAD,
2492 3373 			      MDIO_PMA_REG_8073_XAUI_WA, &val);
2493 3374 		if (val & (1<<15)) {
@@ -2505,143 +3386,385 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
2505 3386 	return -EINVAL;
2506 3387 }
2507 3388 
2508 static void bnx2x_bcm8073_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
2509 						    u8 ext_phy_addr,
2510 						    u32 ext_phy_type,
2511 						    u32 shmem_base)
2512 {
2513 	/* Boot port from external ROM */
2514 	/* EDC grst */
2515 	bnx2x_cl45_write(bp, port,
2516 		       ext_phy_type,
2517 		       ext_phy_addr,
2518 		       MDIO_PMA_DEVAD,
2519 		       MDIO_PMA_REG_GEN_CTRL,
2520 		       0x0001);
2521 
2522 	/* ucode reboot and rst */
2523 	bnx2x_cl45_write(bp, port,
2524 		       ext_phy_type,
2525 		       ext_phy_addr,
2526 		       MDIO_PMA_DEVAD,
2527 		       MDIO_PMA_REG_GEN_CTRL,
2528 		       0x008c);
2529 
2530 	bnx2x_cl45_write(bp, port,
2531 		       ext_phy_type,
2532 		       ext_phy_addr,
2533 		       MDIO_PMA_DEVAD,
2534 		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
3389 static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
3390 {
3391 	/* Force KR or KX */
3392 	bnx2x_cl45_write(bp, phy,
3393 		       MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
3394 	bnx2x_cl45_write(bp, phy,
3395 		       MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
3396 	bnx2x_cl45_write(bp, phy,
3397 		       MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
3398 	bnx2x_cl45_write(bp, phy,
3399 		       MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
3400 }
3401 
3402 static void bnx2x_8073_set_pause_cl37(struct link_params *params,
3403 				      struct bnx2x_phy *phy,
3404 				      struct link_vars *vars)
3405 {
3406 	u16 cl37_val;
3407 	struct bnx2x *bp = params->bp;
3408 	bnx2x_cl45_read(bp, phy,
3409 		      MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
3410 
3411 	cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3412 	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3413 	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
3414 	if ((vars->ieee_fc &
3415 	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
3416 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
3417 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
3418 }
3419 if ((vars->ieee_fc &
3420 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3421 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3422 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3423 }
3424 if ((vars->ieee_fc &
3425 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
3426 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
3427 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3428 }
3429 DP(NETIF_MSG_LINK,
3430 "Ext phy AN advertize cl37 0x%x\n", cl37_val);
2535 
2536 	/* Reset internal microprocessor */
2537 	bnx2x_cl45_write(bp, port,
2538 		       ext_phy_type,
2539 		       ext_phy_addr,
2540 		       MDIO_PMA_DEVAD,
2541 		       MDIO_PMA_REG_GEN_CTRL,
2542 		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2543 
2544 	/* Release srst bit */
2545 	bnx2x_cl45_write(bp, port,
2546 		       ext_phy_type,
2547 		       ext_phy_addr,
2548 		       MDIO_PMA_DEVAD,
2549 		       MDIO_PMA_REG_GEN_CTRL,
2550 		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2551 
2552 	/* wait for 100ms for code download via SPI port */
2553 	msleep(100);
3431 
3432 	bnx2x_cl45_write(bp, phy,
3433 		       MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
3434 	msleep(500);
3435 }
3436 
3437 static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
3438 				 struct link_params *params,
3439 				 struct link_vars *vars)
3440 {
3441 	struct bnx2x *bp = params->bp;
3442 	u16 val = 0, tmp1;
3443 	u8 gpio_port;
3444 	DP(NETIF_MSG_LINK, "Init 8073\n");
3445 
3446 	gpio_port = params->port;
3447 	/* Restore normal power mode*/
3448 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3449 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
2554 
2555 	/* Clear ser_boot_ctl bit */
2556 	bnx2x_cl45_write(bp, port,
2557 		       ext_phy_type,
2558 		       ext_phy_addr,
2559 		       MDIO_PMA_DEVAD,
2560 		       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2561 
2562 	bnx2x_save_bcm_spirom_ver(bp, port,
2563 				  ext_phy_type,
2564 				  ext_phy_addr,
2565 				  shmem_base);
2566 }
2567 
2568 static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
2569 					    u8 ext_phy_addr,
2570 					    u32 shmem_base)
2571 {
2572 	bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
2573 					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2574 					shmem_base);
2575 }
2576 
2577 static void bnx2x_bcm8727_external_rom_boot(struct bnx2x *bp, u8 port,
2578 					    u8 ext_phy_addr,
2579 					    u32 shmem_base)
2580 {
2581 	bnx2x_bcm8073_bcm8727_external_rom_boot(bp, port, ext_phy_addr,
2582 					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
2583 					shmem_base);
2584 
2585 }
2586 
2587 static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
2588 {
2589 	struct bnx2x *bp = params->bp;
2590 	u8 port = params->port;
2591 	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2592 	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2593 
2594 	/* Need to wait 100ms after reset */
2595 	msleep(100);
2596 
2597 	/* Micro controller re-boot */
2598 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2599 		       MDIO_PMA_DEVAD,
2600 		       MDIO_PMA_REG_GEN_CTRL,
2601 		       0x018B);
2602 
2603 	/* Set soft reset */
2604 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2605 		       MDIO_PMA_DEVAD,
2606 		       MDIO_PMA_REG_GEN_CTRL,
2607 		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
3450 
3451 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3452 		      MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
3453 
3454 	/* enable LASI */
3455 	bnx2x_cl45_write(bp, phy,
3456 		       MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, (1<<2));
3457 	bnx2x_cl45_write(bp, phy,
3458 		       MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x0004);
3459 
3460 	bnx2x_8073_set_pause_cl37(params, phy, vars);
3461 
3462 	bnx2x_8073_set_xaui_low_power_mode(bp, phy);
3463 
3464 	bnx2x_cl45_read(bp, phy,
3465 		      MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
3466 
3467 	bnx2x_cl45_read(bp, phy,
3468 		      MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
3469 
3470 	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
3471 
3472 	/* Enable CL37 BAM */
3473 	bnx2x_cl45_read(bp, phy,
3474 		      MDIO_AN_DEVAD,
3475 		      MDIO_AN_REG_8073_BAM, &val);
3476 	bnx2x_cl45_write(bp, phy,
3477 		       MDIO_AN_DEVAD,
3478 		       MDIO_AN_REG_8073_BAM, val | 1);
3479 
3480 	if (params->loopback_mode == LOOPBACK_EXT) {
3481 		bnx2x_807x_force_10G(bp, phy);
3482 		DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
3483 		return 0;
3484 	} else {
3485 bnx2x_cl45_write(bp, phy,
3486 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
3487 }
3488 if (phy->req_line_speed != SPEED_AUTO_NEG) {
3489 if (phy->req_line_speed == SPEED_10000) {
3490 val = (1<<7);
3491 } else if (phy->req_line_speed == SPEED_2500) {
3492 val = (1<<5);
3493 			/* Note that 2.5G works only
3494 			   when used with 1G advertisement */
3495 } else
3496 val = (1<<5);
3497 } else {
3498 val = 0;
3499 if (phy->speed_cap_mask &
3500 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
3501 val |= (1<<7);
3502
3503 		/* Note that 2.5G works only when
3504 		   used with 1G advertisement */
3505 if (phy->speed_cap_mask &
3506 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
3507 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
3508 val |= (1<<5);
3509 DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
3510 }
2608 
2609 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2610 		       MDIO_PMA_DEVAD,
2611 		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
3511 
3512 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
3513 	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
3514 
3515 if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
3516 (phy->req_line_speed == SPEED_AUTO_NEG)) ||
3517 (phy->req_line_speed == SPEED_2500)) {
3518 u16 phy_ver;
3519 /* Allow 2.5G for A1 and above */
3520 bnx2x_cl45_read(bp, phy,
3521 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
3522 &phy_ver);
3523 DP(NETIF_MSG_LINK, "Add 2.5G\n");
3524 if (phy_ver > 0)
3525 tmp1 |= 1;
3526 else
3527 tmp1 &= 0xfffe;
3528 } else {
3529 DP(NETIF_MSG_LINK, "Disable 2.5G\n");
3530 tmp1 &= 0xfffe;
3531 }
2612 
2613 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2614 		       MDIO_PMA_DEVAD,
2615 		       MDIO_PMA_REG_GEN_CTRL,
2616 		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
2617 
2618 	/* wait for 150ms for microcode load */
2619 	msleep(150);
2620 
2621 	/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
2622 	bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2623 		       MDIO_PMA_DEVAD,
2624 		       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2625 
2626 	msleep(200);
2627 	bnx2x_save_bcm_spirom_ver(bp, port,
2628 				  ext_phy_type,
2629 				  ext_phy_addr,
2630 				  params->shmem_base);
3532 
3533 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
3534 	/* Add support for CL37 (passive mode) II */
3535 
3536 	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
3537 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
3538 		       (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
3539 		       0x20 : 0x40)));
3540 
3541 	/* Add support for CL37 (passive mode) III */
3542 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
3543 
3544 	/* The SNR will improve about 2 dB by changing
3545 	   BW and FEE main tap. The remaining commands are executed
3546 	   after the link is up*/
3547 	if (bnx2x_8073_is_snr_needed(bp, phy))
3548 		bnx2x_cl45_write(bp, phy,
3549 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
3550 0xFB0C);
3551
3552 	/* Enable FEC (Forward Error Correction) Request in the AN */
3553 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
3554 tmp1 |= (1<<15);
3555 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
3556
3557 bnx2x_ext_phy_set_pause(params, phy, vars);
3558
3559 /* Restart autoneg */
3560 msleep(500);
3561 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
3562 DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
3563 ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
3564 return 0;
3565}
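The autoneg advertisement computed above packs speeds into two bits: bit 7 advertises 10G (KR) and bit 5 advertises 1G, with 2.5G riding on the 1G advertisement as the comments note. A condensed sketch of that computation:

static unsigned int adv_807x(int want_10g, int want_1g_or_2500)
{
	unsigned int val = 0;

	if (want_10g)
		val |= 1u << 7;		/* 10G advertisement */
	if (want_1g_or_2500)
		val |= 1u << 5;		/* 1G (and implicitly 2.5G) */
	return val;
}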
3566
3567static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
3568 struct link_params *params,
3569 struct link_vars *vars)
3570{
3571 struct bnx2x *bp = params->bp;
3572 u8 link_up = 0;
3573 u16 val1, val2;
3574 u16 link_status = 0;
3575 u16 an1000_status = 0;
3576
3577 bnx2x_cl45_read(bp, phy,
3578 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
3579
3580 DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
3581
3582 /* clear the interrupt LASI status register */
3583 bnx2x_cl45_read(bp, phy,
3584 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
3585 bnx2x_cl45_read(bp, phy,
3586 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
3587 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
3588 /* Clear MSG-OUT */
3589 bnx2x_cl45_read(bp, phy,
3590 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
3591
3592 /* Check the LASI */
3593 bnx2x_cl45_read(bp, phy,
3594 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
3595
3596 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
3597
3598 /* Check the link status */
3599 bnx2x_cl45_read(bp, phy,
3600 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
3601 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
3602
3603 bnx2x_cl45_read(bp, phy,
3604 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
3605 bnx2x_cl45_read(bp, phy,
3606 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
3607 link_up = ((val1 & 4) == 4);
3608 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
3609
3610 if (link_up &&
3611 ((phy->req_line_speed != SPEED_10000))) {
3612 if (bnx2x_8073_xaui_wa(bp, phy) != 0)
3613 return 0;
3614 }
3615 bnx2x_cl45_read(bp, phy,
3616 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
3617 bnx2x_cl45_read(bp, phy,
3618 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
3619
3620 /* Check the link status on 1.1.2 */
3621 bnx2x_cl45_read(bp, phy,
3622 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
3623 bnx2x_cl45_read(bp, phy,
3624 MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
3625 DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
3626 "an_link_status=0x%x\n", val2, val1, an1000_status);
3627
3628 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
3629 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
3630 		/* The SNR will improve about 2 dB by
3631 		   changing the BW and FEE main tap.*/
3632 /* The 1st write to change FFE main
3633 tap is set before restart AN */
3634 /* Change PLL Bandwidth in EDC
3635 register */
3636 bnx2x_cl45_write(bp, phy,
3637 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
3638 0x26BC);
3639
3640 /* Change CDR Bandwidth in EDC register */
3641 bnx2x_cl45_write(bp, phy,
3642 MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
3643 0x0333);
3644 }
3645 bnx2x_cl45_read(bp, phy,
3646 MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
3647 &link_status);
3648
3649 /* Bits 0..2 --> speed detected, bits 13..15--> link is down */
3650 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
3651 link_up = 1;
3652 vars->line_speed = SPEED_10000;
3653 DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
3654 params->port);
3655 } else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
3656 link_up = 1;
3657 vars->line_speed = SPEED_2500;
3658 DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
3659 params->port);
3660 } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
3661 link_up = 1;
3662 vars->line_speed = SPEED_1000;
3663 DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
3664 params->port);
3665 } else {
3666 link_up = 0;
3667 DP(NETIF_MSG_LINK, "port %x: External link is down\n",
3668 params->port);
3669 }
3670
3671 if (link_up) {
3672 bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
3673 bnx2x_8073_resolve_fc(phy, params, vars);
3674 }
3675 return link_up;
3676}
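The speed decode above reads MDIO_PMA_REG_8073_SPEED_LINK_STATUS and pairs a "speed detected" bit (0..2) with a "link down" bit (13..15): bit 2/15 for 10G, bit 1/14 for 2.5G, bit 0/13 for 1G. The same logic as a standalone helper returning the speed in Mb/s, 0 when down:

static int decode_8073_speed(unsigned int link_status)
{
	if ((link_status & (1u << 2)) && !(link_status & (1u << 15)))
		return 10000;
	if ((link_status & (1u << 1)) && !(link_status & (1u << 14)))
		return 2500;
	if ((link_status & (1u << 0)) && !(link_status & (1u << 13)))
		return 1000;
	return 0;		/* link is down */
}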
3677
3678static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
3679 struct link_params *params)
3680{
3681 struct bnx2x *bp = params->bp;
3682 u8 gpio_port;
3683 gpio_port = params->port;
3684 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
3685 gpio_port);
3686 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3687 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3688 gpio_port);
3689}
3690
3691/******************************************************************/
3692/* BCM8705 PHY SECTION */
3693/******************************************************************/
3694static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
3695 struct link_params *params,
3696 struct link_vars *vars)
3697{
3698 struct bnx2x *bp = params->bp;
3699 DP(NETIF_MSG_LINK, "init 8705\n");
3700 /* Restore normal power mode*/
3701 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3702 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
3703 /* HW reset */
3704 bnx2x_ext_phy_hw_reset(bp, params->port);
3705 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
3706 bnx2x_wait_reset_complete(bp, phy);
3707
3708 bnx2x_cl45_write(bp, phy,
3709 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
3710 bnx2x_cl45_write(bp, phy,
3711 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
3712 bnx2x_cl45_write(bp, phy,
3713 MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
3714 bnx2x_cl45_write(bp, phy,
3715 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
3716 /* BCM8705 doesn't have microcode, hence the 0 */
3717 bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
3718 return 0;
3719}
3720
3721static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
3722 struct link_params *params,
3723 struct link_vars *vars)
3724{
3725 u8 link_up = 0;
3726 u16 val1, rx_sd;
3727 struct bnx2x *bp = params->bp;
3728 DP(NETIF_MSG_LINK, "read status 8705\n");
3729 bnx2x_cl45_read(bp, phy,
3730 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
3731 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
3732
3733 bnx2x_cl45_read(bp, phy,
3734 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
3735 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
3736
3737 bnx2x_cl45_read(bp, phy,
3738 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
3739
3740 bnx2x_cl45_read(bp, phy,
3741 MDIO_PMA_DEVAD, 0xc809, &val1);
3742 bnx2x_cl45_read(bp, phy,
3743 MDIO_PMA_DEVAD, 0xc809, &val1);
3744
3745 DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
3746 link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
3747 if (link_up) {
3748 vars->line_speed = SPEED_10000;
3749 bnx2x_ext_phy_resolve_fc(phy, params, vars);
3750 }
3751 return link_up;
2631} 3752}
2632 3753
2633static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, u8 port, 3754/******************************************************************/
2634 u32 ext_phy_type, u8 ext_phy_addr, 3755 /* SFP+ MODULE SECTION */
2635 u8 tx_en) 3756/******************************************************************/
3757static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
3758 struct bnx2x_phy *phy,
3759 u8 port,
3760 u8 tx_en)
2636{ 3761{
2637 u16 val; 3762 u16 val;
2638 3763
2639 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n", 3764 DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
2640 tx_en, port); 3765 tx_en, port);
2641 /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/ 3766 /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
2642 bnx2x_cl45_read(bp, port, 3767 bnx2x_cl45_read(bp, phy,
2643 ext_phy_type,
2644 ext_phy_addr,
2645 MDIO_PMA_DEVAD, 3768 MDIO_PMA_DEVAD,
2646 MDIO_PMA_REG_PHY_IDENTIFIER, 3769 MDIO_PMA_REG_PHY_IDENTIFIER,
2647 &val); 3770 &val);
@@ -2651,58 +3774,42 @@ static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, u8 port,
2651 else 3774 else
2652 val |= (1<<15); 3775 val |= (1<<15);
2653 3776
2654 bnx2x_cl45_write(bp, port, 3777 bnx2x_cl45_write(bp, phy,
2655 ext_phy_type,
2656 ext_phy_addr,
2657 MDIO_PMA_DEVAD, 3778 MDIO_PMA_DEVAD,
2658 MDIO_PMA_REG_PHY_IDENTIFIER, 3779 MDIO_PMA_REG_PHY_IDENTIFIER,
2659 val); 3780 val);
2660} 3781}
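
The transmitter control above is a read-modify-write of bit 15 in the PHY identifier register; the 8726 read-status path later in this patch treats a set bit 15 as "Tx is disabled", so enabling presumably clears it. A minimal standalone sketch of that bit manipulation, with a plain value standing in for the MDIO register:

#include <stdint.h>

/* Bit 15 of the PHY identifier register gates the SFP+ TX laser:
 * set = laser off (as the 8726 read-status code below confirms). */
static uint16_t sfp_tx_ctl(uint16_t reg_val, int tx_en)
{
	if (tx_en)
		reg_val &= ~(1u << 15);	/* clear: laser on */
	else
		reg_val |= (1u << 15);	/* set: laser off */
	return reg_val;
}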
2661 3782
2662static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params, 3783static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
3784 struct link_params *params,
2663 u16 addr, u8 byte_cnt, u8 *o_buf) 3785 u16 addr, u8 byte_cnt, u8 *o_buf)
2664{ 3786{
2665 struct bnx2x *bp = params->bp; 3787 struct bnx2x *bp = params->bp;
2666 u16 val = 0; 3788 u16 val = 0;
2667 u16 i; 3789 u16 i;
2668 u8 port = params->port;
2669 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2670 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2671
2672 if (byte_cnt > 16) { 3790 if (byte_cnt > 16) {
2673 DP(NETIF_MSG_LINK, "Reading from eeprom is" 3791 DP(NETIF_MSG_LINK, "Reading from eeprom is"
2674 " limited to 16 bytes\n"); 3792 " limited to 16 bytes\n");
2675 return -EINVAL; 3793 return -EINVAL;
2676 } 3794 }
2677 /* Set the read command byte count */ 3795 /* Set the read command byte count */
2678 bnx2x_cl45_write(bp, port, 3796 bnx2x_cl45_write(bp, phy,
2679 ext_phy_type, 3797 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
2680 ext_phy_addr,
2681 MDIO_PMA_DEVAD,
2682 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
2683 (byte_cnt | 0xa000)); 3798 (byte_cnt | 0xa000));
2684 3799
2685 /* Set the read command address */ 3800 /* Set the read command address */
2686 bnx2x_cl45_write(bp, port, 3801 bnx2x_cl45_write(bp, phy,
2687 ext_phy_type, 3802 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
2688 ext_phy_addr,
2689 MDIO_PMA_DEVAD,
2690 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
2691 addr); 3803 addr);
2692 3804
2693 /* Activate read command */ 3805 /* Activate read command */
2694 bnx2x_cl45_write(bp, port, 3806 bnx2x_cl45_write(bp, phy,
2695 ext_phy_type, 3807 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2696 ext_phy_addr,
2697 MDIO_PMA_DEVAD,
2698 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2699 0x2c0f); 3808 0x2c0f);
2700 3809
2701 /* Wait up to 500us for command complete status */ 3810 /* Wait up to 500us for command complete status */
2702 for (i = 0; i < 100; i++) { 3811 for (i = 0; i < 100; i++) {
2703 bnx2x_cl45_read(bp, port, 3812 bnx2x_cl45_read(bp, phy,
2704 ext_phy_type,
2705 ext_phy_addr,
2706 MDIO_PMA_DEVAD, 3813 MDIO_PMA_DEVAD,
2707 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 3814 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2708 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 3815 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2721,18 +3828,14 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
2721 3828
2722 /* Read the buffer */ 3829 /* Read the buffer */
2723 for (i = 0; i < byte_cnt; i++) { 3830 for (i = 0; i < byte_cnt; i++) {
2724 bnx2x_cl45_read(bp, port, 3831 bnx2x_cl45_read(bp, phy,
2725 ext_phy_type,
2726 ext_phy_addr,
2727 MDIO_PMA_DEVAD, 3832 MDIO_PMA_DEVAD,
2728 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); 3833 MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
2729 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); 3834 o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
2730 } 3835 }
2731 3836
2732 for (i = 0; i < 100; i++) { 3837 for (i = 0; i < 100; i++) {
2733 bnx2x_cl45_read(bp, port, 3838 bnx2x_cl45_read(bp, phy,
2734 ext_phy_type,
2735 ext_phy_addr,
2736 MDIO_PMA_DEVAD, 3839 MDIO_PMA_DEVAD,
2737 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 3840 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2738 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 3841 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2743,14 +3846,12 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct link_params *params,
2743 return -EINVAL; 3846 return -EINVAL;
2744} 3847}
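
The EEPROM access above is a fixed two-wire command sequence: program the byte count, program the address, fire the command, then poll the control register until its status field reports completion within the 500us budget. A hedged standalone sketch of just the polling step; read_reg(), status_mask and idle_val are assumptions standing in for bnx2x_cl45_read() and MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK, whose completion value sits in an elided part of the hunk:

#include <stdint.h>

typedef uint16_t (*read_reg_t)(void *ctx, uint16_t addr);

/* Poll a two-wire control register until its status bits report
 * completion, or give up after 100 tries (500us at 5us apiece). */
static int poll_two_wire(read_reg_t read_reg, void *ctx, uint16_t ctrl_addr,
			 uint16_t status_mask, uint16_t idle_val)
{
	int i;

	for (i = 0; i < 100; i++) {
		if ((read_reg(ctx, ctrl_addr) & status_mask) == idle_val)
			return 0;
		/* the driver delays briefly here; omitted in this sketch */
	}
	return -1;			/* timed out */
}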
2745 3848
2746static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params, 3849static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
3850 struct link_params *params,
2747 u16 addr, u8 byte_cnt, u8 *o_buf) 3851 u16 addr, u8 byte_cnt, u8 *o_buf)
2748{ 3852{
2749 struct bnx2x *bp = params->bp; 3853 struct bnx2x *bp = params->bp;
2750 u16 val, i; 3854 u16 val, i;
2751 u8 port = params->port;
2752 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
2753 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2754 3855
2755 if (byte_cnt > 16) { 3856 if (byte_cnt > 16) {
2756 DP(NETIF_MSG_LINK, "Reading from eeprom is" 3857 DP(NETIF_MSG_LINK, "Reading from eeprom is"
@@ -2759,40 +3860,30 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
2759 } 3860 }
2760 3861
2761 /* Need to read from 1.8000 to clear it */ 3862 /* Need to read from 1.8000 to clear it */
2762 bnx2x_cl45_read(bp, port, 3863 bnx2x_cl45_read(bp, phy,
2763 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
2764 ext_phy_addr,
2765 MDIO_PMA_DEVAD, 3864 MDIO_PMA_DEVAD,
2766 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 3865 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2767 &val); 3866 &val);
2768 3867
2769 /* Set the read command byte count */ 3868 /* Set the read command byte count */
2770 bnx2x_cl45_write(bp, port, 3869 bnx2x_cl45_write(bp, phy,
2771 ext_phy_type,
2772 ext_phy_addr,
2773 MDIO_PMA_DEVAD, 3870 MDIO_PMA_DEVAD,
2774 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, 3871 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
2775 ((byte_cnt < 2) ? 2 : byte_cnt)); 3872 ((byte_cnt < 2) ? 2 : byte_cnt));
2776 3873
2777 /* Set the read command address */ 3874 /* Set the read command address */
2778 bnx2x_cl45_write(bp, port, 3875 bnx2x_cl45_write(bp, phy,
2779 ext_phy_type,
2780 ext_phy_addr,
2781 MDIO_PMA_DEVAD, 3876 MDIO_PMA_DEVAD,
2782 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, 3877 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
2783 addr); 3878 addr);
2784 /* Set the destination address */ 3879 /* Set the destination address */
2785 bnx2x_cl45_write(bp, port, 3880 bnx2x_cl45_write(bp, phy,
2786 ext_phy_type,
2787 ext_phy_addr,
2788 MDIO_PMA_DEVAD, 3881 MDIO_PMA_DEVAD,
2789 0x8004, 3882 0x8004,
2790 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); 3883 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
2791 3884
2792 /* Activate read command */ 3885 /* Activate read command */
2793 bnx2x_cl45_write(bp, port, 3886 bnx2x_cl45_write(bp, phy,
2794 ext_phy_type,
2795 ext_phy_addr,
2796 MDIO_PMA_DEVAD, 3887 MDIO_PMA_DEVAD,
2797 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 3888 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
2798 0x8002); 3889 0x8002);
@@ -2802,9 +3893,7 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
2802 3893
2803 /* Wait up to 500us for command complete status */ 3894 /* Wait up to 500us for command complete status */
2804 for (i = 0; i < 100; i++) { 3895 for (i = 0; i < 100; i++) {
2805 bnx2x_cl45_read(bp, port, 3896 bnx2x_cl45_read(bp, phy,
2806 ext_phy_type,
2807 ext_phy_addr,
2808 MDIO_PMA_DEVAD, 3897 MDIO_PMA_DEVAD,
2809 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 3898 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2810 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 3899 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2823,18 +3912,14 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
2823 3912
2824 /* Read the buffer */ 3913 /* Read the buffer */
2825 for (i = 0; i < byte_cnt; i++) { 3914 for (i = 0; i < byte_cnt; i++) {
2826 bnx2x_cl45_read(bp, port, 3915 bnx2x_cl45_read(bp, phy,
2827 ext_phy_type,
2828 ext_phy_addr,
2829 MDIO_PMA_DEVAD, 3916 MDIO_PMA_DEVAD,
2830 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); 3917 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
2831 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); 3918 o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
2832 } 3919 }
2833 3920
2834 for (i = 0; i < 100; i++) { 3921 for (i = 0; i < 100; i++) {
2835 bnx2x_cl45_read(bp, port, 3922 bnx2x_cl45_read(bp, phy,
2836 ext_phy_type,
2837 ext_phy_addr,
2838 MDIO_PMA_DEVAD, 3923 MDIO_PMA_DEVAD,
2839 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 3924 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
2840 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 3925 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
@@ -2846,21 +3931,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct link_params *params,
2846 return -EINVAL; 3931 return -EINVAL;
2847} 3932}
2848 3933
2849u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr, 3934u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
3935 struct link_params *params, u16 addr,
2850 u8 byte_cnt, u8 *o_buf) 3936 u8 byte_cnt, u8 *o_buf)
2851{ 3937{
2852 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 3938 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
2853 3939 return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
2854 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
2855 return bnx2x_8726_read_sfp_module_eeprom(params, addr,
2856 byte_cnt, o_buf); 3940 byte_cnt, o_buf);
2857 else if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) 3941 else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
2858 return bnx2x_8727_read_sfp_module_eeprom(params, addr, 3942 return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
2859 byte_cnt, o_buf); 3943 byte_cnt, o_buf);
2860 return -EINVAL; 3944 return -EINVAL;
2861} 3945}
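
A usage sketch of the dispatcher above, mirroring how bnx2x_verify_sfp_module() later in this patch reads the vendor-name field; a valid phy/params pair from driver context is assumed:

char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE + 1];

/* Reads route to the 8726 or 8727 helper according to phy->type. */
if (bnx2x_read_sfp_module_eeprom(phy, params,
				 SFP_EEPROM_VENDOR_NAME_ADDR,
				 SFP_EEPROM_VENDOR_NAME_SIZE,
				 (u8 *)vendor_name) != 0)
	vendor_name[0] = '\0';
else
	vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';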
2862 3946
2863static u8 bnx2x_get_edc_mode(struct link_params *params, 3947static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
3948 struct link_params *params,
2864 u16 *edc_mode) 3949 u16 *edc_mode)
2865{ 3950{
2866 struct bnx2x *bp = params->bp; 3951 struct bnx2x *bp = params->bp;
@@ -2868,10 +3953,11 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
2868 *edc_mode = EDC_MODE_LIMITING; 3953 *edc_mode = EDC_MODE_LIMITING;
2869 3954
2870 /* First check for copper cable */ 3955 /* First check for copper cable */
2871 if (bnx2x_read_sfp_module_eeprom(params, 3956 if (bnx2x_read_sfp_module_eeprom(phy,
2872 SFP_EEPROM_CON_TYPE_ADDR, 3957 params,
2873 1, 3958 SFP_EEPROM_CON_TYPE_ADDR,
2874 &val) != 0) { 3959 1,
3960 &val) != 0) {
2875 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); 3961 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
2876 return -EINVAL; 3962 return -EINVAL;
2877 } 3963 }
@@ -2883,7 +3969,8 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
2883 3969
2884 /* Check if it's an active cable (includes SFP+ module) 3970 /* Check if it's an active cable (includes SFP+ module)
2885 or a passive cable */ 3971 or a passive cable */
2886 if (bnx2x_read_sfp_module_eeprom(params, 3972 if (bnx2x_read_sfp_module_eeprom(phy,
3973 params,
2887 SFP_EEPROM_FC_TX_TECH_ADDR, 3974 SFP_EEPROM_FC_TX_TECH_ADDR,
2888 1, 3975 1,
2889 &copper_module_type) != 3976 &copper_module_type) !=
@@ -2923,10 +4010,11 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
2923 4010
2924 if (check_limiting_mode) { 4011 if (check_limiting_mode) {
2925 u8 options[SFP_EEPROM_OPTIONS_SIZE]; 4012 u8 options[SFP_EEPROM_OPTIONS_SIZE];
2926 if (bnx2x_read_sfp_module_eeprom(params, 4013 if (bnx2x_read_sfp_module_eeprom(phy,
2927 SFP_EEPROM_OPTIONS_ADDR, 4014 params,
2928 SFP_EEPROM_OPTIONS_SIZE, 4015 SFP_EEPROM_OPTIONS_ADDR,
2929 options) != 0) { 4016 SFP_EEPROM_OPTIONS_SIZE,
4017 options) != 0) {
2930 DP(NETIF_MSG_LINK, "Failed to read Option" 4018 DP(NETIF_MSG_LINK, "Failed to read Option"
2931 " field from module EEPROM\n"); 4019 " field from module EEPROM\n");
2932 return -EINVAL; 4020 return -EINVAL;
@@ -2939,17 +4027,17 @@ static u8 bnx2x_get_edc_mode(struct link_params *params,
2939 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); 4027 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
2940 return 0; 4028 return 0;
2941} 4029}
2942
2943 /* This function reads the relevant field from the module (SFP+) 4030 /* This function reads the relevant field from the module (SFP+)
2944 and verifies it is compliant with this board */ 4031 and verifies it is compliant with this board */
2945static u8 bnx2x_verify_sfp_module(struct link_params *params) 4032static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
4033 struct link_params *params)
2946{ 4034{
2947 struct bnx2x *bp = params->bp; 4035 struct bnx2x *bp = params->bp;
2948 u32 val; 4036 u32 val, cmd;
2949 u32 fw_resp; 4037 u32 fw_resp, fw_cmd_param;
2950 char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1]; 4038 char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
2951 char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1]; 4039 char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
2952 4040 phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
2953 val = REG_RD(bp, params->shmem_base + 4041 val = REG_RD(bp, params->shmem_base +
2954 offsetof(struct shmem_region, dev_info. 4042 offsetof(struct shmem_region, dev_info.
2955 port_feature_config[params->port].config)); 4043 port_feature_config[params->port].config));
@@ -2959,29 +4047,43 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
2959 return 0; 4047 return 0;
2960 } 4048 }
2961 4049
2962 /* Ask the FW to validate the module */ 4050 if (params->feature_config_flags &
2963 if (!(params->feature_config_flags & 4051 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
2964 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY)) { 4052 /* Use specific phy request */
4053 cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
4054 } else if (params->feature_config_flags &
4055 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
4056 /* Use first phy request only in case of non-dual media*/
4057 if (DUAL_MEDIA(params)) {
4058 DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
4059 "verification\n");
4060 return -EINVAL;
4061 }
4062 cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
4063 } else {
4064 /* No support for OPT MDL detection */
2965 DP(NETIF_MSG_LINK, "FW does not support OPT MDL " 4065 DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
2966 "verification\n"); 4066 "verification\n");
2967 return -EINVAL; 4067 return -EINVAL;
2968 } 4068 }
2969 4069 fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
2970 fw_resp = bnx2x_fw_command(bp, DRV_MSG_CODE_VRFY_OPT_MDL); 4070 fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
2971 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) { 4071 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
2972 DP(NETIF_MSG_LINK, "Approved module\n"); 4072 DP(NETIF_MSG_LINK, "Approved module\n");
2973 return 0; 4073 return 0;
2974 } 4074 }
2975 4075
2976 /* format the warning message */ 4076 /* format the warning message */
2977 if (bnx2x_read_sfp_module_eeprom(params, 4077 if (bnx2x_read_sfp_module_eeprom(phy,
4078 params,
2978 SFP_EEPROM_VENDOR_NAME_ADDR, 4079 SFP_EEPROM_VENDOR_NAME_ADDR,
2979 SFP_EEPROM_VENDOR_NAME_SIZE, 4080 SFP_EEPROM_VENDOR_NAME_SIZE,
2980 (u8 *)vendor_name)) 4081 (u8 *)vendor_name))
2981 vendor_name[0] = '\0'; 4082 vendor_name[0] = '\0';
2982 else 4083 else
2983 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; 4084 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
2984 if (bnx2x_read_sfp_module_eeprom(params, 4085 if (bnx2x_read_sfp_module_eeprom(phy,
4086 params,
2985 SFP_EEPROM_PART_NO_ADDR, 4087 SFP_EEPROM_PART_NO_ADDR,
2986 SFP_EEPROM_PART_NO_SIZE, 4088 SFP_EEPROM_PART_NO_SIZE,
2987 (u8 *)vendor_pn)) 4089 (u8 *)vendor_pn))
@@ -2989,22 +4091,78 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
2989 else 4091 else
2990 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0'; 4092 vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
2991 4093
2992 netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected, Port %d from %s part number %s\n", 4094 netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
4095 " Port %d from %s part number %s\n",
2993 params->port, vendor_name, vendor_pn); 4096 params->port, vendor_name, vendor_pn);
4097 phy->flags |= FLAGS_SFP_NOT_APPROVED;
2994 return -EINVAL; 4098 return -EINVAL;
2995} 4099}
2996 4100
2997static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params, 4101static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
2998 u16 edc_mode) 4102 struct link_params *params)
4103
2999{ 4104{
4105 u8 val;
3000 struct bnx2x *bp = params->bp; 4106 struct bnx2x *bp = params->bp;
3001 u8 port = params->port; 4107 u16 timeout;
3002 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 4108 /* Initialization time after hot-plug may take up to 300ms for some
4109 PHY types (e.g. JDSU) */
4110 for (timeout = 0; timeout < 60; timeout++) {
4111 if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
4112 == 0) {
4113 DP(NETIF_MSG_LINK, "SFP+ module initialization "
4114 "took %d ms\n", timeout * 5);
4115 return 0;
4116 }
4117 msleep(5);
4118 }
4119 return -EINVAL;
4120}
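
The wait loop above is a plain poll-with-timeout: up to 60 probes, 5 ms apart, covering the 300 ms worst case the comment cites. A standalone sketch of the pattern; probe() and sleep_ms() are stand-ins for the one-byte EEPROM read and msleep():

/* Returns the elapsed poll time in ms on success, -1 on timeout. */
static int wait_ready(int (*probe)(void *), void *ctx,
		      void (*sleep_ms)(int))
{
	int t;

	for (t = 0; t < 60; t++) {	/* 60 x 5 ms = 300 ms budget */
		if (probe(ctx) == 0)
			return t * 5;
		sleep_ms(5);
	}
	return -1;
}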
4121
4122static void bnx2x_8727_power_module(struct bnx2x *bp,
4123 struct bnx2x_phy *phy,
4124 u8 is_power_up) {
4125 /* Make sure GPIOs are not used for LED mode */
4126 u16 val;
4127 /*
4128 * In the GPIO register, bit 4 is used to determine whether the GPIOs
4129 * operate as INPUT or as OUTPUT: 1 selects input, 0 selects
4130 * output.
4131 * Bits 0-1 give the GPIO values for OUTPUT when bit 4 is 0.
4132 * Bits 8-9 give the GPIO values for INPUT when bit 4 is 1,
4133 * where the 1st bit is the over-current flag (input only) and the
4134 * 2nd bit is the power bit (output only).
4135 */
4136
4137 /*
4138 * If the NOC feature is disabled and power is up, set the GPIO
4139 * control as input to listen for the over-current indication
4140 */
4141 if (phy->flags & FLAGS_NOC)
4142 return;
4143 if (!(phy->flags &
4144 FLAGS_NOC) && is_power_up)
4145 val = (1<<4);
4146 else
4147 /*
4148 * Set GPIO control to OUTPUT, and set the power bit
4149 * according to is_power_up
4150 */
4151 val = ((!(is_power_up)) << 1);
4152
4153 bnx2x_cl45_write(bp, phy,
4154 MDIO_PMA_DEVAD,
4155 MDIO_PMA_REG_8727_GPIO_CTRL,
4156 val);
4157}
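
A worked example (illustrative only) of the value written above, following the bit layout in the comment; just the non-NOC case is modeled, since with FLAGS_NOC the function returns without touching the register:

#include <stdint.h>

/* Power up: bit 4 selects INPUT so over-current can be sensed.
 * Power down: OUTPUT mode with the power bit (bit 1) driven to
 * !is_power_up, i.e. 0x0002. */
static uint16_t gpio_ctrl_val(int is_power_up)
{
	if (is_power_up)
		return 1u << 4;			/* 0x0010: INPUT mode */
	return (uint16_t)(!is_power_up) << 1;	/* 0x0002: OUTPUT, power off */
}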
4158
4159static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
4160 struct bnx2x_phy *phy,
4161 u16 edc_mode)
4162{
3003 u16 cur_limiting_mode; 4163 u16 cur_limiting_mode;
3004 4164
3005 bnx2x_cl45_read(bp, port, 4165 bnx2x_cl45_read(bp, phy,
3006 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3007 ext_phy_addr,
3008 MDIO_PMA_DEVAD, 4166 MDIO_PMA_DEVAD,
3009 MDIO_PMA_REG_ROM_VER2, 4167 MDIO_PMA_REG_ROM_VER2,
3010 &cur_limiting_mode); 4168 &cur_limiting_mode);
@@ -3014,12 +4172,10 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
3014 if (edc_mode == EDC_MODE_LIMITING) { 4172 if (edc_mode == EDC_MODE_LIMITING) {
3015 DP(NETIF_MSG_LINK, 4173 DP(NETIF_MSG_LINK,
3016 "Setting LIMITING MODE\n"); 4174 "Setting LIMITING MODE\n");
3017 bnx2x_cl45_write(bp, port, 4175 bnx2x_cl45_write(bp, phy,
3018 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, 4176 MDIO_PMA_DEVAD,
3019 ext_phy_addr, 4177 MDIO_PMA_REG_ROM_VER2,
3020 MDIO_PMA_DEVAD, 4178 EDC_MODE_LIMITING);
3021 MDIO_PMA_REG_ROM_VER2,
3022 EDC_MODE_LIMITING);
3023 } else { /* LRM mode ( default )*/ 4179 } else { /* LRM mode ( default )*/
3024 4180
3025 DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); 4181 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
@@ -3030,27 +4186,19 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
3030 if (cur_limiting_mode != EDC_MODE_LIMITING) 4186 if (cur_limiting_mode != EDC_MODE_LIMITING)
3031 return 0; 4187 return 0;
3032 4188
3033 bnx2x_cl45_write(bp, port, 4189 bnx2x_cl45_write(bp, phy,
3034 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3035 ext_phy_addr,
3036 MDIO_PMA_DEVAD, 4190 MDIO_PMA_DEVAD,
3037 MDIO_PMA_REG_LRM_MODE, 4191 MDIO_PMA_REG_LRM_MODE,
3038 0); 4192 0);
3039 bnx2x_cl45_write(bp, port, 4193 bnx2x_cl45_write(bp, phy,
3040 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3041 ext_phy_addr,
3042 MDIO_PMA_DEVAD, 4194 MDIO_PMA_DEVAD,
3043 MDIO_PMA_REG_ROM_VER2, 4195 MDIO_PMA_REG_ROM_VER2,
3044 0x128); 4196 0x128);
3045 bnx2x_cl45_write(bp, port, 4197 bnx2x_cl45_write(bp, phy,
3046 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3047 ext_phy_addr,
3048 MDIO_PMA_DEVAD, 4198 MDIO_PMA_DEVAD,
3049 MDIO_PMA_REG_MISC_CTRL0, 4199 MDIO_PMA_REG_MISC_CTRL0,
3050 0x4008); 4200 0x4008);
3051 bnx2x_cl45_write(bp, port, 4201 bnx2x_cl45_write(bp, phy,
3052 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
3053 ext_phy_addr,
3054 MDIO_PMA_DEVAD, 4202 MDIO_PMA_DEVAD,
3055 MDIO_PMA_REG_LRM_MODE, 4203 MDIO_PMA_REG_LRM_MODE,
3056 0xaaaa); 4204 0xaaaa);
@@ -3058,46 +4206,33 @@ static u8 bnx2x_bcm8726_set_limiting_mode(struct link_params *params,
3058 return 0; 4206 return 0;
3059} 4207}
3060 4208
3061static u8 bnx2x_bcm8727_set_limiting_mode(struct link_params *params, 4209static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
4210 struct bnx2x_phy *phy,
3062 u16 edc_mode) 4211 u16 edc_mode)
3063{ 4212{
3064 struct bnx2x *bp = params->bp;
3065 u8 port = params->port;
3066 u16 phy_identifier; 4213 u16 phy_identifier;
3067 u16 rom_ver2_val; 4214 u16 rom_ver2_val;
3068 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 4215 bnx2x_cl45_read(bp, phy,
3069
3070 bnx2x_cl45_read(bp, port,
3071 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3072 ext_phy_addr,
3073 MDIO_PMA_DEVAD, 4216 MDIO_PMA_DEVAD,
3074 MDIO_PMA_REG_PHY_IDENTIFIER, 4217 MDIO_PMA_REG_PHY_IDENTIFIER,
3075 &phy_identifier); 4218 &phy_identifier);
3076 4219
3077 bnx2x_cl45_write(bp, port, 4220 bnx2x_cl45_write(bp, phy,
3078 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3079 ext_phy_addr,
3080 MDIO_PMA_DEVAD, 4221 MDIO_PMA_DEVAD,
3081 MDIO_PMA_REG_PHY_IDENTIFIER, 4222 MDIO_PMA_REG_PHY_IDENTIFIER,
3082 (phy_identifier & ~(1<<9))); 4223 (phy_identifier & ~(1<<9)));
3083 4224
3084 bnx2x_cl45_read(bp, port, 4225 bnx2x_cl45_read(bp, phy,
3085 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3086 ext_phy_addr,
3087 MDIO_PMA_DEVAD, 4226 MDIO_PMA_DEVAD,
3088 MDIO_PMA_REG_ROM_VER2, 4227 MDIO_PMA_REG_ROM_VER2,
3089 &rom_ver2_val); 4228 &rom_ver2_val);
3090 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */ 4229 /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
3091 bnx2x_cl45_write(bp, port, 4230 bnx2x_cl45_write(bp, phy,
3092 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3093 ext_phy_addr,
3094 MDIO_PMA_DEVAD, 4231 MDIO_PMA_DEVAD,
3095 MDIO_PMA_REG_ROM_VER2, 4232 MDIO_PMA_REG_ROM_VER2,
3096 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); 4233 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
3097 4234
3098 bnx2x_cl45_write(bp, port, 4235 bnx2x_cl45_write(bp, phy,
3099 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3100 ext_phy_addr,
3101 MDIO_PMA_DEVAD, 4236 MDIO_PMA_DEVAD,
3102 MDIO_PMA_REG_PHY_IDENTIFIER, 4237 MDIO_PMA_REG_PHY_IDENTIFIER,
3103 (phy_identifier | (1<<9))); 4238 (phy_identifier | (1<<9)));
@@ -3105,72 +4240,34 @@ static u8 bnx2x_bcm8727_set_limiting_mode(struct link_params *params,
3105 return 0; 4240 return 0;
3106} 4241}
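
A worked example of the byte merge in the ROM_VER2 write above: the high byte of the current value is preserved and the low byte is replaced by the EDC mode. merge_edc_mode() is an illustrative stand-in, not driver code:

#include <assert.h>
#include <stdint.h>

static uint16_t merge_edc_mode(uint16_t rom_ver2, uint16_t edc_mode)
{
	/* keep the MSB 8 bits, set the LSB 8 bits from edc_mode */
	return (rom_ver2 & 0xff00) | (edc_mode & 0x00ff);
}

int main(void)
{
	assert(merge_edc_mode(0x1234, 0x0055) == 0x1255);
	return 0;
}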
3107 4242
3108 4243static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
3109static u8 bnx2x_wait_for_sfp_module_initialized(struct link_params *params) 4244 struct link_params *params,
4245 u32 action)
3110{ 4246{
3111 u8 val;
3112 struct bnx2x *bp = params->bp; 4247 struct bnx2x *bp = params->bp;
3113 u16 timeout;
3114 /* Initialization time after hot-plug may take up to 300ms for some
3115 phys type ( e.g. JDSU ) */
3116 for (timeout = 0; timeout < 60; timeout++) {
3117 if (bnx2x_read_sfp_module_eeprom(params, 1, 1, &val)
3118 == 0) {
3119 DP(NETIF_MSG_LINK, "SFP+ module initialization "
3120 "took %d ms\n", timeout * 5);
3121 return 0;
3122 }
3123 msleep(5);
3124 }
3125 return -EINVAL;
3126}
3127 4248
3128static void bnx2x_8727_power_module(struct bnx2x *bp, 4249 switch (action) {
3129 struct link_params *params, 4250 case DISABLE_TX:
3130 u8 ext_phy_addr, u8 is_power_up) { 4251 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
3131 /* Make sure GPIOs are not using for LED mode */ 4252 break;
3132 u16 val; 4253 case ENABLE_TX:
3133 u8 port = params->port; 4254 if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
3134 /* 4255 bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
3135 * In the GPIO register, bit 4 is use to detemine if the GPIOs are 4256 break;
3136 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for 4257 default:
3137 * output 4258 DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
3138 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0 4259 action);
3139 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1 4260 return;
3140 * where the 1st bit is the over-current(only input), and 2nd bit is 4261 }
3141 * for power( only output )
3142 */
3143
3144 /*
3145 * In case of NOC feature is disabled and power is up, set GPIO control
3146 * as input to enable listening of over-current indication
3147 */
3148
3149 if (!(params->feature_config_flags &
3150 FEATURE_CONFIG_BCM8727_NOC) && is_power_up)
3151 val = (1<<4);
3152 else
3153 /*
3154 * Set GPIO control to OUTPUT, and set the power bit
3155 * to according to the is_power_up
3156 */
3157 val = ((!(is_power_up)) << 1);
3158
3159 bnx2x_cl45_write(bp, port,
3160 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
3161 ext_phy_addr,
3162 MDIO_PMA_DEVAD,
3163 MDIO_PMA_REG_8727_GPIO_CTRL,
3164 val);
3165} 4262}
3166 4263
3167static u8 bnx2x_sfp_module_detection(struct link_params *params) 4264static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
4265 struct link_params *params)
3168{ 4266{
3169 struct bnx2x *bp = params->bp; 4267 struct bnx2x *bp = params->bp;
3170 u16 edc_mode; 4268 u16 edc_mode;
3171 u8 rc = 0; 4269 u8 rc = 0;
3172 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 4270
3173 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3174 u32 val = REG_RD(bp, params->shmem_base + 4271 u32 val = REG_RD(bp, params->shmem_base +
3175 offsetof(struct shmem_region, dev_info. 4272 offsetof(struct shmem_region, dev_info.
3176 port_feature_config[params->port].config)); 4273 port_feature_config[params->port].config));
@@ -3178,10 +4275,10 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
3178 DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n", 4275 DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
3179 params->port); 4276 params->port);
3180 4277
3181 if (bnx2x_get_edc_mode(params, &edc_mode) != 0) { 4278 if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
3182 DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); 4279 DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
3183 return -EINVAL; 4280 return -EINVAL;
3184 } else if (bnx2x_verify_sfp_module(params) != 4281 } else if (bnx2x_verify_sfp_module(phy, params) !=
3185 0) { 4282 0) {
3186 /* check SFP+ module compatibility */ 4283 /* check SFP+ module compatibility */
3187 DP(NETIF_MSG_LINK, "Module verification failed!!\n"); 4284 DP(NETIF_MSG_LINK, "Module verification failed!!\n");
@@ -3190,13 +4287,12 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
3190 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 4287 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
3191 MISC_REGISTERS_GPIO_HIGH, 4288 MISC_REGISTERS_GPIO_HIGH,
3192 params->port); 4289 params->port);
3193 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) && 4290 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
3194 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 4291 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
3195 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) { 4292 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
3196 /* Shutdown SFP+ module */ 4293 /* Shutdown SFP+ module */
3197 DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n"); 4294 DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
3198 bnx2x_8727_power_module(bp, params, 4295 bnx2x_8727_power_module(bp, phy, 0);
3199 ext_phy_addr, 0);
3200 return rc; 4296 return rc;
3201 } 4297 }
3202 } else { 4298 } else {
@@ -3208,15 +4304,15 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
3208 } 4304 }
3209 4305
3210 /* power up the SFP module */ 4306 /* power up the SFP module */
3211 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) 4307 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
3212 bnx2x_8727_power_module(bp, params, ext_phy_addr, 1); 4308 bnx2x_8727_power_module(bp, phy, 1);
3213 4309
3214 /* Check and set limiting mode / LRM mode on 8726. 4310 /* Check and set limiting mode / LRM mode on 8726.
3215 On 8727 it is done automatically */ 4311 On 8727 it is done automatically */
3216 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) 4312 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
3217 bnx2x_bcm8726_set_limiting_mode(params, edc_mode); 4313 bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
3218 else 4314 else
3219 bnx2x_bcm8727_set_limiting_mode(params, edc_mode); 4315 bnx2x_8727_set_limiting_mode(bp, phy, edc_mode);
3220 /* 4316 /*
3221 * Enable transmit for this module if the module is approved, or 4317 * Enable transmit for this module if the module is approved, or
3222 * if unapproved modules should also enable the Tx laser 4318 * if unapproved modules should also enable the Tx laser
@@ -3224,11 +4320,9 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
3224 if (rc == 0 || 4320 if (rc == 0 ||
3225 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != 4321 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
3226 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 4322 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
3227 bnx2x_sfp_set_transmitter(bp, params->port, 4323 bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
3228 ext_phy_type, ext_phy_addr, 1);
3229 else 4324 else
3230 bnx2x_sfp_set_transmitter(bp, params->port, 4325 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
3231 ext_phy_type, ext_phy_addr, 0);
3232 4326
3233 return rc; 4327 return rc;
3234} 4328}
@@ -3236,6 +4330,7 @@ static u8 bnx2x_sfp_module_detection(struct link_params *params)
3236void bnx2x_handle_module_detect_int(struct link_params *params) 4330void bnx2x_handle_module_detect_int(struct link_params *params)
3237{ 4331{
3238 struct bnx2x *bp = params->bp; 4332 struct bnx2x *bp = params->bp;
4333 struct bnx2x_phy *phy = &params->phy[EXT_PHY1];
3239 u32 gpio_val; 4334 u32 gpio_val;
3240 u8 port = params->port; 4335 u8 port = params->port;
3241 4336
@@ -3245,1349 +4340,587 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
3245 params->port); 4340 params->port);
3246 4341
3247 /* Get current gpio val reflecting module plugged in / out*/ 4342 /* Get current gpio val reflecting module plugged in / out*/
3248 gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port); 4343 gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
3249 4344
3250 /* Call the handling function in case module is detected */ 4345 /* Call the handling function in case module is detected */
3251 if (gpio_val == 0) { 4346 if (gpio_val == 0) {
3252 4347
3253 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, 4348 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
3254 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, 4349 MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
3255 port); 4350 port);
3256 4351
3257 if (bnx2x_wait_for_sfp_module_initialized(params) == 4352 if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
3258 0) 4353 bnx2x_sfp_module_detection(phy, params);
3259 bnx2x_sfp_module_detection(params);
3260 else 4354 else
3261 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 4355 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
3262 } else { 4356 } else {
3263 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3264
3265 u32 ext_phy_type =
3266 XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3267 u32 val = REG_RD(bp, params->shmem_base + 4357 u32 val = REG_RD(bp, params->shmem_base +
3268 offsetof(struct shmem_region, dev_info. 4358 offsetof(struct shmem_region, dev_info.
3269 port_feature_config[params->port]. 4359 port_feature_config[params->port].
3270 config)); 4360 config));
3271 4361
3272 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, 4362 bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
3273 MISC_REGISTERS_GPIO_INT_OUTPUT_SET, 4363 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
3274 port); 4364 port);
3275 /* Module was plugged out. */ 4365 /* Module was plugged out. */
3276 /* Disable transmit for this module */ 4366 /* Disable transmit for this module */
3277 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == 4367 if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
3278 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 4368 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
3279 bnx2x_sfp_set_transmitter(bp, params->port, 4369 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
3280 ext_phy_type, ext_phy_addr, 0);
3281 } 4370 }
3282} 4371}
3283 4372
3284static void bnx2x_bcm807x_force_10G(struct link_params *params) 4373/******************************************************************/
4374/* common BCM8706/BCM8726 PHY SECTION */
4375/******************************************************************/
4376static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
4377 struct link_params *params,
4378 struct link_vars *vars)
3285{ 4379{
4380 u8 link_up = 0;
4381 u16 val1, val2, rx_sd, pcs_status;
3286 struct bnx2x *bp = params->bp; 4382 struct bnx2x *bp = params->bp;
3287 u8 port = params->port; 4383 DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
3288 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 4384 /* Clear RX Alarm*/
3289 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 4385 bnx2x_cl45_read(bp, phy,
3290 4386 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2);
3291 /* Force KR or KX */ 4387 /* clear LASI indication*/
3292 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 4388 bnx2x_cl45_read(bp, phy,
3293 MDIO_PMA_DEVAD, 4389 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
3294 MDIO_PMA_REG_CTRL, 4390 bnx2x_cl45_read(bp, phy,
3295 0x2040); 4391 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
3296 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 4392 DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
3297 MDIO_PMA_DEVAD, 4393
3298 MDIO_PMA_REG_10G_CTRL2, 4394 bnx2x_cl45_read(bp, phy,
3299 0x000b); 4395 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
3300 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 4396 bnx2x_cl45_read(bp, phy,
3301 MDIO_PMA_DEVAD, 4397 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
3302 MDIO_PMA_REG_BCM_CTRL, 4398 bnx2x_cl45_read(bp, phy,
3303 0x0000); 4399 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
3304 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 4400 bnx2x_cl45_read(bp, phy,
3305 MDIO_AN_DEVAD, 4401 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
3306 MDIO_AN_REG_CTRL, 4402
3307 0x0000); 4403 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
3308} 4404 " link_status 0x%x\n", rx_sd, pcs_status, val2);
3309 4405 /* link is up if both bit 0 of pmd_rx_sd and
3310static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params) 4406 * bit 0 of pcs_status are set, or if the autoneg bit
3311{ 4407 * 1 is set
3312 struct bnx2x *bp = params->bp; 4408 */
3313 u8 port = params->port; 4409 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
3314 u16 val; 4410 if (link_up) {
3315 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 4411 if (val2 & (1<<1))
3316 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 4412 vars->line_speed = SPEED_1000;
3317 4413 else
3318 bnx2x_cl45_read(bp, params->port, 4414 vars->line_speed = SPEED_10000;
3319 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 4415 bnx2x_ext_phy_resolve_fc(phy, params, vars);
3320 ext_phy_addr,
3321 MDIO_PMA_DEVAD,
3322 MDIO_PMA_REG_8073_CHIP_REV, &val);
3323
3324 if (val == 0) {
3325 /* Mustn't set low power mode in 8073 A0 */
3326 return;
3327 } 4416 }
3328 4417 return link_up;
3329 /* Disable PLL sequencer (use read-modify-write to clear bit 13) */
3330 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
3331 MDIO_XS_DEVAD,
3332 MDIO_XS_PLL_SEQUENCER, &val);
3333 val &= ~(1<<13);
3334 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3335 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3336
3337 /* PLL controls */
3338 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3339 MDIO_XS_DEVAD, 0x805E, 0x1077);
3340 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3341 MDIO_XS_DEVAD, 0x805D, 0x0000);
3342 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3343 MDIO_XS_DEVAD, 0x805C, 0x030B);
3344 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3345 MDIO_XS_DEVAD, 0x805B, 0x1240);
3346 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3347 MDIO_XS_DEVAD, 0x805A, 0x2490);
3348
3349 /* Tx Controls */
3350 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3351 MDIO_XS_DEVAD, 0x80A7, 0x0C74);
3352 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3353 MDIO_XS_DEVAD, 0x80A6, 0x9041);
3354 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3355 MDIO_XS_DEVAD, 0x80A5, 0x4640);
3356
3357 /* Rx Controls */
3358 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3359 MDIO_XS_DEVAD, 0x80FE, 0x01C4);
3360 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3361 MDIO_XS_DEVAD, 0x80FD, 0x9249);
3362 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3363 MDIO_XS_DEVAD, 0x80FC, 0x2015);
3364
3365 /* Enable PLL sequencer (use read-modify-write to set bit 13) */
3366 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
3367 MDIO_XS_DEVAD,
3368 MDIO_XS_PLL_SEQUENCER, &val);
3369 val |= (1<<13);
3370 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3371 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
3372} 4418}
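
A standalone sketch of the link decision above: 10G is up when bit 0 of both the PMD RX signal-detect and PCS status words is set, and the 1G autoneg path wins whenever bit 1 of the AN link-status word is set. The function and parameter names are illustrative:

#include <stdint.h>

static int link_8706_8726(uint16_t rx_sd, uint16_t pcs_status,
			  uint16_t an_status, int *speed_mbps)
{
	if (!((rx_sd & pcs_status & 0x1) || (an_status & (1 << 1))))
		return 0;			/* link down */
	*speed_mbps = (an_status & (1 << 1)) ? 1000 : 10000;
	return 1;
}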
3373 4419
3374static void bnx2x_8073_set_pause_cl37(struct link_params *params, 4420/******************************************************************/
3375 struct link_vars *vars) 4421/* BCM8706 PHY SECTION */
4422/******************************************************************/
4423static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
4424 struct link_params *params,
4425 struct link_vars *vars)
3376{ 4426{
4427 u16 cnt, val;
3377 struct bnx2x *bp = params->bp; 4428 struct bnx2x *bp = params->bp;
3378 u16 cl37_val; 4429 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3379 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 4430 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
3380 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 4431 /* HW reset */
3381 4432 bnx2x_ext_phy_hw_reset(bp, params->port);
3382 bnx2x_cl45_read(bp, params->port, 4433 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
3383 ext_phy_type, 4434 bnx2x_wait_reset_complete(bp, phy);
3384 ext_phy_addr,
3385 MDIO_AN_DEVAD,
3386 MDIO_AN_REG_CL37_FC_LD, &cl37_val);
3387
3388 cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3389 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3390 4435
3391 if ((vars->ieee_fc & 4436 /* Wait until fw is loaded */
3392 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) == 4437 for (cnt = 0; cnt < 100; cnt++) {
3393 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) { 4438 bnx2x_cl45_read(bp, phy,
3394 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; 4439 MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
4440 if (val)
4441 break;
4442 msleep(10);
3395 } 4443 }
3396 if ((vars->ieee_fc & 4444 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
3397 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == 4445 if ((params->feature_config_flags &
3398 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { 4446 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
3399 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 4447 u8 i;
4448 u16 reg;
4449 for (i = 0; i < 4; i++) {
4450 reg = MDIO_XS_8706_REG_BANK_RX0 +
4451 i*(MDIO_XS_8706_REG_BANK_RX1 -
4452 MDIO_XS_8706_REG_BANK_RX0);
4453 bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
4454 /* Clear first 3 bits of the control */
4455 val &= ~0x7;
4456 /* Set control bits according to configuration */
4457 val |= (phy->rx_preemphasis[i] & 0x7);
4458 DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
4459 " reg 0x%x <-- val 0x%x\n", reg, val);
4460 bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
4461 }
3400 } 4462 }
3401 if ((vars->ieee_fc & 4463 /* Force speed */
3402 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == 4464 if (phy->req_line_speed == SPEED_10000) {
3403 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { 4465 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
3404 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 4466
4467 bnx2x_cl45_write(bp, phy,
4468 MDIO_PMA_DEVAD,
4469 MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
4470 bnx2x_cl45_write(bp, phy,
4471 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
4472 } else {
4473 /* Force 1Gbps using autoneg with 1G advertisement */
4474
4475 /* Allow CL37 through CL73 */
4476 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
4477 bnx2x_cl45_write(bp, phy,
4478 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
4479
4480 /* Enable Full-Duplex advertisement on CL37 */
4481 bnx2x_cl45_write(bp, phy,
4482 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
4483 /* Enable CL37 AN */
4484 bnx2x_cl45_write(bp, phy,
4485 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
4486 /* 1G support */
4487 bnx2x_cl45_write(bp, phy,
4488 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
4489
4490 /* Enable clause 73 AN */
4491 bnx2x_cl45_write(bp, phy,
4492 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
4493 bnx2x_cl45_write(bp, phy,
4494 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
4495 0x0400);
4496 bnx2x_cl45_write(bp, phy,
4497 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
4498 0x0004);
3405 } 4499 }
3406 DP(NETIF_MSG_LINK, 4500 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
3407 "Ext phy AN advertize cl37 0x%x\n", cl37_val); 4501 return 0;
4502}
3408 4503
3409 bnx2x_cl45_write(bp, params->port, 4504static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy,
3410 ext_phy_type, 4505 struct link_params *params,
3411 ext_phy_addr, 4506 struct link_vars *vars)
3412 MDIO_AN_DEVAD, 4507{
3413 MDIO_AN_REG_CL37_FC_LD, cl37_val); 4508 return bnx2x_8706_8726_read_status(phy, params, vars);
3414 msleep(500);
3415} 4509}
3416 4510
3417static void bnx2x_ext_phy_set_pause(struct link_params *params, 4511/******************************************************************/
3418 struct link_vars *vars) 4512/* BCM8726 PHY SECTION */
4513/******************************************************************/
4514static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
4515 struct link_params *params)
3419{ 4516{
3420 struct bnx2x *bp = params->bp; 4517 struct bnx2x *bp = params->bp;
3421 u16 val; 4518 DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
3422 u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config); 4519 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
3423 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3424
3425 /* read modify write pause advertizing */
3426 bnx2x_cl45_read(bp, params->port,
3427 ext_phy_type,
3428 ext_phy_addr,
3429 MDIO_AN_DEVAD,
3430 MDIO_AN_REG_ADV_PAUSE, &val);
3431
3432 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
3433
3434 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3435
3436 if ((vars->ieee_fc &
3437 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
3438 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
3439 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
3440 }
3441 if ((vars->ieee_fc &
3442 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
3443 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
3444 val |=
3445 MDIO_AN_REG_ADV_PAUSE_PAUSE;
3446 }
3447 DP(NETIF_MSG_LINK,
3448 "Ext phy AN advertize 0x%x\n", val);
3449 bnx2x_cl45_write(bp, params->port,
3450 ext_phy_type,
3451 ext_phy_addr,
3452 MDIO_AN_DEVAD,
3453 MDIO_AN_REG_ADV_PAUSE, val);
3454} 4520}
3455static void bnx2x_set_preemphasis(struct link_params *params) 4521
4522static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
4523 struct link_params *params)
3456{ 4524{
3457 u16 bank, i = 0;
3458 struct bnx2x *bp = params->bp; 4525 struct bnx2x *bp = params->bp;
4526 /* Need to wait 100ms after reset */
4527 msleep(100);
3459 4528
3460 for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; 4529 /* Micro controller re-boot */
3461 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) { 4530 bnx2x_cl45_write(bp, phy,
3462 CL45_WR_OVER_CL22(bp, params->port, 4531 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
3463 params->phy_addr,
3464 bank,
3465 MDIO_RX0_RX_EQ_BOOST,
3466 params->xgxs_config_rx[i]);
3467 }
3468
3469 for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
3470 bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
3471 CL45_WR_OVER_CL22(bp, params->port,
3472 params->phy_addr,
3473 bank,
3474 MDIO_TX0_TX_DRIVER,
3475 params->xgxs_config_tx[i]);
3476 }
3477}
3478 4532
4533 /* Set soft reset */
4534 bnx2x_cl45_write(bp, phy,
4535 MDIO_PMA_DEVAD,
4536 MDIO_PMA_REG_GEN_CTRL,
4537 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
3479 4538
3480static void bnx2x_8481_set_led4(struct link_params *params, 4539 bnx2x_cl45_write(bp, phy,
3481 u32 ext_phy_type, u8 ext_phy_addr) 4540 MDIO_PMA_DEVAD,
3482{ 4541 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
3483 struct bnx2x *bp = params->bp;
3484 4542
3485 /* PHYC_CTL_LED_CTL */ 4543 bnx2x_cl45_write(bp, phy,
3486 bnx2x_cl45_write(bp, params->port,
3487 ext_phy_type,
3488 ext_phy_addr,
3489 MDIO_PMA_DEVAD, 4544 MDIO_PMA_DEVAD,
3490 MDIO_PMA_REG_8481_LINK_SIGNAL, 0xa482); 4545 MDIO_PMA_REG_GEN_CTRL,
4546 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
3491 4547
3492 /* Unmask LED4 for 10G link */ 4548 /* wait for 150ms for microcode load */
3493 bnx2x_cl45_write(bp, params->port, 4549 msleep(150);
3494 ext_phy_type, 4550
3495 ext_phy_addr, 4551 /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
4552 bnx2x_cl45_write(bp, phy,
3496 MDIO_PMA_DEVAD, 4553 MDIO_PMA_DEVAD,
3497 MDIO_PMA_REG_8481_SIGNAL_MASK, (1<<6)); 4554 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
3498 /* 'Interrupt Mask' */
3499 bnx2x_cl45_write(bp, params->port,
3500 ext_phy_type,
3501 ext_phy_addr,
3502 MDIO_AN_DEVAD,
3503 0xFFFB, 0xFFFD);
3504}
3505static void bnx2x_8481_set_legacy_led_mode(struct link_params *params,
3506 u32 ext_phy_type, u8 ext_phy_addr)
3507{
3508 struct bnx2x *bp = params->bp;
3509 4555
3510 /* LED1 (10G Link): Disable LED1 when 10/100/1000 link */ 4556 msleep(200);
3511 /* LED2 (1G/100/10 Link): Enable LED2 when 10/100/1000 link) */ 4557 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
3512 bnx2x_cl45_write(bp, params->port,
3513 ext_phy_type,
3514 ext_phy_addr,
3515 MDIO_AN_DEVAD,
3516 MDIO_AN_REG_8481_LEGACY_SHADOW,
3517 (1<<15) | (0xd << 10) | (0xc<<4) | 0xe);
3518} 4558}
3519 4559
3520static void bnx2x_8481_set_10G_led_mode(struct link_params *params, 4560static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
3521 u32 ext_phy_type, u8 ext_phy_addr) 4561 struct link_params *params,
4562 struct link_vars *vars)
3522{ 4563{
3523 struct bnx2x *bp = params->bp; 4564 struct bnx2x *bp = params->bp;
3524 u16 val1; 4565 u16 val1;
3525 4566 u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
3526 /* LED1 (10G Link) */ 4567 if (link_up) {
3527 /* Enable continuse based on source 7(10G-link) */ 4568 bnx2x_cl45_read(bp, phy,
3528 bnx2x_cl45_read(bp, params->port, 4569 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
3529 ext_phy_type, 4570 &val1);
3530 ext_phy_addr, 4571 if (val1 & (1<<15)) {
3531 MDIO_PMA_DEVAD, 4572 DP(NETIF_MSG_LINK, "Tx is disabled\n");
3532 MDIO_PMA_REG_8481_LINK_SIGNAL, 4573 link_up = 0;
3533 &val1); 4574 vars->line_speed = 0;
3534 /* Set bit 2 to 0, and bits [1:0] to 10 */ 4575 }
3535 val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/ 4576 }
3536 val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */ 4577 return link_up;
3537
3538 bnx2x_cl45_write(bp, params->port,
3539 ext_phy_type,
3540 ext_phy_addr,
3541 MDIO_PMA_DEVAD,
3542 MDIO_PMA_REG_8481_LINK_SIGNAL,
3543 val1);
3544
3545 /* Unmask LED1 for 10G link */
3546 bnx2x_cl45_read(bp, params->port,
3547 ext_phy_type,
3548 ext_phy_addr,
3549 MDIO_PMA_DEVAD,
3550 MDIO_PMA_REG_8481_LED1_MASK,
3551 &val1);
3552 /* Set bit 2 to 0, and bits [1:0] to 10 */
3553 val1 |= (1<<7);
3554 bnx2x_cl45_write(bp, params->port,
3555 ext_phy_type,
3556 ext_phy_addr,
3557 MDIO_PMA_DEVAD,
3558 MDIO_PMA_REG_8481_LED1_MASK,
3559 val1);
3560
3561 /* LED2 (1G/100/10G Link) */
3562 /* Mask LED2 for 10G link */
3563 bnx2x_cl45_write(bp, params->port,
3564 ext_phy_type,
3565 ext_phy_addr,
3566 MDIO_PMA_DEVAD,
3567 MDIO_PMA_REG_8481_LED2_MASK,
3568 0);
3569
3570 /* Unmask LED3 for 10G link */
3571 bnx2x_cl45_write(bp, params->port,
3572 ext_phy_type,
3573 ext_phy_addr,
3574 MDIO_PMA_DEVAD,
3575 MDIO_PMA_REG_8481_LED3_MASK,
3576 0x6);
3577 bnx2x_cl45_write(bp, params->port,
3578 ext_phy_type,
3579 ext_phy_addr,
3580 MDIO_PMA_DEVAD,
3581 MDIO_PMA_REG_8481_LED3_BLINK,
3582 0);
3583} 4578}
3584 4579
3585 4580
3586static void bnx2x_init_internal_phy(struct link_params *params, 4581static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
3587 struct link_vars *vars, 4582 struct link_params *params,
3588 u8 enable_cl73) 4583 struct link_vars *vars)
3589{ 4584{
3590 struct bnx2x *bp = params->bp; 4585 struct bnx2x *bp = params->bp;
4586 u32 val;
4587 u32 swap_val, swap_override, aeu_gpio_mask, offset;
4588 DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
4589 /* Restore normal power mode*/
4590 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4591 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
3591 4592
3592 if (!(vars->phy_flags & PHY_SGMII_FLAG)) { 4593 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3593 if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 4594 MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
3594 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && 4595
3595 (params->feature_config_flags & 4596 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
3596 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) 4597 bnx2x_wait_reset_complete(bp, phy);
3597 bnx2x_set_preemphasis(params); 4598
3598 4599 bnx2x_8726_external_rom_boot(phy, params);
3599 /* forced speed requested? */ 4600
3600 if (vars->line_speed != SPEED_AUTO_NEG || 4601 /* Need to call module detected on initialization since
3601 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 4602 the module detection triggered by actual module
3602 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && 4603 insertion might occur before driver is loaded, and when
3603 params->loopback_mode == LOOPBACK_EXT)) { 4604 driver is loaded, it reset all registers, including the
3604 DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); 4605 transmitter */
3605 4606 bnx2x_sfp_module_detection(phy, params);
3606 /* disable autoneg */ 4607
3607 bnx2x_set_autoneg(params, vars, 0); 4608 if (phy->req_line_speed == SPEED_1000) {
4609 DP(NETIF_MSG_LINK, "Setting 1G force\n");
4610 bnx2x_cl45_write(bp, phy,
4611 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
4612 bnx2x_cl45_write(bp, phy,
4613 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
4614 bnx2x_cl45_write(bp, phy,
4615 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x5);
4616 bnx2x_cl45_write(bp, phy,
4617 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
4618 0x400);
4619 } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
4620 (phy->speed_cap_mask &
4621 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
4622 ((phy->speed_cap_mask &
4623 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
4624 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
4625 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
4626 /* Set Flow control */
4627 bnx2x_ext_phy_set_pause(params, phy, vars);
4628 bnx2x_cl45_write(bp, phy,
4629 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
4630 bnx2x_cl45_write(bp, phy,
4631 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
4632 bnx2x_cl45_write(bp, phy,
4633 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
4634 bnx2x_cl45_write(bp, phy,
4635 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
4636 bnx2x_cl45_write(bp, phy,
4637 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
4638 /* Enable RX-ALARM control to receive an
4639 interrupt for 1G speed change */
4640 bnx2x_cl45_write(bp, phy,
4641 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
4642 bnx2x_cl45_write(bp, phy,
4643 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
4644 0x400);
4645
4646 } else { /* Default 10G. Set only LASI control */
4647 bnx2x_cl45_write(bp, phy,
4648 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
4649 }
3608 4650
3609 /* program speed and duplex */ 4651 /* Set TX PreEmphasis if needed */
3610 bnx2x_program_serdes(params, vars); 4652 if ((params->feature_config_flags &
4653 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
4654 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
4655 "TX_CTRL2 0x%x\n",
4656 phy->tx_preemphasis[0],
4657 phy->tx_preemphasis[1]);
4658 bnx2x_cl45_write(bp, phy,
4659 MDIO_PMA_DEVAD,
4660 MDIO_PMA_REG_8726_TX_CTRL1,
4661 phy->tx_preemphasis[0]);
4662
4663 bnx2x_cl45_write(bp, phy,
4664 MDIO_PMA_DEVAD,
4665 MDIO_PMA_REG_8726_TX_CTRL2,
4666 phy->tx_preemphasis[1]);
4667 }
3611 4668
3612 } else { /* AN_mode */ 4669 /* Set GPIO3 to trigger SFP+ module insertion/removal */
3613 DP(NETIF_MSG_LINK, "not SGMII, AN\n"); 4670 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4671 MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
3614 4672
3615 /* AN enabled */ 4673 /* The GPIO should be swapped if the swap register is set and active */
3616 bnx2x_set_brcm_cl37_advertisment(params); 4674 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4675 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3617 4676
3618 /* program duplex & pause advertisement (for aneg) */ 4677 /* Select function upon port-swap configuration */
3619 bnx2x_set_ieee_aneg_advertisment(params, 4678 if (params->port == 0) {
3620 vars->ieee_fc); 4679 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4680 aeu_gpio_mask = (swap_val && swap_override) ?
4681 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4682 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4683 } else {
4684 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4685 aeu_gpio_mask = (swap_val && swap_override) ?
4686 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4687 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4688 }
4689 val = REG_RD(bp, offset);
4690 /* add GPIO3 to group */
4691 val |= aeu_gpio_mask;
4692 REG_WR(bp, offset, val);
4693 return 0;
3621 4694
3622 /* enable autoneg */ 4695}
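
A standalone sketch of the AEU group selection at the end of the function above: with the NIG port swap strapped and overridden, each port attaches to the other port's GPIO3 attention bit. The mask values are symbolic stand-ins for the AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_* constants:

enum { GPIO3_FUNC_0 = 1 << 0, GPIO3_FUNC_1 = 1 << 1 };	/* illustrative */

static int aeu_gpio3_mask(int port, int swap_val, int swap_override)
{
	int swapped = swap_val && swap_override;

	if (port == 0)
		return swapped ? GPIO3_FUNC_1 : GPIO3_FUNC_0;
	return swapped ? GPIO3_FUNC_0 : GPIO3_FUNC_1;
}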
3623 bnx2x_set_autoneg(params, vars, enable_cl73);
3624 4696
3625 /* enable and restart AN */ 4697static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
3626 bnx2x_restart_autoneg(params, enable_cl73); 4698 struct link_params *params)
3627 } 4699{
4700 struct bnx2x *bp = params->bp;
4701 DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
4702 /* Set serial boot control for external load */
4703 bnx2x_cl45_write(bp, phy,
4704 MDIO_PMA_DEVAD,
4705 MDIO_PMA_REG_GEN_CTRL, 0x0001);
4706}
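
In bnx2x_8726_config_init above, GPIO3 attention is routed into the AEU enable register of the function that owns the port, and the mapping is inverted when the NIG port-swap register and its strap override are both set. A hedged sketch of that selection, with stand-in bit values in place of the real AEU_INPUTS_ATTN_BITS_* constants:

	#include <stdint.h>

	/* Stand-in bit values; the real AEU_INPUTS_ATTN_BITS_* constants
	 * live in the bnx2x register headers. */
	#define GPIO3_FUNC0_ATTN (1u << 0)
	#define GPIO3_FUNC1_ATTN (1u << 1)

	/* Port 0 normally maps to function 0 and port 1 to function 1;
	 * when both the swap register and the strap override are set the
	 * mapping flips, mirroring the ternary selection above. */
	static uint32_t gpio3_aeu_mask(int port, uint32_t swap_val,
				       uint32_t swap_override)
	{
		int swapped = (swap_val && swap_override);

		if (port == 0)
			return swapped ? GPIO3_FUNC1_ATTN : GPIO3_FUNC0_ATTN;
		return swapped ? GPIO3_FUNC0_ATTN : GPIO3_FUNC1_ATTN;
	}
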
3628
3629 	} else { /* SGMII mode */
3630 		DP(NETIF_MSG_LINK, "SGMII\n");
3631
3632 		bnx2x_initialize_sgmii_process(params, vars);
4707
4708 /******************************************************************/
4709 /*			BCM8727 PHY SECTION			  */
4710 /******************************************************************/
4711
4712 static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
4713 struct link_params *params, u8 mode)
4714{
4715 struct bnx2x *bp = params->bp;
4716 u16 led_mode_bitmask = 0;
4717 u16 gpio_pins_bitmask = 0;
4718 u16 val;
4719 /* Only NOC flavor requires to set the LED specifically */
4720 if (!(phy->flags & FLAGS_NOC))
4721 return;
4722 switch (mode) {
4723 case LED_MODE_FRONT_PANEL_OFF:
4724 case LED_MODE_OFF:
4725 led_mode_bitmask = 0;
4726 gpio_pins_bitmask = 0x03;
4727 break;
4728 case LED_MODE_ON:
4729 led_mode_bitmask = 0;
4730 gpio_pins_bitmask = 0x02;
4731 break;
4732 case LED_MODE_OPER:
4733 led_mode_bitmask = 0x60;
4734 gpio_pins_bitmask = 0x11;
4735 break;
3633 	}
4736 	}
4737 bnx2x_cl45_read(bp, phy,
4738 MDIO_PMA_DEVAD,
4739 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4740 &val);
4741 val &= 0xff8f;
4742 val |= led_mode_bitmask;
4743 bnx2x_cl45_write(bp, phy,
4744 MDIO_PMA_DEVAD,
4745 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4746 val);
4747 bnx2x_cl45_read(bp, phy,
4748 MDIO_PMA_DEVAD,
4749 MDIO_PMA_REG_8727_GPIO_CTRL,
4750 &val);
4751 val &= 0xffe0;
4752 val |= gpio_pins_bitmask;
4753 bnx2x_cl45_write(bp, phy,
4754 MDIO_PMA_DEVAD,
4755 MDIO_PMA_REG_8727_GPIO_CTRL,
4756 val);
4757}
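
bnx2x_8727_set_link_led() maps the requested mode onto two small bitfields: bits 4..6 of the PCS option control register (preserved and cleared via the 0xff8f mask) and the low GPIO pins (cleared via 0xffe0). The same mapping as a table, illustrative only:

	#include <stdint.h>

	/* Stand-ins for the LED_MODE_* values used by the switch above. */
	enum led_mode { LED_OFF, LED_ON, LED_OPER };

	struct led_cfg {
		uint16_t led_mode_bits;	/* OR'ed in after (reg & 0xff8f) */
		uint16_t gpio_pin_bits;	/* OR'ed in after (reg & 0xffe0) */
	};

	static const struct led_cfg led_table[] = {
		[LED_OFF]  = { 0x0000, 0x03 },	/* also FRONT_PANEL_OFF */
		[LED_ON]   = { 0x0000, 0x02 },
		[LED_OPER] = { 0x0060, 0x11 },
	};

	/* The read-modify-write step the function performs twice. */
	static uint16_t rmw(uint16_t reg, uint16_t keep_mask, uint16_t set_bits)
	{
		return (uint16_t)((reg & keep_mask) | set_bits);
	}
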
4758static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
4759 struct link_params *params) {
4760 u32 swap_val, swap_override;
4761 u8 port;
4762 /**
4763 * The PHY reset is controlled by GPIO 1. Fake the port number
4764 * to cancel the swap done in set_gpio()
4765 */
4766 struct bnx2x *bp = params->bp;
4767 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4768 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4769 port = (swap_val && swap_override) ^ 1;
4770 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
4771 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
4772 }
4773
4774 static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
4775 				 struct link_params *params,
4776 				 struct link_vars *vars)
4777 {
4778 	u16 tmp1, val, mod_abs;
4779 	u16 rx_alarm_ctrl_val;
4780 	u16 lasi_ctrl_val;
4781 	struct bnx2x *bp = params->bp;
4782 	/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
4783
4784 	bnx2x_wait_reset_complete(bp, phy);
4785 	rx_alarm_ctrl_val = (1<<2) | (1<<5);
4786 	lasi_ctrl_val = 0x0004;
4787
4788 	DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
4789 	/* enable LASI */
4790 	bnx2x_cl45_write(bp, phy,
4791 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
4792 			 rx_alarm_ctrl_val);
4793
4794 	bnx2x_cl45_write(bp, phy,
4795 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
3634 }
3635
3636 static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3637 {
3638 	struct bnx2x *bp = params->bp;
3639 	u32 ext_phy_type;
3640 	u8 ext_phy_addr;
3641 	u16 cnt;
3642 	u16 ctrl = 0;
3643 	u16 val = 0;
3644 	u8 rc = 0;
3645
3646 	if (vars->phy_flags & PHY_XGXS_FLAG) {
3647 		ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
3648
3649 		ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3650 		/* Make sure that the soft reset is off (except for the 8072:
3651 		 * due to the lock, it will be done inside the specific
3652 		 * handling)
4796
4797 /* Initially configure MOD_ABS to interrupt when
4798 	   module is present (bit 8) */
4799 bnx2x_cl45_read(bp, phy,
4800 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
4801 /* Set EDC off by setting OPTXLOS signal input to low
4802 (bit 9).
4803 When the EDC is off it locks onto a reference clock and
4804 avoids becoming 'lost'.*/
4805 mod_abs &= ~(1<<8);
4806 if (!(phy->flags & FLAGS_NOC))
4807 mod_abs &= ~(1<<9);
4808 bnx2x_cl45_write(bp, phy,
4809 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4810
4811
4812 /* Make MOD_ABS give interrupt on change */
4813 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4814 &val);
4815 val |= (1<<12);
4816 if (phy->flags & FLAGS_NOC)
4817 val |= (3<<5);
4818
4819 /**
4820 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
4821 * status which reflect SFP+ module over-current
4822 */
4823 if (!(phy->flags & FLAGS_NOC))
4824 val &= 0xff8f; /* Reset bits 4-6 */
4825 bnx2x_cl45_write(bp, phy,
4826 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
4827
4828 bnx2x_8727_power_module(bp, phy, 1);
4829
4830 bnx2x_cl45_read(bp, phy,
4831 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
4832
4833 bnx2x_cl45_read(bp, phy,
4834 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1);
4835
4836 /* Set option 1G speed */
4837 if (phy->req_line_speed == SPEED_1000) {
4838 DP(NETIF_MSG_LINK, "Setting 1G force\n");
4839 bnx2x_cl45_write(bp, phy,
4840 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
4841 bnx2x_cl45_write(bp, phy,
4842 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
4843 bnx2x_cl45_read(bp, phy,
4844 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
4845 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
4846 /**
4847 * Power down the XAUI until link is up in case of dual-media
4848 * and 1G
4849 	 */
4850 	if (DUAL_MEDIA(params)) {
4851 		bnx2x_cl45_read(bp, phy,
4852 				MDIO_PMA_DEVAD,
4853 				MDIO_PMA_REG_8727_PCS_GP, &val);
4854 		val |= (3<<10);
4855 		bnx2x_cl45_write(bp, phy,
3653 		 */
3654 		if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3655 		    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3656 		    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3657 		    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) &&
3658 		    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) {
3659 			/* Wait for soft reset to get cleared up to 1 sec */
3660 for (cnt = 0; cnt < 1000; cnt++) {
3661 bnx2x_cl45_read(bp, params->port,
3662 ext_phy_type,
3663 ext_phy_addr,
3664 MDIO_PMA_DEVAD,
3665 MDIO_PMA_REG_CTRL, &ctrl);
3666 if (!(ctrl & (1<<15)))
3667 break;
3668 msleep(1);
3669 }
3670 DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n",
3671 ctrl, cnt);
3672 }
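
The loop above is the usual poll-until-clear pattern: read the PMA control register once per millisecond and stop as soon as the soft-reset bit (bit 15) drops, capped at roughly one second. A generic standalone sketch, with sleep_ms() standing in for msleep():

	#include <stdint.h>

	/* Stand-in for msleep(); supplied by the caller's environment. */
	extern void sleep_ms(unsigned int ms);

	/* Poll until (*read)(ctx) no longer has 'bit' set, or 'tries'
	 * one-millisecond steps elapse; 0 on success, -1 on timeout. */
	static int poll_bit_clear(uint16_t (*read)(void *ctx), void *ctx,
				  uint16_t bit, unsigned int tries)
	{
		unsigned int cnt;

		for (cnt = 0; cnt < tries; cnt++) {
			if (!(read(ctx) & bit))
				return 0;
			sleep_ms(1);
		}
		return -1;
	}
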
3673
3674 switch (ext_phy_type) {
3675 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3676 break;
3677
3678 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3679 DP(NETIF_MSG_LINK, "XGXS 8705\n");
3680
3681 bnx2x_cl45_write(bp, params->port,
3682 ext_phy_type,
3683 ext_phy_addr,
3684 MDIO_PMA_DEVAD,
3685 MDIO_PMA_REG_MISC_CTRL,
3686 0x8288);
3687 bnx2x_cl45_write(bp, params->port,
3688 ext_phy_type,
3689 ext_phy_addr,
3690 MDIO_PMA_DEVAD,
3691 MDIO_PMA_REG_PHY_IDENTIFIER,
3692 0x7fbf);
3693 bnx2x_cl45_write(bp, params->port,
3694 ext_phy_type,
3695 ext_phy_addr,
3696 MDIO_PMA_DEVAD,
3697 MDIO_PMA_REG_CMU_PLL_BYPASS,
3698 0x0100);
3699 bnx2x_cl45_write(bp, params->port,
3700 ext_phy_type,
3701 ext_phy_addr,
3702 MDIO_WIS_DEVAD,
3703 MDIO_WIS_REG_LASI_CNTL, 0x1);
3704
3705 /* BCM8705 doesn't have microcode, hence the 0 */
3706 bnx2x_save_spirom_version(bp, params->port,
3707 params->shmem_base, 0);
3708 break;
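
All of the bnx2x_cl45_read/write calls in this file address registers the Clause 45 way: a device address (MMD) within the PHY plus a 16-bit register number, as in MDIO_PMA_DEVAD/MDIO_PMA_REG_MISC_CTRL above. A sketch of how such an address pair can be modeled; mdio_rw() is a hypothetical transport, not a driver API:

	#include <stdint.h>

	/* Standard Clause 45 MMD numbers per IEEE 802.3: PMA/PMD is MMD 1,
	 * WIS 2, PCS 3, PHY XS 4, AN 7. */
	enum mmd { MMD_PMA = 1, MMD_WIS = 2, MMD_PCS = 3,
		   MMD_XS = 4, MMD_AN = 7 };

	struct cl45_reg {
		uint8_t  devad;	/* which MMD inside the PHY */
		uint16_t reg;	/* 16-bit register within that MMD */
	};

	/* Hypothetical transport; the driver's bnx2x_cl45_read/write pair
	 * plays this role (port selection omitted for brevity). */
	extern int mdio_rw(uint8_t phy_addr, struct cl45_reg r,
			   uint16_t *val, int write);
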
3709
3710 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3711 /* Wait until fw is loaded */
3712 for (cnt = 0; cnt < 100; cnt++) {
3713 bnx2x_cl45_read(bp, params->port, ext_phy_type,
3714 ext_phy_addr, MDIO_PMA_DEVAD,
3715 MDIO_PMA_REG_ROM_VER1, &val);
3716 if (val)
3717 break;
3718 msleep(10);
3719 }
3720 DP(NETIF_MSG_LINK, "XGXS 8706 is initialized "
3721 "after %d ms\n", cnt);
3722 if ((params->feature_config_flags &
3723 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
3724 u8 i;
3725 u16 reg;
3726 for (i = 0; i < 4; i++) {
3727 reg = MDIO_XS_8706_REG_BANK_RX0 +
3728 i*(MDIO_XS_8706_REG_BANK_RX1 -
3729 MDIO_XS_8706_REG_BANK_RX0);
3730 bnx2x_cl45_read(bp, params->port,
3731 ext_phy_type,
3732 ext_phy_addr,
3733 MDIO_XS_DEVAD,
3734 reg, &val);
3735 /* Clear first 3 bits of the control */
3736 val &= ~0x7;
3737 /* Set control bits according to
3738 					   configuration */
3739 val |= (params->xgxs_config_rx[i] &
3740 0x7);
3741 DP(NETIF_MSG_LINK, "Setting RX"
3742 "Equalizer to BCM8706 reg 0x%x"
3743 " <-- val 0x%x\n", reg, val);
3744 bnx2x_cl45_write(bp, params->port,
3745 ext_phy_type,
3746 ext_phy_addr,
3747 MDIO_XS_DEVAD,
3748 reg, val);
3749 }
3750 }
3751 /* Force speed */
3752 if (params->req_line_speed == SPEED_10000) {
3753 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
3754
3755 bnx2x_cl45_write(bp, params->port,
3756 ext_phy_type,
3757 ext_phy_addr,
3758 MDIO_PMA_DEVAD,
3759 MDIO_PMA_REG_DIGITAL_CTRL,
3760 0x400);
3761 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3762 ext_phy_addr, MDIO_PMA_DEVAD,
3763 MDIO_PMA_REG_LASI_CTRL, 1);
3764 } else {
3765 /* Force 1Gbps using autoneg with 1G
3766 				   advertisement */
3767
3768 /* Allow CL37 through CL73 */
3769 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3770 bnx2x_cl45_write(bp, params->port,
3771 ext_phy_type,
3772 ext_phy_addr,
3773 MDIO_AN_DEVAD,
3774 MDIO_AN_REG_CL37_CL73,
3775 0x040c);
3776
3777 				/* Enable Full-Duplex advertisement on CL37 */
3778 bnx2x_cl45_write(bp, params->port,
3779 ext_phy_type,
3780 ext_phy_addr,
3781 MDIO_AN_DEVAD,
3782 MDIO_AN_REG_CL37_FC_LP,
3783 0x0020);
3784 /* Enable CL37 AN */
3785 bnx2x_cl45_write(bp, params->port,
3786 ext_phy_type,
3787 ext_phy_addr,
3788 MDIO_AN_DEVAD,
3789 MDIO_AN_REG_CL37_AN,
3790 0x1000);
3791 /* 1G support */
3792 bnx2x_cl45_write(bp, params->port,
3793 ext_phy_type,
3794 ext_phy_addr,
3795 MDIO_AN_DEVAD,
3796 MDIO_AN_REG_ADV, (1<<5));
3797
3798 /* Enable clause 73 AN */
3799 bnx2x_cl45_write(bp, params->port,
3800 ext_phy_type,
3801 ext_phy_addr,
3802 MDIO_AN_DEVAD,
3803 MDIO_AN_REG_CTRL,
3804 0x1200);
3805 bnx2x_cl45_write(bp, params->port,
3806 ext_phy_type,
3807 ext_phy_addr,
3808 MDIO_PMA_DEVAD,
3809 MDIO_PMA_REG_RX_ALARM_CTRL,
3810 0x0400);
3811 bnx2x_cl45_write(bp, params->port,
3812 ext_phy_type,
3813 ext_phy_addr,
3814 MDIO_PMA_DEVAD,
3815 MDIO_PMA_REG_LASI_CTRL, 0x0004);
3816
3817 }
3818 bnx2x_save_bcm_spirom_ver(bp, params->port,
3819 ext_phy_type,
3820 ext_phy_addr,
3821 params->shmem_base);
3822 break;
3823 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
3824 DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
3825 bnx2x_bcm8726_external_rom_boot(params);
3826
3827 /* Need to call module detected on initialization since
3828 the module detection triggered by actual module
3829 insertion might occur before driver is loaded, and when
3830 			   driver is loaded, it resets all registers, including the
3831 transmitter */
3832 bnx2x_sfp_module_detection(params);
3833
3834 /* Set Flow control */
3835 bnx2x_ext_phy_set_pause(params, vars);
3836 if (params->req_line_speed == SPEED_1000) {
3837 DP(NETIF_MSG_LINK, "Setting 1G force\n");
3838 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3839 ext_phy_addr, MDIO_PMA_DEVAD,
3840 MDIO_PMA_REG_CTRL, 0x40);
3841 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3842 ext_phy_addr, MDIO_PMA_DEVAD,
3843 MDIO_PMA_REG_10G_CTRL2, 0xD);
3844 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3845 ext_phy_addr, MDIO_PMA_DEVAD,
3846 MDIO_PMA_REG_LASI_CTRL, 0x5);
3847 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3848 ext_phy_addr, MDIO_PMA_DEVAD,
3849 MDIO_PMA_REG_RX_ALARM_CTRL,
3850 0x400);
3851 } else if ((params->req_line_speed ==
3852 SPEED_AUTO_NEG) &&
3853 ((params->speed_cap_mask &
3854 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
3855 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
3856 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3857 ext_phy_addr, MDIO_AN_DEVAD,
3858 MDIO_AN_REG_ADV, 0x20);
3859 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3860 ext_phy_addr, MDIO_AN_DEVAD,
3861 MDIO_AN_REG_CL37_CL73, 0x040c);
3862 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3863 ext_phy_addr, MDIO_AN_DEVAD,
3864 MDIO_AN_REG_CL37_FC_LD, 0x0020);
3865 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3866 ext_phy_addr, MDIO_AN_DEVAD,
3867 MDIO_AN_REG_CL37_AN, 0x1000);
3868 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3869 ext_phy_addr, MDIO_AN_DEVAD,
3870 MDIO_AN_REG_CTRL, 0x1200);
3871
3872 /* Enable RX-ALARM control to receive
3873 interrupt for 1G speed change */
3874 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3875 ext_phy_addr, MDIO_PMA_DEVAD,
3876 MDIO_PMA_REG_LASI_CTRL, 0x4);
3877 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3878 ext_phy_addr, MDIO_PMA_DEVAD,
3879 MDIO_PMA_REG_RX_ALARM_CTRL,
3880 0x400);
3881
3882 } else { /* Default 10G. Set only LASI control */
3883 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3884 ext_phy_addr, MDIO_PMA_DEVAD,
3885 MDIO_PMA_REG_LASI_CTRL, 1);
3886 }
3887
3888 /* Set TX PreEmphasis if needed */
3889 if ((params->feature_config_flags &
3890 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
3891 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
3892 "TX_CTRL2 0x%x\n",
3893 params->xgxs_config_tx[0],
3894 params->xgxs_config_tx[1]);
3895 bnx2x_cl45_write(bp, params->port,
3896 ext_phy_type,
3897 ext_phy_addr,
3898 MDIO_PMA_DEVAD,
3899 MDIO_PMA_REG_8726_TX_CTRL1,
3900 params->xgxs_config_tx[0]);
3901
3902 bnx2x_cl45_write(bp, params->port,
3903 ext_phy_type,
3904 ext_phy_addr,
3905 MDIO_PMA_DEVAD,
3906 MDIO_PMA_REG_8726_TX_CTRL2,
3907 params->xgxs_config_tx[1]);
3908 }
3909 break;
3910 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3911 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
3912 {
3913 u16 tmp1;
3914 u16 rx_alarm_ctrl_val;
3915 u16 lasi_ctrl_val;
3916 if (ext_phy_type ==
3917 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
3918 rx_alarm_ctrl_val = 0x400;
3919 lasi_ctrl_val = 0x0004;
3920 } else {
3921 rx_alarm_ctrl_val = (1<<2);
3922 lasi_ctrl_val = 0x0004;
3923 }
3924
3925 /* enable LASI */
3926 bnx2x_cl45_write(bp, params->port,
3927 ext_phy_type,
3928 ext_phy_addr,
3929 MDIO_PMA_DEVAD,
3930 MDIO_PMA_REG_RX_ALARM_CTRL,
3931 rx_alarm_ctrl_val);
3932
3933 bnx2x_cl45_write(bp, params->port,
3934 ext_phy_type,
3935 ext_phy_addr,
3936 MDIO_PMA_DEVAD,
3937 MDIO_PMA_REG_LASI_CTRL,
3938 lasi_ctrl_val);
3939
3940 bnx2x_8073_set_pause_cl37(params, vars);
3941
3942 if (ext_phy_type ==
3943 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)
3944 bnx2x_bcm8072_external_rom_boot(params);
3945 else
3946 /* In case of 8073 with long xaui lines,
3947 don't set the 8073 xaui low power*/
3948 bnx2x_bcm8073_set_xaui_low_power_mode(params);
3949
3950 bnx2x_cl45_read(bp, params->port,
3951 ext_phy_type,
3952 ext_phy_addr,
3953 MDIO_PMA_DEVAD,
3954 MDIO_PMA_REG_M8051_MSGOUT_REG,
3955 &tmp1);
3956
3957 bnx2x_cl45_read(bp, params->port,
3958 ext_phy_type,
3959 ext_phy_addr,
3960 MDIO_PMA_DEVAD,
3961 MDIO_PMA_REG_RX_ALARM, &tmp1);
3962
3963 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1):"
3964 "0x%x\n", tmp1);
3965
3966 /* If this is forced speed, set to KR or KX
3967 * (all other are not supported)
3968 */
3969 if (params->loopback_mode == LOOPBACK_EXT) {
3970 bnx2x_bcm807x_force_10G(params);
3971 DP(NETIF_MSG_LINK,
3972 "Forced speed 10G on 807X\n");
3973 break;
3974 } else {
3975 bnx2x_cl45_write(bp, params->port,
3976 ext_phy_type, ext_phy_addr,
3977 MDIO_PMA_DEVAD,
3978 MDIO_PMA_REG_BCM_CTRL,
3979 0x0002);
3980 }
3981 if (params->req_line_speed != SPEED_AUTO_NEG) {
3982 if (params->req_line_speed == SPEED_10000) {
3983 val = (1<<7);
3984 } else if (params->req_line_speed ==
3985 SPEED_2500) {
3986 val = (1<<5);
3987 /* Note that 2.5G works only
3988 					   when used with 1G advertisement */
3989 } else
3990 val = (1<<5);
3991 } else {
3992
3993 val = 0;
3994 if (params->speed_cap_mask &
3995 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
3996 val |= (1<<7);
3997
3998 /* Note that 2.5G works only when
3999 				   used with 1G advertisement */
4000 if (params->speed_cap_mask &
4001 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
4002 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
4003 val |= (1<<5);
4004 DP(NETIF_MSG_LINK,
4005 "807x autoneg val = 0x%x\n", val);
4006 }
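
The advertisement word computed above uses bit 7 for 10G-KR and bit 5 for KX: a forced 10G request advertises KR only, any other forced speed advertises KX, and autoneg ORs in whichever rates the capability mask allows (2.5G rides on the 1G/KX bit). A standalone sketch with stand-in capability flags:

	#include <stdint.h>

	#define ADV_KX (1u << 5)	/* 1G-KX (also required for 2.5G) */
	#define ADV_KR (1u << 7)	/* 10G-KR */

	/* Stand-in capability bits for illustration. */
	#define CAP_1G   (1u << 0)
	#define CAP_2_5G (1u << 1)
	#define CAP_10G  (1u << 2)

	/* Mirrors the forced/autoneg split above: forced 10G -> KR, any
	 * other forced speed -> KX; autoneg -> OR of the capable rates. */
	static uint16_t adv_807x(int autoneg, uint32_t req_speed, uint32_t cap)
	{
		uint16_t val = 0;

		if (!autoneg)
			return (req_speed == 10000) ? ADV_KR : ADV_KX;
		if (cap & CAP_10G)
			val |= ADV_KR;
		if (cap & (CAP_1G | CAP_2_5G))
			val |= ADV_KX;
		return val;
	}
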
4007
4008 bnx2x_cl45_write(bp, params->port,
4009 ext_phy_type,
4010 ext_phy_addr,
4011 MDIO_AN_DEVAD,
4012 MDIO_AN_REG_ADV, val);
4013 if (ext_phy_type ==
4014 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
4015 bnx2x_cl45_read(bp, params->port,
4016 ext_phy_type,
4017 ext_phy_addr,
4018 MDIO_AN_DEVAD,
4019 MDIO_AN_REG_8073_2_5G, &tmp1);
4020
4021 if (((params->speed_cap_mask &
4022 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
4023 (params->req_line_speed ==
4024 SPEED_AUTO_NEG)) ||
4025 (params->req_line_speed ==
4026 SPEED_2500)) {
4027 u16 phy_ver;
4028 /* Allow 2.5G for A1 and above */
4029 bnx2x_cl45_read(bp, params->port,
4030 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4031 ext_phy_addr,
4032 						MDIO_PMA_DEVAD,
4033 						MDIO_PMA_REG_8073_CHIP_REV, &phy_ver);
4856 				 MDIO_PMA_DEVAD,
4857 				 MDIO_PMA_REG_8727_PCS_GP, val);
4034 DP(NETIF_MSG_LINK, "Add 2.5G\n");
4035 if (phy_ver > 0)
4036 tmp1 |= 1;
4037 else
4038 tmp1 &= 0xfffe;
4039 } else {
4040 DP(NETIF_MSG_LINK, "Disable 2.5G\n");
4041 tmp1 &= 0xfffe;
4042 }
4043
4044 bnx2x_cl45_write(bp, params->port,
4045 ext_phy_type,
4046 ext_phy_addr,
4047 MDIO_AN_DEVAD,
4048 MDIO_AN_REG_8073_2_5G, tmp1);
4049 }
4050
4051 /* Add support for CL37 (passive mode) II */
4052
4053 bnx2x_cl45_read(bp, params->port,
4054 ext_phy_type,
4055 ext_phy_addr,
4056 MDIO_AN_DEVAD,
4057 MDIO_AN_REG_CL37_FC_LD,
4058 &tmp1);
4059
4060 bnx2x_cl45_write(bp, params->port,
4061 ext_phy_type,
4062 ext_phy_addr,
4063 MDIO_AN_DEVAD,
4064 MDIO_AN_REG_CL37_FC_LD, (tmp1 |
4065 ((params->req_duplex == DUPLEX_FULL) ?
4066 0x20 : 0x40)));
4067
4068 /* Add support for CL37 (passive mode) III */
4069 bnx2x_cl45_write(bp, params->port,
4070 ext_phy_type,
4071 ext_phy_addr,
4072 MDIO_AN_DEVAD,
4073 MDIO_AN_REG_CL37_AN, 0x1000);
4074
4075 if (ext_phy_type ==
4076 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
4077 				/* The SNR will improve about 2dB by changing
4078 				   BW and FFE main tap. The rest of the commands
4079 				   are executed after link is up */
4080 /*Change FFE main cursor to 5 in EDC register*/
4081 if (bnx2x_8073_is_snr_needed(params))
4082 bnx2x_cl45_write(bp, params->port,
4083 ext_phy_type,
4084 ext_phy_addr,
4085 MDIO_PMA_DEVAD,
4086 MDIO_PMA_REG_EDC_FFE_MAIN,
4087 0xFB0C);
4088
4089 				/* Enable FEC (Forward Error Correction)
4090 Request in the AN */
4091 bnx2x_cl45_read(bp, params->port,
4092 ext_phy_type,
4093 ext_phy_addr,
4094 MDIO_AN_DEVAD,
4095 MDIO_AN_REG_ADV2, &tmp1);
4096
4097 tmp1 |= (1<<15);
4098
4099 bnx2x_cl45_write(bp, params->port,
4100 ext_phy_type,
4101 ext_phy_addr,
4102 MDIO_AN_DEVAD,
4103 MDIO_AN_REG_ADV2, tmp1);
4104
4105 }
4106
4107 bnx2x_ext_phy_set_pause(params, vars);
4108
4109 /* Restart autoneg */
4110 msleep(500);
4111 bnx2x_cl45_write(bp, params->port,
4112 ext_phy_type,
4113 ext_phy_addr,
4114 MDIO_AN_DEVAD,
4115 MDIO_AN_REG_CTRL, 0x1200);
4116 DP(NETIF_MSG_LINK, "807x Autoneg Restart: "
4117 "Advertise 1G=%x, 10G=%x\n",
4118 ((val & (1<<5)) > 0),
4119 ((val & (1<<7)) > 0));
4120 break;
4121 }
4122
4123 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4124 {
4125 u16 tmp1;
4126 u16 rx_alarm_ctrl_val;
4127 u16 lasi_ctrl_val;
4128
4129 /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
4130
4131 u16 mod_abs;
4132 rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
4133 lasi_ctrl_val = 0x0004;
4134
4135 DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
4136 /* enable LASI */
4137 bnx2x_cl45_write(bp, params->port,
4138 ext_phy_type,
4139 ext_phy_addr,
4140 MDIO_PMA_DEVAD,
4141 MDIO_PMA_REG_RX_ALARM_CTRL,
4142 rx_alarm_ctrl_val);
4143
4144 bnx2x_cl45_write(bp, params->port,
4145 ext_phy_type,
4146 ext_phy_addr,
4147 MDIO_PMA_DEVAD,
4148 MDIO_PMA_REG_LASI_CTRL,
4149 lasi_ctrl_val);
4150
4151 /* Initially configure MOD_ABS to interrupt when
4152 			   module is present (bit 8) */
4153 bnx2x_cl45_read(bp, params->port,
4154 ext_phy_type,
4155 ext_phy_addr,
4156 MDIO_PMA_DEVAD,
4157 MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
4158 /* Set EDC off by setting OPTXLOS signal input to low
4159 (bit 9).
4160 When the EDC is off it locks onto a reference clock and
4161 avoids becoming 'lost'.*/
4162 mod_abs &= ~((1<<8) | (1<<9));
4163 bnx2x_cl45_write(bp, params->port,
4164 ext_phy_type,
4165 ext_phy_addr,
4166 MDIO_PMA_DEVAD,
4167 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4168
4169 /* Make MOD_ABS give interrupt on change */
4170 bnx2x_cl45_read(bp, params->port,
4171 ext_phy_type,
4172 ext_phy_addr,
4173 MDIO_PMA_DEVAD,
4174 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4175 &val);
4176 val |= (1<<12);
4177 bnx2x_cl45_write(bp, params->port,
4178 ext_phy_type,
4179 ext_phy_addr,
4180 MDIO_PMA_DEVAD,
4181 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4182 val);
4183
4184 /* Set 8727 GPIOs to input to allow reading from the
4185 8727 GPIO0 status which reflect SFP+ module
4186 over-current */
4187
4188 bnx2x_cl45_read(bp, params->port,
4189 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4190 ext_phy_addr,
4191 MDIO_PMA_DEVAD,
4192 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4193 &val);
4194 val &= 0xff8f; /* Reset bits 4-6 */
4195 bnx2x_cl45_write(bp, params->port,
4196 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4197 ext_phy_addr,
4198 MDIO_PMA_DEVAD,
4199 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
4200 val);
4201
4202 bnx2x_8727_power_module(bp, params, ext_phy_addr, 1);
4203 bnx2x_bcm8073_set_xaui_low_power_mode(params);
4204
4205 bnx2x_cl45_read(bp, params->port,
4206 ext_phy_type,
4207 ext_phy_addr,
4208 MDIO_PMA_DEVAD,
4209 MDIO_PMA_REG_M8051_MSGOUT_REG,
4210 &tmp1);
4211
4212 bnx2x_cl45_read(bp, params->port,
4213 ext_phy_type,
4214 ext_phy_addr,
4215 MDIO_PMA_DEVAD,
4216 MDIO_PMA_REG_RX_ALARM, &tmp1);
4217
4218 /* Set option 1G speed */
4219 if (params->req_line_speed == SPEED_1000) {
4220
4221 DP(NETIF_MSG_LINK, "Setting 1G force\n");
4222 bnx2x_cl45_write(bp, params->port,
4223 ext_phy_type,
4224 ext_phy_addr,
4225 MDIO_PMA_DEVAD,
4226 MDIO_PMA_REG_CTRL, 0x40);
4227 bnx2x_cl45_write(bp, params->port,
4228 ext_phy_type,
4229 ext_phy_addr,
4230 MDIO_PMA_DEVAD,
4231 MDIO_PMA_REG_10G_CTRL2, 0xD);
4232 bnx2x_cl45_read(bp, params->port,
4233 ext_phy_type,
4234 ext_phy_addr,
4235 MDIO_PMA_DEVAD,
4236 MDIO_PMA_REG_10G_CTRL2, &tmp1);
4237 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
4238
4239 } else if ((params->req_line_speed ==
4240 SPEED_AUTO_NEG) &&
4241 ((params->speed_cap_mask &
4242 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
4243
4244 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
4245 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4246 ext_phy_addr, MDIO_AN_DEVAD,
4247 MDIO_PMA_REG_8727_MISC_CTRL, 0);
4248 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4249 ext_phy_addr, MDIO_AN_DEVAD,
4250 MDIO_AN_REG_CL37_AN, 0x1300);
4251 } else {
4252 				/* Since the 8727 has only a single reset pin,
4253 need to set the 10G registers although it is
4254 default */
4255 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4256 ext_phy_addr, MDIO_AN_DEVAD,
4257 MDIO_AN_REG_CTRL, 0x0020);
4258 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4259 ext_phy_addr, MDIO_AN_DEVAD,
4260 0x7, 0x0100);
4261 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4262 ext_phy_addr, MDIO_PMA_DEVAD,
4263 MDIO_PMA_REG_CTRL, 0x2040);
4264 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4265 ext_phy_addr, MDIO_PMA_DEVAD,
4266 MDIO_PMA_REG_10G_CTRL2, 0x0008);
4267 }
4268
4269 /* Set 2-wire transfer rate of SFP+ module EEPROM
4270 			 * to 100 kHz since some DACs (direct attach cables) do
4271 			 * not work at 400 kHz.
4272 */
4273 bnx2x_cl45_write(bp, params->port,
4274 ext_phy_type,
4275 ext_phy_addr,
4276 MDIO_PMA_DEVAD,
4277 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
4278 0xa001);
4279
4280 /* Set TX PreEmphasis if needed */
4281 if ((params->feature_config_flags &
4282 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
4283 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
4284 "TX_CTRL2 0x%x\n",
4285 params->xgxs_config_tx[0],
4286 params->xgxs_config_tx[1]);
4287 bnx2x_cl45_write(bp, params->port,
4288 ext_phy_type,
4289 ext_phy_addr,
4290 MDIO_PMA_DEVAD,
4291 MDIO_PMA_REG_8727_TX_CTRL1,
4292 params->xgxs_config_tx[0]);
4293
4294 bnx2x_cl45_write(bp, params->port,
4295 ext_phy_type,
4296 ext_phy_addr,
4297 MDIO_PMA_DEVAD,
4298 MDIO_PMA_REG_8727_TX_CTRL2,
4299 params->xgxs_config_tx[1]);
4300 }
4301
4302 break;
4303 }
4304
4305 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4306 {
4307 u16 fw_ver1, fw_ver2;
4308 DP(NETIF_MSG_LINK,
4309 "Setting the SFX7101 LASI indication\n");
4310
4311 bnx2x_cl45_write(bp, params->port,
4312 ext_phy_type,
4313 ext_phy_addr,
4314 MDIO_PMA_DEVAD,
4315 MDIO_PMA_REG_LASI_CTRL, 0x1);
4316 DP(NETIF_MSG_LINK,
4317 "Setting the SFX7101 LED to blink on traffic\n");
4318 bnx2x_cl45_write(bp, params->port,
4319 ext_phy_type,
4320 ext_phy_addr,
4321 MDIO_PMA_DEVAD,
4322 MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
4323
4324 bnx2x_ext_phy_set_pause(params, vars);
4325 /* Restart autoneg */
4326 bnx2x_cl45_read(bp, params->port,
4327 ext_phy_type,
4328 ext_phy_addr,
4329 MDIO_AN_DEVAD,
4330 MDIO_AN_REG_CTRL, &val);
4331 val |= 0x200;
4332 bnx2x_cl45_write(bp, params->port,
4333 ext_phy_type,
4334 ext_phy_addr,
4335 MDIO_AN_DEVAD,
4336 MDIO_AN_REG_CTRL, val);
4337
4338 /* Save spirom version */
4339 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4340 ext_phy_addr, MDIO_PMA_DEVAD,
4341 MDIO_PMA_REG_7101_VER1, &fw_ver1);
4342
4343 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4344 ext_phy_addr, MDIO_PMA_DEVAD,
4345 MDIO_PMA_REG_7101_VER2, &fw_ver2);
4346
4347 bnx2x_save_spirom_version(params->bp, params->port,
4348 params->shmem_base,
4349 (u32)(fw_ver1<<16 | fw_ver2));
4350 break;
4858 		}
4859 	} else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
4860 		   ((phy->speed_cap_mask &
4861 		     PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
4862 		   ((phy->speed_cap_mask &
4863 		     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
4864 		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
4865
4866 		DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
4867 		bnx2x_cl45_write(bp, phy,
4868 				 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
4869 		bnx2x_cl45_write(bp, phy,
4870 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
4871 	} else {
4872 		/**
4873 		 * Since the 8727 has only a single reset pin, need to set
4874 		 * the 10G registers although it is default
4875 		 */
4876 		bnx2x_cl45_write(bp, phy,
4877 				 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
4878 				 0x0020);
4879 		bnx2x_cl45_write(bp, phy,
4880 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
4881 		bnx2x_cl45_write(bp, phy,
4882 				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
4883 		bnx2x_cl45_write(bp, phy,
4884 				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
4885 				 0x0008);
4886 	}
4351 			}
4352 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
4353 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
4354 			/* This phy uses the NIG latch mechanism since link
4355 			   indication arrives through its LED4 and not via
4356 			   its LASI signal, so we get steady signal
4357 			   instead of clear on read */
4358 			bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
4359 				      1 << NIG_LATCH_BC_ENABLE_MI_INT);
4360
4361 			bnx2x_cl45_write(bp, params->port,
4362 					 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
4363 					 ext_phy_addr,
4364 					 MDIO_PMA_DEVAD,
4365 					 MDIO_PMA_REG_CTRL, 0x0000);
4366
4367 			bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
4368 			if (params->req_line_speed == SPEED_AUTO_NEG) {
4369
4370 				u16 autoneg_val, an_1000_val, an_10_100_val;
4371 				/* set 1000 speed advertisement */
4372 				bnx2x_cl45_read(bp, params->port,
4373 						ext_phy_type,
4374 						ext_phy_addr,
4375 						MDIO_AN_DEVAD,
4376 						MDIO_AN_REG_8481_1000T_CTRL,
4377 						&an_1000_val);
4378
4379 				if (params->speed_cap_mask &
4380 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
4381 an_1000_val |= (1<<8);
4382 if (params->req_duplex == DUPLEX_FULL)
4383 an_1000_val |= (1<<9);
4384 DP(NETIF_MSG_LINK, "Advertising 1G\n");
4385 } else
4386 an_1000_val &= ~((1<<8) | (1<<9));
4387
4388 bnx2x_cl45_write(bp, params->port,
4389 ext_phy_type,
4390 ext_phy_addr,
4391 MDIO_AN_DEVAD,
4392 MDIO_AN_REG_8481_1000T_CTRL,
4393 an_1000_val);
4394
4395 /* set 100 speed advertisement */
4396 bnx2x_cl45_read(bp, params->port,
4397 ext_phy_type,
4398 ext_phy_addr,
4399 MDIO_AN_DEVAD,
4400 MDIO_AN_REG_8481_LEGACY_AN_ADV,
4401 &an_10_100_val);
4402
4403 if (params->speed_cap_mask &
4404 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
4405 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) {
4406 an_10_100_val |= (1<<7);
4407 if (params->req_duplex == DUPLEX_FULL)
4408 an_10_100_val |= (1<<8);
4409 DP(NETIF_MSG_LINK,
4410 "Advertising 100M\n");
4411 } else
4412 an_10_100_val &= ~((1<<7) | (1<<8));
4413
4414 /* set 10 speed advertisement */
4415 if (params->speed_cap_mask &
4416 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
4417 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) {
4418 an_10_100_val |= (1<<5);
4419 if (params->req_duplex == DUPLEX_FULL)
4420 an_10_100_val |= (1<<6);
4421 DP(NETIF_MSG_LINK, "Advertising 10M\n");
4422 }
4423 else
4424 an_10_100_val &= ~((1<<5) | (1<<6));
4425
4426 bnx2x_cl45_write(bp, params->port,
4427 ext_phy_type,
4428 ext_phy_addr,
4429 MDIO_AN_DEVAD,
4430 MDIO_AN_REG_8481_LEGACY_AN_ADV,
4431 an_10_100_val);
4432
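
The three read-modify-write sequences above program the standard copper autoneg advertisement layout: MII register 4 carries 10/100 ability (bit 5 10M, bit 6 10M-FD, bit 7 100M, bit 8 100M-FD) and register 9 carries 1000BASE-T ability (bit 8 half, bit 9 full duplex). A minimal sketch of the bit assembly:

	#include <stdint.h>

	/* MII register 4 (10/100 advertisement) bits, as set above. */
	static uint16_t adv_10_100(int cap_10, int cap_100, int full_duplex)
	{
		uint16_t v = 0;

		if (cap_100) {
			v |= (1u << 7);		/* 100M half */
			if (full_duplex)
				v |= (1u << 8);	/* 100M full */
		}
		if (cap_10) {
			v |= (1u << 5);		/* 10M half */
			if (full_duplex)
				v |= (1u << 6);	/* 10M full */
		}
		return v;
	}

	/* MII register 9 (1000BASE-T control) bits, as set above. */
	static uint16_t adv_1000(int cap_1000, int full_duplex)
	{
		uint16_t v = 0;

		if (cap_1000) {
			v |= (1u << 8);		/* 1000M half */
			if (full_duplex)
				v |= (1u << 9);	/* 1000M full */
		}
		return v;
	}
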
4433 bnx2x_cl45_read(bp, params->port,
4434 ext_phy_type,
4435 ext_phy_addr,
4436 MDIO_AN_DEVAD,
4437 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4438 &autoneg_val);
4439
4440 /* Disable forced speed */
4441 autoneg_val &= ~(1<<6|1<<13);
4442
4443 /* Enable autoneg and restart autoneg
4444 for legacy speeds */
4445 autoneg_val |= (1<<9|1<<12);
4446
4447 if (params->req_duplex == DUPLEX_FULL)
4448 autoneg_val |= (1<<8);
4449 else
4450 autoneg_val &= ~(1<<8);
4451
4452 bnx2x_cl45_write(bp, params->port,
4453 ext_phy_type,
4454 ext_phy_addr,
4455 MDIO_AN_DEVAD,
4456 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4457 autoneg_val);
4458
4459 if (params->speed_cap_mask &
4460 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
4461 DP(NETIF_MSG_LINK, "Advertising 10G\n");
4462 /* Restart autoneg for 10G*/
4463
4464 bnx2x_cl45_write(bp, params->port,
4465 ext_phy_type,
4466 ext_phy_addr,
4467 MDIO_AN_DEVAD,
4468 MDIO_AN_REG_CTRL, 0x3200);
4469 }
4470 } else {
4471 /* Force speed */
4472 u16 autoneg_ctrl, pma_ctrl;
4473 bnx2x_cl45_read(bp, params->port,
4474 ext_phy_type,
4475 ext_phy_addr,
4476 MDIO_AN_DEVAD,
4477 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4478 &autoneg_ctrl);
4479
4480 /* Disable autoneg */
4481 autoneg_ctrl &= ~(1<<12);
4482
4483 /* Set 1000 force */
4484 switch (params->req_line_speed) {
4485 case SPEED_10000:
4486 DP(NETIF_MSG_LINK,
4487 "Unable to set 10G force !\n");
4488 break;
4489 case SPEED_1000:
4490 bnx2x_cl45_read(bp, params->port,
4491 ext_phy_type,
4492 ext_phy_addr,
4493 MDIO_PMA_DEVAD,
4494 MDIO_PMA_REG_CTRL,
4495 &pma_ctrl);
4496 autoneg_ctrl &= ~(1<<13);
4497 autoneg_ctrl |= (1<<6);
4498 pma_ctrl &= ~(1<<13);
4499 pma_ctrl |= (1<<6);
4500 DP(NETIF_MSG_LINK,
4501 "Setting 1000M force\n");
4502 bnx2x_cl45_write(bp, params->port,
4503 ext_phy_type,
4504 ext_phy_addr,
4505 MDIO_PMA_DEVAD,
4506 MDIO_PMA_REG_CTRL,
4507 pma_ctrl);
4508 break;
4509 case SPEED_100:
4510 autoneg_ctrl |= (1<<13);
4511 autoneg_ctrl &= ~(1<<6);
4512 DP(NETIF_MSG_LINK,
4513 "Setting 100M force\n");
4514 break;
4515 case SPEED_10:
4516 autoneg_ctrl &= ~(1<<13);
4517 autoneg_ctrl &= ~(1<<6);
4518 DP(NETIF_MSG_LINK,
4519 "Setting 10M force\n");
4520 break;
4521 }
4522
4523 /* Duplex mode */
4524 if (params->req_duplex == DUPLEX_FULL) {
4525 autoneg_ctrl |= (1<<8);
4526 DP(NETIF_MSG_LINK,
4527 "Setting full duplex\n");
4528 } else
4529 autoneg_ctrl &= ~(1<<8);
4530
4531 /* Update autoneg ctrl and pma ctrl */
4532 bnx2x_cl45_write(bp, params->port,
4533 ext_phy_type,
4534 ext_phy_addr,
4535 MDIO_AN_DEVAD,
4536 MDIO_AN_REG_8481_LEGACY_MII_CTRL,
4537 autoneg_ctrl);
4538 }
4539
4540 /* Save spirom version */
4541 bnx2x_save_8481_spirom_version(bp, params->port,
4542 ext_phy_addr,
4543 params->shmem_base);
4544 break;
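
The forced-speed switch above manipulates the classic MII control register: bit 12 enables autoneg, bit 8 selects full duplex, and the speed select is split across bit 6 (MSB) and bit 13 (LSB), so 10M is 00, 100M is 01 and 1000M is 10. A sketch of the same computation:

	#include <stdint.h>

	/* Rebuild the MII control word for a forced speed, mirroring the
	 * bit flips performed by the switch above. */
	static uint16_t mii_ctrl_forced(uint16_t ctrl, unsigned int speed,
					int full_duplex)
	{
		ctrl &= ~(1u << 12);			/* disable autoneg */
		ctrl &= ~((1u << 6) | (1u << 13));	/* start from 10M */
		if (speed == 100)
			ctrl |= (1u << 13);
		else if (speed == 1000)
			ctrl |= (1u << 6);
		if (full_duplex)
			ctrl |= (1u << 8);
		else
			ctrl &= ~(1u << 8);
		return ctrl;
	}
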
4545 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
4546 DP(NETIF_MSG_LINK,
4547 "XGXS PHY Failure detected 0x%x\n",
4548 params->ext_phy_config);
4549 rc = -EINVAL;
4550 break;
4551 default:
4552 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
4553 params->ext_phy_config);
4554 rc = -EINVAL;
4555 break;
4556 }
4557
4558 } else { /* SerDes */
4559
4560 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
4561 switch (ext_phy_type) {
4562 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
4563 DP(NETIF_MSG_LINK, "SerDes Direct\n");
4564 break;
4565
4566 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
4567 DP(NETIF_MSG_LINK, "SerDes 5482\n");
4568 break;
4569
4570 		default:
4571 			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
4572 			   params->ext_phy_config);
4573 			break;
4574 		}
4887
4888 	/* Set 2-wire transfer rate of SFP+ module EEPROM
4889 	 * to 100 kHz since some DACs (direct attach cables) do
4890 	 * not work at 400 kHz.
4891 	 */
4892 	bnx2x_cl45_write(bp, phy,
4893 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
4894 0xa001);
4895
4896 /* Set TX PreEmphasis if needed */
4897 if ((params->feature_config_flags &
4898 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
4899 DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
4900 phy->tx_preemphasis[0],
4901 phy->tx_preemphasis[1]);
4902 bnx2x_cl45_write(bp, phy,
4903 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
4904 phy->tx_preemphasis[0]);
4905
4906 bnx2x_cl45_write(bp, phy,
4907 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
4908 phy->tx_preemphasis[1]);
4909 	}
4910
4911 	return 0;
4912 }
4913
4914 static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
4915 				      struct link_params *params)
4916 {
4917 	struct bnx2x *bp = params->bp;
4918 	u16 mod_abs, rx_alarm_status;
4919 	u32 val = REG_RD(bp, params->shmem_base +
4920 			 offsetof(struct shmem_region, dev_info.
4921 				  port_feature_config[params->port].
4922 				  config));
4923 	bnx2x_cl45_read(bp, phy,
4924 			MDIO_PMA_DEVAD,
4925 			MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
4926 	if (mod_abs & (1<<8)) {
4575 		}
4576 	return rc;
4577 }
4578
4579 static void bnx2x_8727_handle_mod_abs(struct link_params *params)
4580 {
4581 	struct bnx2x *bp = params->bp;
4582 	u16 mod_abs, rx_alarm_status;
4583 	u8 ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
4584 	u32 val = REG_RD(bp, params->shmem_base +
4585 			 offsetof(struct shmem_region, dev_info.
4586 				  port_feature_config[params->port].
4587 				  config));
4588 	bnx2x_cl45_read(bp, params->port,
4589 			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4590 			ext_phy_addr,
4591 			MDIO_PMA_DEVAD,
4592 			MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
4593 	if (mod_abs & (1<<8)) {
@@ -4602,18 +4935,16 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
4602 		   (bit 9).
4603 		   When the EDC is off it locks onto a reference clock and
4604 		   avoids becoming 'lost'.*/
4605 		mod_abs &= ~((1<<8)|(1<<9));
4606 		bnx2x_cl45_write(bp, params->port,
4607 				 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4608 				 ext_phy_addr,
4609 				 MDIO_PMA_DEVAD,
4610 				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4611
4612 		/* Clear RX alarm since it stays up as long as
4613 		   the mod_abs wasn't changed */
4614 		bnx2x_cl45_read(bp, params->port,
4615 				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4616 				ext_phy_addr,
4617 				MDIO_PMA_DEVAD,
4618 				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4619
4935 		   (bit 9).
4936 		   When the EDC is off it locks onto a reference clock and
4937 		   avoids becoming 'lost'.*/
4938 		mod_abs &= ~(1<<8);
4939 		if (!(phy->flags & FLAGS_NOC))
4940 			mod_abs &= ~(1<<9);
4941 		bnx2x_cl45_write(bp, phy,
4942 				 MDIO_PMA_DEVAD,
4943 				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4944
4945 		/* Clear RX alarm since it stays up as long as
4946 		   the mod_abs wasn't changed */
4947 		bnx2x_cl45_read(bp, phy,
4948 				MDIO_PMA_DEVAD,
4949 				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4950
@@ -4630,33 +4961,28 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
4630 		   2. Restore the default polarity of the OPRXLOS signal and
4631 		   this signal will then correctly indicate the presence or
4632 		   absence of the Rx signal. (bit 9) */
4633 		mod_abs |= ((1<<8)|(1<<9));
4634 		bnx2x_cl45_write(bp, params->port,
4635 				 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4636 				 ext_phy_addr,
4637 				 MDIO_PMA_DEVAD,
4638 				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4639
4640 		/* Clear RX alarm since it stays up as long as
4641 		   the mod_abs wasn't changed. This needs to be done
4642 		   before calling the module detection, otherwise it will clear
4643 		   the link update alarm */
4644 		bnx2x_cl45_read(bp, params->port,
4645 				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4646 				ext_phy_addr,
4647 				MDIO_PMA_DEVAD,
4648 				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4649
4650
4651 		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
4652 		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
4653 			bnx2x_sfp_set_transmitter(bp, params->port,
4654 					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
4655 					ext_phy_addr, 0);
4656
4657 		if (bnx2x_wait_for_sfp_module_initialized(params)
4658 		    == 0)
4659 			bnx2x_sfp_module_detection(params);
4660 		else
4661 			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
4662 	}
4961 		   2. Restore the default polarity of the OPRXLOS signal and
4962 		   this signal will then correctly indicate the presence or
4963 		   absence of the Rx signal. (bit 9) */
4964 		mod_abs |= (1<<8);
4965 		if (!(phy->flags & FLAGS_NOC))
4966 			mod_abs |= (1<<9);
4967 		bnx2x_cl45_write(bp, phy,
4968 				 MDIO_PMA_DEVAD,
4969 				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
4970
4971 		/* Clear RX alarm since it stays up as long as
4972 		   the mod_abs wasn't changed. This needs to be done
4973 		   before calling the module detection, otherwise it will clear
4974 		   the link update alarm */
4975 		bnx2x_cl45_read(bp, phy,
4976 				MDIO_PMA_DEVAD,
4977 				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4978
4979
4980 		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
4981 		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
4982 			bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
4983
4984 		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
4985 			bnx2x_sfp_module_detection(phy, params);
4986 		else
4987 			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
4988 	}
@@ -4667,1298 +4993,1711 @@ static void bnx2x_8727_handle_mod_abs(struct link_params *params)
4667 	   module plugged in/out */
4668 }
4669
4993 	   module plugged in/out */
4994 }
4995
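
bnx2x_8727_handle_mod_abs() above re-arms the MOD_ABS interrupt by flipping bit 8 of the PHY identifier register to the opposite sense each time, so the next event fires on insertion after a removal and vice versa; bit 9 (OPRXLOS polarity / EDC) follows it except on the NOC flavor. A sketch of that toggle:

	#include <stdint.h>

	/* When the module is reported absent we clear bit 8 so the
	 * interrupt re-arms for insertion, and vice versa; bit 9 is only
	 * touched when the FLAGS_NOC flavor is not set. */
	static uint16_t rearm_mod_abs(uint16_t mod_abs, int module_absent,
				      int is_noc)
	{
		if (module_absent) {
			mod_abs &= ~(1u << 8);
			if (!is_noc)
				mod_abs &= ~(1u << 9);
		} else {
			mod_abs |= (1u << 8);
			if (!is_noc)
				mod_abs |= (1u << 9);
		}
		return mod_abs;
	}
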
4996static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
4997 struct link_params *params,
4998 struct link_vars *vars)
4999
4670
4671static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
4672 struct link_vars *vars,
4673 u8 is_mi_int)
4674 {
4675 	struct bnx2x *bp = params->bp;
4676 	u32 ext_phy_type;
4677 	u8 ext_phy_addr;
4678 	u16 val1 = 0, val2;
4679 	u16 rx_sd, pcs_status;
4680 	u8 ext_phy_link_up = 0;
4681 	u8 port = params->port;
5000 {
5001 	struct bnx2x *bp = params->bp;
5002 	u8 link_up = 0;
5003 	u16 link_status = 0;
5004 	u16 rx_alarm_status, lasi_ctrl, val1;
5005
5006 	/* If PHY is not initialized, do not check link status */
5007 	bnx2x_cl45_read(bp, phy,
5008 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL,
5009 &lasi_ctrl);
5010 if (!lasi_ctrl)
5011 return 0;
5012
5013 	/* Check the LASI */
5014 	bnx2x_cl45_read(bp, phy,
5015 			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
5016 			&rx_alarm_status);
5017 	vars->line_speed = 0;
5018 	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status);
4682
4683 	if (vars->phy_flags & PHY_XGXS_FLAG) {
4684 		ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
4685 		ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
4686 		switch (ext_phy_type) {
4687 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
4688 			DP(NETIF_MSG_LINK, "XGXS Direct\n");
4689 ext_phy_link_up = 1;
4690 break;
5019
5020 	bnx2x_cl45_read(bp, phy,
5021 			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
4691
4692 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
4693 			DP(NETIF_MSG_LINK, "XGXS 8705\n");
4694 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4695 ext_phy_addr,
4696 MDIO_WIS_DEVAD,
4697 MDIO_WIS_REG_LASI_STATUS, &val1);
4698 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
4699
4700 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4701 ext_phy_addr,
4702 MDIO_WIS_DEVAD,
4703 MDIO_WIS_REG_LASI_STATUS, &val1);
4704 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
4705
4706 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4707 ext_phy_addr,
4708 MDIO_PMA_DEVAD,
4709 MDIO_PMA_REG_RX_SD, &rx_sd);
4710
4711 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4712 ext_phy_addr,
4713 1,
4714 0xc809, &val1);
4715 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4716 ext_phy_addr,
4717 1,
4718 0xc809, &val1);
4719
4720 DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
4721 ext_phy_link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) &&
4722 ((val1 & (1<<8)) == 0));
4723 if (ext_phy_link_up)
4724 vars->line_speed = SPEED_10000;
4725 break;
5022
5023 	DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
4726
4727 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
4728 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4729 DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
4730 /* Clear RX Alarm*/
4731 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4732 ext_phy_addr,
4733 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM,
4734 &val2);
4735 /* clear LASI indication*/
4736 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4737 ext_phy_addr,
4738 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
4739 &val1);
4740 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4741 ext_phy_addr,
4742 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS,
4743 &val2);
4744 DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x-->"
4745 "0x%x\n", val1, val2);
4746
4747 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4748 ext_phy_addr,
4749 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD,
4750 &rx_sd);
4751 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4752 ext_phy_addr,
4753 MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS,
4754 &pcs_status);
4755 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4756 ext_phy_addr,
4757 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
4758 &val2);
4759 bnx2x_cl45_read(bp, params->port, ext_phy_type,
4760 ext_phy_addr,
4761 MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS,
4762 &val2);
4763
4764 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x"
4765 " pcs_status 0x%x 1Gbps link_status 0x%x\n",
4766 rx_sd, pcs_status, val2);
4767 /* link is up if both bit 0 of pmd_rx_sd and
4768 * bit 0 of pcs_status are set, or if the autoneg bit
4769 1 is set
4770 */
4771 ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
4772 (val2 & (1<<1)));
4773 if (ext_phy_link_up) {
4774 if (ext_phy_type ==
4775 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
4776 /* If transmitter is disabled,
4777 ignore false link up indication */
4778 bnx2x_cl45_read(bp, params->port,
4779 ext_phy_type,
4780 ext_phy_addr,
4781 MDIO_PMA_DEVAD,
4782 MDIO_PMA_REG_PHY_IDENTIFIER,
4783 &val1);
4784 if (val1 & (1<<15)) {
4785 DP(NETIF_MSG_LINK, "Tx is "
4786 "disabled\n");
4787 ext_phy_link_up = 0;
4788 break;
4789 }
4790 }
4791 if (val2 & (1<<1))
4792 vars->line_speed = SPEED_1000;
4793 else
4794 vars->line_speed = SPEED_10000;
4795 }
4796 break;
4797
4798 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4799 {
4800 u16 link_status = 0;
4801 u16 rx_alarm_status;
4802 /* Check the LASI */
4803 bnx2x_cl45_read(bp, params->port,
4804 ext_phy_type,
4805 ext_phy_addr,
4806 MDIO_PMA_DEVAD,
4807 MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
4808
4809 DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
4810 rx_alarm_status);
4811
4812 bnx2x_cl45_read(bp, params->port,
4813 ext_phy_type,
4814 ext_phy_addr,
4815 MDIO_PMA_DEVAD,
4816 MDIO_PMA_REG_LASI_STATUS, &val1);
4817
4818 			DP(NETIF_MSG_LINK,
4819 			   "8727 LASI status 0x%x\n",
4820 			   val1);
4821
4822 			/* Clear MSG-OUT */
4823 			bnx2x_cl45_read(bp, params->port,
4824 					ext_phy_type,
4825 					ext_phy_addr,
4826 					MDIO_PMA_DEVAD,
4827 					MDIO_PMA_REG_M8051_MSGOUT_REG,
4828 					&val1);
5024
5025 	/* Clear MSG-OUT */
5026 	bnx2x_cl45_read(bp, phy,
5027 			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
5028
5029 	/**
5030 	 * If a module is present and there is need to check
5031 	 * for over current
5032 	 */
5033 	if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
5034 		/* Check over-current using 8727 GPIO0 input*/
5035 		bnx2x_cl45_read(bp, phy,
5036 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
5037 &val1);
5038
5039 if ((val1 & (1<<8)) == 0) {
5040 DP(NETIF_MSG_LINK, "8727 Power fault has been detected"
5041 " on port %d\n", params->port);
5042 netdev_err(bp->dev, "Error: Power fault on Port %d has"
5043 " been detected and the power to "
5044 "that SFP+ module has been removed"
5045 " to prevent failure of the card."
5046 " Please remove the SFP+ module and"
5047 " restart the system to clear this"
5048 " error.\n",
5049 params->port);
5050
5051 			/*
5052 			 * Disable all RX_ALARMs except for
5053 			 * mod_abs
5054 			 */
5055 			bnx2x_cl45_write(bp, phy,
5056 					 MDIO_PMA_DEVAD,
5057 					 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
4829
4830 			/*
4831 			 * If a module is present and there is need to check
4832 			 * for over current
4833 			 */
4834 			if (!(params->feature_config_flags &
4835 			      FEATURE_CONFIG_BCM8727_NOC) &&
4836 			    !(rx_alarm_status & (1<<5))) {
4837 /* Check over-current using 8727 GPIO0 input*/
4838 bnx2x_cl45_read(bp, params->port,
4839 ext_phy_type,
4840 ext_phy_addr,
4841 MDIO_PMA_DEVAD,
4842 MDIO_PMA_REG_8727_GPIO_CTRL,
4843 &val1);
4844
4845 if ((val1 & (1<<8)) == 0) {
4846 DP(NETIF_MSG_LINK, "8727 Power fault"
4847 " has been detected on "
4848 "port %d\n",
4849 params->port);
4850 netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
4851 params->port);
4852 /*
4853 * Disable all RX_ALARMs except for
4854 * mod_abs
4855 */
4856 bnx2x_cl45_write(bp, params->port,
4857 ext_phy_type,
4858 ext_phy_addr,
4859 MDIO_PMA_DEVAD,
4860 MDIO_PMA_REG_RX_ALARM_CTRL,
4861 (1<<5));
4862
4863 bnx2x_cl45_read(bp, params->port,
4864 ext_phy_type,
4865 ext_phy_addr,
4866 MDIO_PMA_DEVAD,
4867 MDIO_PMA_REG_PHY_IDENTIFIER,
4868 &val1);
4869 /* Wait for module_absent_event */
4870 val1 |= (1<<8);
4871 bnx2x_cl45_write(bp, params->port,
4872 ext_phy_type,
4873 ext_phy_addr,
4874 MDIO_PMA_DEVAD,
4875 MDIO_PMA_REG_PHY_IDENTIFIER,
4876 val1);
4877 /* Clear RX alarm */
4878 bnx2x_cl45_read(bp, params->port,
4879 ext_phy_type,
4880 ext_phy_addr,
4881 MDIO_PMA_DEVAD,
4882 MDIO_PMA_REG_RX_ALARM,
4883 &rx_alarm_status);
4884 break;
4885 }
4886 } /* Over current check */
4887
4888 /* When module absent bit is set, check module */
4889 if (rx_alarm_status & (1<<5)) {
4890 bnx2x_8727_handle_mod_abs(params);
4891 /* Enable all mod_abs and link detection bits */
4892 bnx2x_cl45_write(bp, params->port,
4893 ext_phy_type,
4894 ext_phy_addr,
4895 MDIO_PMA_DEVAD,
4896 MDIO_PMA_REG_RX_ALARM_CTRL,
4897 ((1<<5) | (1<<2)));
4898 }
4899
4900 /* If transmitter is disabled,
4901 ignore false link up indication */
4902 bnx2x_cl45_read(bp, params->port,
4903 ext_phy_type,
4904 ext_phy_addr,
4905 MDIO_PMA_DEVAD,
4906 MDIO_PMA_REG_PHY_IDENTIFIER,
4907 &val1);
4908 if (val1 & (1<<15)) {
4909 DP(NETIF_MSG_LINK, "Tx is disabled\n");
4910 ext_phy_link_up = 0;
4911 break;
4912 }
5058
5059 		bnx2x_cl45_read(bp, phy,
5060 				MDIO_PMA_DEVAD,
5061 				MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
5062 		/* Wait for module_absent_event */
5063 		val1 |= (1<<8);
5064 		bnx2x_cl45_write(bp, phy,
5065 				 MDIO_PMA_DEVAD,
5066 				 MDIO_PMA_REG_PHY_IDENTIFIER, val1);
5067 		/* Clear RX alarm */
5068 		bnx2x_cl45_read(bp, phy,
5069 				MDIO_PMA_DEVAD,
5070 				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
5071 		return 0;
4913
4914 			bnx2x_cl45_read(bp, params->port,
4915 					ext_phy_type,
4916 					ext_phy_addr,
4917 					MDIO_PMA_DEVAD,
4918 					MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
4919 					&link_status);
4920
4921 			/* Bits 0..2 --> speed detected,
4922 			   bits 13..15--> link is down */
4923 			if ((link_status & (1<<2)) &&
4924 			    (!(link_status & (1<<15)))) {
4925 				ext_phy_link_up = 1;
4926 				vars->line_speed = SPEED_10000;
4927 } else if ((link_status & (1<<0)) &&
4928 (!(link_status & (1<<13)))) {
4929 ext_phy_link_up = 1;
4930 vars->line_speed = SPEED_1000;
4931 DP(NETIF_MSG_LINK,
4932 "port %x: External link"
4933 " up in 1G\n", params->port);
4934 } else {
4935 ext_phy_link_up = 0;
4936 DP(NETIF_MSG_LINK,
4937 "port %x: External link"
4938 " is down\n", params->port);
4939 }
4940 break;
4941 			}
5072 	}
5073 } /* Over current check */
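
The over-current check that just completed reads the 8727 GPIO control register and treats a cleared bit 8 (GPIO0 low) as a power fault; on fault everything except the mod_abs alarm (bit 5) is masked in RX_ALARM_CTRL and the code waits for a module-absent event. A compact sketch of the decision:

	#include <stdint.h>

	/* GPIO0 state is reflected in bit 8 of the 8727 GPIO control
	 * register; a zero there means power fault. Returns 1 on fault
	 * and fills in the RX_ALARM_CTRL mask to program (only the
	 * mod_abs alarm, bit 5, stays enabled). */
	static int power_fault(uint16_t gpio_ctrl, uint16_t *rx_alarm_mask)
	{
		if (gpio_ctrl & (1u << 8))
			return 0;		/* power OK */
		*rx_alarm_mask = (1u << 5);	/* keep only mod_abs */
		return 1;
	}
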
5074
5075 /* When module absent bit is set, check module */
5076 if (rx_alarm_status & (1<<5)) {
5077 bnx2x_8727_handle_mod_abs(phy, params);
5078 /* Enable all mod_abs and link detection bits */
5079 bnx2x_cl45_write(bp, phy,
5080 MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL,
5081 ((1<<5) | (1<<2)));
5082 }
5083 DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
5084 bnx2x_8727_specific_func(phy, params, ENABLE_TX);
5085 /* If transmitter is disabled, ignore false link up indication */
5086 bnx2x_cl45_read(bp, phy,
5087 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
5088 if (val1 & (1<<15)) {
5089 DP(NETIF_MSG_LINK, "Tx is disabled\n");
5090 return 0;
5091 }
5092
5093 	bnx2x_cl45_read(bp, phy,
5094 			MDIO_PMA_DEVAD,
5095 			MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
4942
4943 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4944 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4945 		{
4946 u16 link_status = 0;
4947 u16 an1000_status = 0;
4948
4949 if (ext_phy_type ==
4950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
4951 bnx2x_cl45_read(bp, params->port,
4952 ext_phy_type,
4953 ext_phy_addr,
4954 MDIO_PCS_DEVAD,
4955 MDIO_PCS_REG_LASI_STATUS, &val1);
4956 bnx2x_cl45_read(bp, params->port,
4957 ext_phy_type,
4958 ext_phy_addr,
4959 MDIO_PCS_DEVAD,
4960 MDIO_PCS_REG_LASI_STATUS, &val2);
4961 DP(NETIF_MSG_LINK,
4962 "870x LASI status 0x%x->0x%x\n",
4963 val1, val2);
4964 } else {
4965 /* In 8073, port1 is directed through emac0 and
4966 * port0 is directed through emac1
4967 */
4968 bnx2x_cl45_read(bp, params->port,
4969 ext_phy_type,
4970 ext_phy_addr,
4971 MDIO_PMA_DEVAD,
4972 MDIO_PMA_REG_LASI_STATUS, &val1);
4973
4974 DP(NETIF_MSG_LINK,
4975 "8703 LASI status 0x%x\n",
4976 val1);
4977 }
5096
5097 	/* Bits 0..2 --> speed detected,
5098 	   bits 13..15--> link is down */
5099 	if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
5100 		link_up = 1;
5101 		vars->line_speed = SPEED_10000;
5102 	} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
5103 		link_up = 1;
5104 		vars->line_speed = SPEED_1000;
5105 		DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
5106 		   params->port);
5107 	} else {
5108 		link_up = 0;
5109 		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
5110 		   params->port);
5111 	}
5112 	if (link_up)
5113 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
5114
5115 	if ((DUAL_MEDIA(params)) &&
5116 	    (phy->req_line_speed == SPEED_1000)) {
5117 		bnx2x_cl45_read(bp, phy,
5118 				MDIO_PMA_DEVAD,
5119 				MDIO_PMA_REG_8727_PCS_GP, &val1);
5120 		/**
5121 		 * In case of dual-media board and 1G, power up the XAUI side,
5122 		 * otherwise power it down. For 10G it is done automatically
5123 		 */
5124 		if (link_up)
5125 			val1 &= ~(3<<10);
5126 		else
5127 			val1 |= (3<<10);
5128 		bnx2x_cl45_write(bp, phy,
5129 				 MDIO_PMA_DEVAD,
5130 				 MDIO_PMA_REG_8727_PCS_GP, val1);
5131 	}
5132 	return link_up;
5133 }
4978
4979 			/* clear the interrupt LASI status register */
4980 			bnx2x_cl45_read(bp, params->port,
4981 					ext_phy_type,
4982 					ext_phy_addr,
4983 					MDIO_PCS_DEVAD,
4984 					MDIO_PCS_REG_STATUS, &val2);
4985 			bnx2x_cl45_read(bp, params->port,
4986 					ext_phy_type,
4987 					ext_phy_addr,
4988 					MDIO_PCS_DEVAD,
4989 					MDIO_PCS_REG_STATUS, &val1);
4990 			DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
4991 			   val2, val1);
4992 			/* Clear MSG-OUT */
4993 			bnx2x_cl45_read(bp, params->port,
4994 					ext_phy_type,
4995 					ext_phy_addr,
4996 					MDIO_PMA_DEVAD,
4997 					MDIO_PMA_REG_M8051_MSGOUT_REG,
4998 					&val1);
4999
5000 			/* Check the LASI */
5001 			bnx2x_cl45_read(bp, params->port,
5002 					ext_phy_type,
5003 					ext_phy_addr,
5004 					MDIO_PMA_DEVAD,
5005 					MDIO_PMA_REG_RX_ALARM, &val2);
5006
5007 			DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
5008
5009 			/* Check the link status */
5010 			bnx2x_cl45_read(bp, params->port,
5011 					ext_phy_type,
5012 					ext_phy_addr,
5013 					MDIO_PCS_DEVAD,
5014 					MDIO_PCS_REG_STATUS, &val2);
5015 			DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
5016
5017 bnx2x_cl45_read(bp, params->port,
5018 ext_phy_type,
5019 ext_phy_addr,
5020 MDIO_PMA_DEVAD,
5021 MDIO_PMA_REG_STATUS, &val2);
5022 bnx2x_cl45_read(bp, params->port,
5023 ext_phy_type,
5024 ext_phy_addr,
5025 MDIO_PMA_DEVAD,
5026 MDIO_PMA_REG_STATUS, &val1);
5027 ext_phy_link_up = ((val1 & 4) == 4);
5028 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
5029 if (ext_phy_type ==
5030 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
5031
5032 if (ext_phy_link_up &&
5033 ((params->req_line_speed !=
5034 SPEED_10000))) {
5035 if (bnx2x_bcm8073_xaui_wa(params)
5036 != 0) {
5037 ext_phy_link_up = 0;
5038 break;
5039 }
5040 }
5041 bnx2x_cl45_read(bp, params->port,
5042 ext_phy_type,
5043 ext_phy_addr,
5044 MDIO_AN_DEVAD,
5045 MDIO_AN_REG_LINK_STATUS,
5046 &an1000_status);
5047 bnx2x_cl45_read(bp, params->port,
5048 ext_phy_type,
5049 ext_phy_addr,
5050 MDIO_AN_DEVAD,
5051 MDIO_AN_REG_LINK_STATUS,
5052 &an1000_status);
5053
5054 /* Check the link status on 1.1.2 */
5055 bnx2x_cl45_read(bp, params->port,
5056 ext_phy_type,
5057 ext_phy_addr,
5058 MDIO_PMA_DEVAD,
5059 MDIO_PMA_REG_STATUS, &val2);
5060 bnx2x_cl45_read(bp, params->port,
5061 ext_phy_type,
5062 ext_phy_addr,
5063 MDIO_PMA_DEVAD,
5064 MDIO_PMA_REG_STATUS, &val1);
5065 DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
5066 "an_link_status=0x%x\n",
5067 val2, val1, an1000_status);
5068
5069 ext_phy_link_up = (((val1 & 4) == 4) ||
5070 (an1000_status & (1<<1)));
5071 if (ext_phy_link_up &&
5072 bnx2x_8073_is_snr_needed(params)) {
5073 				/* The SNR will improve about 2dB by
5074 				   changing the BW and FFE main tap.*/
5075
5076 /* The 1st write to change FFE main
5077 tap is set before restart AN */
5078 /* Change PLL Bandwidth in EDC
5079 register */
5080 bnx2x_cl45_write(bp, port, ext_phy_type,
5081 ext_phy_addr,
5082 MDIO_PMA_DEVAD,
5083 MDIO_PMA_REG_PLL_BANDWIDTH,
5084 0x26BC);
5085
5086 /* Change CDR Bandwidth in EDC
5087 register */
5088 bnx2x_cl45_write(bp, port, ext_phy_type,
5089 ext_phy_addr,
5090 MDIO_PMA_DEVAD,
5091 MDIO_PMA_REG_CDR_BANDWIDTH,
5092 0x0333);
5093 }
5094 bnx2x_cl45_read(bp, params->port,
5095 ext_phy_type,
5096 ext_phy_addr,
5097 MDIO_PMA_DEVAD,
5098 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
5099 &link_status);
5100
5101 /* Bits 0..2 --> speed detected,
5102 bits 13..15--> link is down */
5103 if ((link_status & (1<<2)) &&
5104 (!(link_status & (1<<15)))) {
5105 ext_phy_link_up = 1;
5106 vars->line_speed = SPEED_10000;
5107 DP(NETIF_MSG_LINK,
5108 "port %x: External link"
5109 " up in 10G\n", params->port);
5110 } else if ((link_status & (1<<1)) &&
5111 (!(link_status & (1<<14)))) {
5112 ext_phy_link_up = 1;
5113 vars->line_speed = SPEED_2500;
5114 DP(NETIF_MSG_LINK,
5115 "port %x: External link"
5116 " up in 2.5G\n", params->port);
5117 } else if ((link_status & (1<<0)) &&
5118 (!(link_status & (1<<13)))) {
5119 ext_phy_link_up = 1;
5120 vars->line_speed = SPEED_1000;
5121 DP(NETIF_MSG_LINK,
5122 "port %x: External link"
5123 " up in 1G\n", params->port);
5124 } else {
5125 ext_phy_link_up = 0;
5126 DP(NETIF_MSG_LINK,
5127 "port %x: External link"
5128 " is down\n", params->port);
5129 }
5130 } else {
5131 /* See if 1G link is up for the 8072 */
5132 bnx2x_cl45_read(bp, params->port,
5133 ext_phy_type,
5134 ext_phy_addr,
5135 MDIO_AN_DEVAD,
5136 MDIO_AN_REG_LINK_STATUS,
5137 &an1000_status);
5138 bnx2x_cl45_read(bp, params->port,
5139 ext_phy_type,
5140 ext_phy_addr,
5141 MDIO_AN_DEVAD,
5142 MDIO_AN_REG_LINK_STATUS,
5143 &an1000_status);
5144 if (an1000_status & (1<<1)) {
5145 ext_phy_link_up = 1;
5146 vars->line_speed = SPEED_1000;
5147 DP(NETIF_MSG_LINK,
5148 "port %x: External link"
5149 " up in 1G\n", params->port);
5150 } else if (ext_phy_link_up) {
5151 ext_phy_link_up = 1;
5152 vars->line_speed = SPEED_10000;
5153 DP(NETIF_MSG_LINK,
5154 "port %x: External link"
5155 " up in 10G\n", params->port);
5156 }
5157 }
5158
5134
5135static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
5136 struct link_params *params)
5137{
5138 struct bnx2x *bp = params->bp;
5139 /* Disable Transmitter */
5140 bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
5141 /* Clear LASI */
5142 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
5143
5144 }
5159
5160 			break;
5161 }
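
Both the old 8073 path above and the new bnx2x_8727_read_status() decode the same speed/link status word: bits 0..2 flag the detected rate (1G, 2.5G, 10G) and bits 13..15 flag the corresponding link as down, so a rate counts only when its down bit is clear. A standalone decode sketch:

	#include <stdint.h>

	/* Decode the 8073/8727 speed/link status word used above; returns
	 * the detected rate in Mb/s, or 0 when the link is down. */
	static unsigned int decode_speed_link(uint16_t st)
	{
		if ((st & (1u << 2)) && !(st & (1u << 15)))
			return 10000;
		if ((st & (1u << 1)) && !(st & (1u << 14)))
			return 2500;
		if ((st & (1u << 0)) && !(st & (1u << 13)))
			return 1000;
		return 0;
	}
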
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-		bnx2x_cl45_read(bp, params->port, ext_phy_type,
-			      ext_phy_addr, MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_LASI_STATUS, &val2);
-		bnx2x_cl45_read(bp, params->port, ext_phy_type,
-			      ext_phy_addr, MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_LASI_STATUS, &val1);
-		DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
-		   val2, val1);
-		bnx2x_cl45_read(bp, params->port, ext_phy_type,
-			      ext_phy_addr, MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_STATUS, &val2);
-		bnx2x_cl45_read(bp, params->port, ext_phy_type,
-			      ext_phy_addr, MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_STATUS, &val1);
-		DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
-		   val2, val1);
-		ext_phy_link_up = ((val1 & 4) == 4);
-		/* if link is up
-		 * print the AN outcome of the SFX7101 PHY
-		 */
-		if (ext_phy_link_up) {
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-				      ext_phy_addr, MDIO_AN_DEVAD,
-				      MDIO_AN_REG_MASTER_STATUS, &val2);
-			vars->line_speed = SPEED_10000;
-			DP(NETIF_MSG_LINK,
-			   "SFX7101 AN status 0x%x->Master=%x\n",
-			   val2, (val2 & (1<<14)));
-		}
-		break;
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
-		/* Check 10G-BaseT link status */
-		/* Check PMD signal ok */
-		bnx2x_cl45_read(bp, params->port, ext_phy_type,
-			      ext_phy_addr, MDIO_AN_DEVAD,
-			      0xFFFA, &val1);
-		bnx2x_cl45_read(bp, params->port, ext_phy_type,
-			      ext_phy_addr, MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_8481_PMD_SIGNAL, &val2);
-		DP(NETIF_MSG_LINK, "PMD_SIGNAL 1.a811 = 0x%x\n", val2);
-
-		/* Check link 10G */
-		if (val2 & (1<<11)) {
-			vars->line_speed = SPEED_10000;
-			ext_phy_link_up = 1;
-			bnx2x_8481_set_10G_led_mode(params, ext_phy_type,
-						    ext_phy_addr);
-		} else { /* Check Legacy speed link */
-			u16 legacy_status, legacy_speed;
-
-			/* Enable expansion register 0x42
-			   (Operation mode status) */
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-				       ext_phy_addr, MDIO_AN_DEVAD,
-				       MDIO_AN_REG_8481_EXPANSION_REG_ACCESS,
-				       0xf42);
-
-			/* Get legacy speed operation status */
-			bnx2x_cl45_read(bp, params->port, ext_phy_type,
-				      ext_phy_addr, MDIO_AN_DEVAD,
-				      MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
-				      &legacy_status);
-
-			DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
-			   legacy_status);
-			ext_phy_link_up = ((legacy_status & (1<<11))
-					   == (1<<11));
-			if (ext_phy_link_up) {
-				legacy_speed = (legacy_status & (3<<9));
-				if (legacy_speed == (0<<9))
-					vars->line_speed = SPEED_10;
-				else if (legacy_speed == (1<<9))
-					vars->line_speed = SPEED_100;
-				else if (legacy_speed == (2<<9))
-					vars->line_speed = SPEED_1000;
-				else /* Should not happen */
-					vars->line_speed = 0;
-
-				if (legacy_status & (1<<8))
-					vars->duplex = DUPLEX_FULL;
-				else
-					vars->duplex = DUPLEX_HALF;
-
-				DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
-				   " is_duplex_full= %d\n", vars->line_speed,
-				   (vars->duplex == DUPLEX_FULL));
-				bnx2x_8481_set_legacy_led_mode(params,
-							       ext_phy_type,
-							       ext_phy_addr);
-			}
-		}
-		break;
-	default:
-		DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
-		   params->ext_phy_config);
-		ext_phy_link_up = 0;
-		break;
-	}
-	/* Set SGMII mode for external phy */
-	if (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
-		if (vars->line_speed < SPEED_1000)
-			vars->phy_flags |= PHY_SGMII_FLAG;
-		else
-			vars->phy_flags &= ~PHY_SGMII_FLAG;
-	}
-
-	} else { /* SerDes */
-		ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
-		switch (ext_phy_type) {
-		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
-			DP(NETIF_MSG_LINK, "SerDes Direct\n");
-			ext_phy_link_up = 1;
-			break;
-
-		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
-			DP(NETIF_MSG_LINK, "SerDes 5482\n");
-			ext_phy_link_up = 1;
-			break;
-
-		default:
-			DP(NETIF_MSG_LINK,
-			   "BAD SerDes ext_phy_config 0x%x\n",
-			   params->ext_phy_config);
-			ext_phy_link_up = 0;
-			break;
-		}
-	}
-
-	return ext_phy_link_up;
-}
+
+/******************************************************************/
+/*		BCM8481/BCM84823/BCM84833 PHY SECTION		  */
+/******************************************************************/
+static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
+					    struct link_params *params)
+{
+	u16 val, fw_ver1, fw_ver2, cnt;
+	struct bnx2x *bp = params->bp;
+
+	/* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
+	/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
+	for (cnt = 0; cnt < 100; cnt++) {
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+		if (val & 1)
+			break;
+		udelay(5);
+	}
+	if (cnt == 100) {
+		DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
+		bnx2x_save_spirom_version(bp, params->port, 0,
+					  phy->ver_addr);
+		return;
+	}
+
+	/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+	for (cnt = 0; cnt < 100; cnt++) {
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+		if (val & 1)
+			break;
+		udelay(5);
+	}
+	if (cnt == 100) {
+		DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
+		bnx2x_save_spirom_version(bp, params->port, 0,
+					  phy->ver_addr);
+		return;
+	}
+
+	/* lower 16 bits of the register SPI_FW_STATUS */
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+	/* upper 16 bits of register SPI_FW_STATUS */
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+
+	bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
+				  phy->ver_addr);
+}
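The saved version word is assembled from two 16-bit MDIO reads, with 0xA81C supplying the upper half and 0xA81B the lower half. A hedged standalone illustration of that packing (the helper name and test values are mine, not the driver's):

    #include <stdio.h>
    #include <stdint.h>

    /* pack the two SPI_FW_STATUS halves the way the code above does */
    static uint32_t pack_fw_version(uint16_t fw_ver1, uint16_t fw_ver2)
    {
        /* fw_ver2 holds the upper 16 bits, fw_ver1 the lower 16 bits */
        return ((uint32_t)fw_ver2 << 16) | fw_ver1;
    }

    int main(void)
    {
        printf("0x%08x\n", pack_fw_version(0x0102, 0x0304)); /* 0x03040102 */
        return 0;
    }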
-
-static void bnx2x_link_int_enable(struct link_params *params)
-{
-	u8 port = params->port;
-	u32 ext_phy_type;
-	u32 mask;
-	struct bnx2x *bp = params->bp;
-
-	/* setting the status to report on link up
-	   for either XGXS or SerDes */
-
-	if (params->switch_cfg == SWITCH_CFG_10G) {
-		mask = (NIG_MASK_XGXS0_LINK10G |
-			NIG_MASK_XGXS0_LINK_STATUS);
-		DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
-		ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-		if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
-		    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
-		    (ext_phy_type !=
-		     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
-			mask |= NIG_MASK_MI_INT;
-			DP(NETIF_MSG_LINK, "enabled external phy int\n");
-		}
-
-	} else { /* SerDes */
-		mask = NIG_MASK_SERDES0_LINK_STATUS;
-		DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
-		ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
-		if ((ext_phy_type !=
-		     PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
-		    (ext_phy_type !=
-		     PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
-			mask |= NIG_MASK_MI_INT;
-			DP(NETIF_MSG_LINK, "enabled external phy int\n");
-		}
-	}
-	bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, mask);
-
-	DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
-	   (params->switch_cfg == SWITCH_CFG_10G),
-	   REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
-	DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
-	   REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
-	   REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
-	   REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
-	DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
-	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
-	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
-}
-
-static void bnx2x_8481_rearm_latch_signal(struct bnx2x *bp, u8 port,
-					  u8 is_mi_int)
-{
-	u32 latch_status = 0, is_mi_int_status;
-	/* Disable the MI INT ( external phy int )
-	 * by writing 1 to the status register. Link down indication
-	 * is high-active-signal, so in this case we need to write the
-	 * status to clear the XOR
-	 */
-	/* Read Latched signals */
-	latch_status = REG_RD(bp, NIG_REG_LATCH_STATUS_0 + port*8);
-	is_mi_int_status = REG_RD(bp,
-				  NIG_REG_STATUS_INTERRUPT_PORT0 + port*4);
-	DP(NETIF_MSG_LINK, "original_signal = 0x%x, nig_status = 0x%x,"
-	   " latch_status = 0x%x\n",
-	   is_mi_int, is_mi_int_status, latch_status);
-	/* Handle only those with latched-signal=up.*/
-	if (latch_status & 1) {
-		/* For all latched-signal=up, write original_signal
-		   to status */
-		if (is_mi_int)
-			bnx2x_bits_en(bp,
-				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-				      NIG_STATUS_EMAC0_MI_INT);
-		else
-			bnx2x_bits_dis(bp,
-				       NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-				       NIG_STATUS_EMAC0_MI_INT);
-		/* For all latched-signal=up : Re-Arm Latch signals */
-		REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
-		       (latch_status & 0xfffe) | (latch_status & 1));
-	}
-}
+
+static void bnx2x_848xx_set_led(struct bnx2x *bp,
+				struct bnx2x_phy *phy)
+{
+	u16 val;
+
+	/* PHYC_CTL_LED_CTL */
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+	val &= 0xFE00;
+	val |= 0x0092;
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8481_LED1_MASK, 0x80);
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8481_LED2_MASK, 0x18);
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8481_LED3_MASK, 0x0040);
+
+	/* 'Interrupt Mask' */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 0xFFFB, 0xFFFD);
+}
+
+static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 autoneg_val, an_1000_val, an_10_100_val;
+	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+		      1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
+
+	bnx2x_848xx_set_led(bp, phy);
+
+	/* set 1000 speed advertisement */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+			&an_1000_val);
+
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
+			&an_10_100_val);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+			&autoneg_val);
+	/* Disable forced speed */
+	autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+	an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
+
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask &
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (phy->req_line_speed == SPEED_1000)) {
+		an_1000_val |= (1<<8);
+		autoneg_val |= (1<<9 | 1<<12);
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_1000_val |= (1<<9);
+		DP(NETIF_MSG_LINK, "Advertising 1G\n");
+	} else
+		an_1000_val &= ~((1<<8) | (1<<9));
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+			 an_1000_val);
+
+	/* set 100 speed advertisement */
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask &
+	      (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+	       PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
+		an_10_100_val |= (1<<7);
+		/* Enable autoneg and restart autoneg for legacy speeds */
+		autoneg_val |= (1<<9 | 1<<12);
+
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_10_100_val |= (1<<8);
+		DP(NETIF_MSG_LINK, "Advertising 100M\n");
+	}
+	/* set 10 speed advertisement */
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask &
+	      (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+	       PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
+		an_10_100_val |= (1<<5);
+		autoneg_val |= (1<<9 | 1<<12);
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_10_100_val |= (1<<6);
+		DP(NETIF_MSG_LINK, "Advertising 10M\n");
+	}
+
+	/* Only 10/100 are allowed to work in FORCE mode */
+	if (phy->req_line_speed == SPEED_100) {
+		autoneg_val |= (1<<13);
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+				 (1<<15 | 1<<9 | 7<<0));
+		DP(NETIF_MSG_LINK, "Setting 100M force\n");
+	}
+	if (phy->req_line_speed == SPEED_10) {
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+				 (1<<15 | 1<<9 | 7<<0));
+		DP(NETIF_MSG_LINK, "Setting 10M force\n");
+	}
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
+			 an_10_100_val);
+
+	if (phy->req_duplex == DUPLEX_FULL)
+		autoneg_val |= (1<<8);
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
+
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask &
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+	    (phy->req_line_speed == SPEED_10000)) {
+		DP(NETIF_MSG_LINK, "Advertising 10G\n");
+		/* Restart autoneg for 10G*/
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+				 0x3200);
+	} else if (phy->req_line_speed != SPEED_10 &&
+		   phy->req_line_speed != SPEED_100) {
+		bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+				 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+				 1);
+	}
+	/* Save spirom version */
+	bnx2x_save_848xx_spirom_version(phy, params);
+
+	return 0;
+}
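The config-init above builds the legacy advertisement word bit by bit. As a rough standalone model of that pattern, following the bit positions used in the code (the helper name is mine, not the driver's):

    #include <stdio.h>
    #include <stdint.h>

    /* model of the 10/100 advertisement bits used above:
     * bit5/6 = 10M half/full, bit7/8 = 100M half/full */
    static uint16_t build_an_10_100(int adv_10, int adv_100, int full_duplex)
    {
        uint16_t val = 0;

        if (adv_100) {
            val |= (1 << 7);          /* 100M half */
            if (full_duplex)
                val |= (1 << 8);      /* 100M full */
        }
        if (adv_10) {
            val |= (1 << 5);          /* 10M half */
            if (full_duplex)
                val |= (1 << 6);      /* 10M full */
        }
        return val;
    }

    int main(void)
    {
        printf("0x%04x\n", build_an_10_100(1, 1, 1)); /* prints 0x01e0 */
        return 0;
    }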
-/*
- * link management
- */
-static void bnx2x_link_int_ack(struct link_params *params,
-			       struct link_vars *vars, u8 is_10g,
-			       u8 is_mi_int)
-{
-	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
-
-	/* first reset all status
-	 * we assume only one line will be changed at a time */
-	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-		       (NIG_STATUS_XGXS0_LINK10G |
-			NIG_STATUS_XGXS0_LINK_STATUS |
-			NIG_STATUS_SERDES0_LINK_STATUS));
-	if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
-	     == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
-	    (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
-	     == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
-		bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
-	}
-	if (vars->phy_link_up) {
-		if (is_10g) {
-			/* Disable the 10G link interrupt
-			 * by writing 1 to the status register
-			 */
-			DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
-			bnx2x_bits_en(bp,
-				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-				      NIG_STATUS_XGXS0_LINK10G);
-
-		} else if (params->switch_cfg == SWITCH_CFG_10G) {
-			/* Disable the link interrupt
-			 * by writing 1 to the relevant lane
-			 * in the status register
-			 */
-			u32 ser_lane = ((params->lane_config &
-				    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-				    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
-
-			DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n",
-			   vars->line_speed);
-			bnx2x_bits_en(bp,
-				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-				      ((1 << ser_lane) <<
-				       NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
-
-		} else { /* SerDes */
-			DP(NETIF_MSG_LINK, "SerDes phy link up\n");
-			/* Disable the link interrupt
-			 * by writing 1 to the status register
-			 */
-			bnx2x_bits_en(bp,
-				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-				      NIG_STATUS_SERDES0_LINK_STATUS);
-		}
-
-	} else { /* link_down */
-	}
-}
+
+static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	/* Restore normal power mode*/
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+	return bnx2x_848xx_cmn_config_init(phy, params, vars);
+}
+
+static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port, initialize = 1;
+	u16 val;
+	u16 temp;
+	u32 actual_phy_selection;
+	u8 rc = 0;
+
+	/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
+
+	msleep(1);
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+		       port);
+	msleep(200); /* 100 is not enough */
+
+	/* BCM84823 requires that XGXS links up first @ 10G for normal
+	   behavior */
+	temp = vars->line_speed;
+	vars->line_speed = SPEED_10000;
+	bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+	bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+	vars->line_speed = temp;
+
+	/* Set dual-media configuration according to configuration */
+
+	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_CTL_REG_84823_MEDIA, &val);
+	val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+		 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
+		 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
+		 MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
+		 MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
+	val |= MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+	       MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L;
+
+	actual_phy_selection = bnx2x_phy_selection(params);
+
+	switch (actual_phy_selection) {
+	case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+		/* Do nothing. Essentially this is like the priority copper */
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+		val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+		val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+		/* Do nothing here. The first PHY won't be initialized at all */
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+		val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
+		initialize = 0;
+		break;
+	}
+	if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
+		val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
+
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			 MDIO_CTL_REG_84823_MEDIA, val);
+	DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
+	   params->multi_phy_config, val);
+
+	if (initialize)
+		rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
+	else
+		bnx2x_save_848xx_spirom_version(phy, params);
+	return rc;
+}
-
-static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len)
-{
-	u8 *str_ptr = str;
-	u32 mask = 0xf0000000;
-	u8 shift = 8*4;
-	u8 digit;
-	if (len < 10) {
-		/* Need more than 10 chars for this format */
-		*str_ptr = '\0';
-		return -EINVAL;
-	}
-	while (shift > 0) {
-
-		shift -= 4;
-		digit = ((num & mask) >> shift);
-		if (digit < 0xa)
-			*str_ptr = digit + '0';
-		else
-			*str_ptr = digit - 0xa + 'a';
-		str_ptr++;
-		mask = mask >> 4;
-		if (shift == 4*4) {
-			*str_ptr = ':';
-			str_ptr++;
-		}
-	}
-	*str_ptr = '\0';
-	return 0;
-}
+
+static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val, val1, val2;
+	u8 link_up = 0;
+
+	/* Check 10G-BaseT link status */
+	/* Check PMD signal ok */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, 0xFFFA, &val1);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+			&val2);
+	DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
+
+	/* Check link 10G */
+	if (val2 & (1<<11)) {
+		vars->line_speed = SPEED_10000;
+		link_up = 1;
+		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+	} else { /* Check Legacy speed link */
+		u16 legacy_status, legacy_speed;
+
+		/* Enable expansion register 0x42 (Operation mode status) */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD,
+				 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
+
+		/* Get legacy speed operation status */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
+				&legacy_status);
+
+		DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
+		   legacy_status);
+		link_up = ((legacy_status & (1<<11)) == (1<<11));
+		if (link_up) {
+			legacy_speed = (legacy_status & (3<<9));
+			if (legacy_speed == (0<<9))
+				vars->line_speed = SPEED_10;
+			else if (legacy_speed == (1<<9))
+				vars->line_speed = SPEED_100;
+			else if (legacy_speed == (2<<9))
+				vars->line_speed = SPEED_1000;
+			else /* Should not happen */
+				vars->line_speed = 0;
+
+			if (legacy_status & (1<<8))
+				vars->duplex = DUPLEX_FULL;
+			else
+				vars->duplex = DUPLEX_HALF;
+
+			DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
+			   " is_duplex_full= %d\n", vars->line_speed,
+			   (vars->duplex == DUPLEX_FULL));
+			/* Check legacy speed AN resolution */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_AN_DEVAD,
+					MDIO_AN_REG_8481_LEGACY_MII_STATUS,
+					&val);
+			if (val & (1<<5))
+				vars->link_status |=
+					LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+			bnx2x_cl45_read(bp, phy,
+					MDIO_AN_DEVAD,
+					MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
+					&val);
+			if ((val & (1<<0)) == 0)
+				vars->link_status |=
+					LINK_STATUS_PARALLEL_DETECTION_USED;
+		}
+	}
+	if (link_up) {
+		DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
+		   vars->line_speed);
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	}
+
+	return link_up;
+}
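The legacy-status word decoded above packs link, speed, and duplex into one register. A self-contained sketch of that decode, under the bit layout the code uses (helper name and sample value are mine):

    #include <stdio.h>
    #include <stdint.h>

    /* decode the 8481 expansion-reg 0x42 word used above:
     * bit 11 = link up, bits 9..10 = speed code, bit 8 = full duplex */
    static void decode_legacy(uint16_t legacy_status)
    {
        static const int speed[4] = { 10, 100, 1000, 0 };
        int up = (legacy_status >> 11) & 1;
        int spd = speed[(legacy_status >> 9) & 3];
        int full = (legacy_status >> 8) & 1;

        printf("link %s, %d Mbps, %s duplex\n",
               up ? "up" : "down", spd, full ? "full" : "half");
    }

    int main(void)
    {
        decode_legacy(0x0B00); /* bits 11,9,8 -> up, 100 Mbps, full */
        return 0;
    }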
-
-u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
-				u8 *version, u16 len)
-{
-	struct bnx2x *bp;
-	u32 ext_phy_type = 0;
-	u32 spirom_ver = 0;
-	u8 status;
-
-	if (version == NULL || params == NULL)
-		return -EINVAL;
-	bp = params->bp;
-
-	spirom_ver = REG_RD(bp, params->shmem_base +
-			    offsetof(struct shmem_region,
-				port_mb[params->port].ext_phy_fw_version));
-
-	status = 0;
-	/* reset the returned value to zero */
-	ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-	switch (ext_phy_type) {
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-
-		if (len < 5)
-			return -EINVAL;
-
-		version[0] = (spirom_ver & 0xFF);
-		version[1] = (spirom_ver & 0xFF00) >> 8;
-		version[2] = (spirom_ver & 0xFF0000) >> 16;
-		version[3] = (spirom_ver & 0xFF000000) >> 24;
-		version[4] = '\0';
-
-		break;
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-		status = bnx2x_format_ver(spirom_ver, version, len);
-		break;
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
-		spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
-			     (spirom_ver & 0x7F);
-		status = bnx2x_format_ver(spirom_ver, version, len);
-		break;
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
-		version[0] = '\0';
-		break;
-
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
-		DP(NETIF_MSG_LINK, "bnx2x_get_ext_phy_fw_version:"
-		   " type is FAILURE!\n");
-		status = -EINVAL;
-		break;
-
-	default:
-		break;
-	}
-	return status;
-}
-
-static void bnx2x_set_xgxs_loopback(struct link_params *params,
-				    struct link_vars *vars,
-				    u8 is_10g)
-{
-	u8 port = params->port;
-	struct bnx2x *bp = params->bp;
-
-	if (is_10g) {
-		u32 md_devad;
-
-		DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
-
-		/* change the uni_phy_addr in the nig */
-		md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
-				       port*0x18));
-
-		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
-
-		bnx2x_cl45_write(bp, port, 0,
-			       params->phy_addr,
-			       5,
-			       (MDIO_REG_BANK_AER_BLOCK +
-				(MDIO_AER_BLOCK_AER_REG & 0xf)),
-			       0x2800);
-
-		bnx2x_cl45_write(bp, port, 0,
-			       params->phy_addr,
-			       5,
-			       (MDIO_REG_BANK_CL73_IEEEB0 +
-				(MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
-			       0x6041);
-		msleep(200);
-		/* set aer mmd back */
-		bnx2x_set_aer_mmd(params, vars);
-
-		/* and md_devad */
-		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
-		       md_devad);
-
-	} else {
-		u16 mii_control;
-
-		DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
-
-		CL45_RD_OVER_CL22(bp, port,
-				  params->phy_addr,
-				  MDIO_REG_BANK_COMBO_IEEE0,
-				  MDIO_COMBO_IEEE0_MII_CONTROL,
-				  &mii_control);
-
-		CL45_WR_OVER_CL22(bp, port,
-				  params->phy_addr,
-				  MDIO_REG_BANK_COMBO_IEEE0,
-				  MDIO_COMBO_IEEE0_MII_CONTROL,
-				  (mii_control |
-				   MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
-	}
-}
+
+static u8 bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
+{
+	u8 status = 0;
+	u32 spirom_ver;
+	spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
+	status = bnx2x_format_ver(spirom_ver, str, len);
+	return status;
+}
+
+static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+}
+
+static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	bnx2x_cl45_write(params->bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+	bnx2x_cl45_write(params->bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
+}
+
+static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
+				   struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       port);
+}
+
+static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
+				     struct link_params *params, u8 mode)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+
+	switch (mode) {
+	case LED_MODE_OFF:
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", params->port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK, 0x0);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK, 0x0);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK, 0x0);
+		} else {
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+		}
+		break;
+	case LED_MODE_FRONT_PANEL_OFF:
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
+		   params->port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK, 0x0);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK, 0x0);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK, 0x20);
+		} else {
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+		}
+		break;
+	case LED_MODE_ON:
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", params->port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+			/* Set control reg */
+			bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+			val &= 0x8000;
+			val |= 0x2492;
+
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK, 0x0);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK, 0x20);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK, 0x20);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK, 0x0);
+		} else {
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK, 0x20);
+		}
+		break;
+
+	case LED_MODE_OPER:
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", params->port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+			/* Set control reg */
+			bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+
+			if (!((val &
+			       MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+			    >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+				DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
+				bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+						 MDIO_PMA_REG_8481_LINK_SIGNAL,
+						 0xa492);
+			}
+
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK, 0x10);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK, 0x80);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK, 0x98);
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK, 0x40);
+		} else {
+			bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK, 0x80);
+		}
+		break;
+	}
+}
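bnx2x_848xx_format_ver above first repacks the raw SPI-ROM word so that bits 7..11 become the upper half of the version and bits 0..6 the lower half. A standalone demonstration of that transform (function name and sample input are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    /* 848xx SPI-ROM version: bits 7..11 -> upper half, bits 0..6 -> lower */
    static uint32_t spirom_848xx(uint32_t raw_ver)
    {
        return (((raw_ver & 0xF80) >> 7) << 16) | (raw_ver & 0x7F);
    }

    int main(void)
    {
        printf("0x%08x\n", spirom_848xx(0xABC)); /* prints 0x0015003c */
        return 0;
    }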
-
-static void bnx2x_ext_phy_loopback(struct link_params *params)
-{
-	struct bnx2x *bp = params->bp;
-	u8 ext_phy_addr;
-	u32 ext_phy_type;
-
-	if (params->switch_cfg == SWITCH_CFG_10G) {
-		ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-		ext_phy_addr = XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-		/* CL37 Autoneg Enabled */
-		switch (ext_phy_type) {
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN:
-			DP(NETIF_MSG_LINK,
-			   "ext_phy_loopback: We should not get here\n");
-			break;
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
-			DP(NETIF_MSG_LINK, "ext_phy_loopback: 8705\n");
-			break;
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
-			DP(NETIF_MSG_LINK, "ext_phy_loopback: 8706\n");
-			break;
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-			DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-				       ext_phy_addr,
-				       MDIO_PMA_DEVAD,
-				       MDIO_PMA_REG_CTRL,
-				       0x0001);
-			break;
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-			/* SFX7101_XGXS_TEST1 */
-			bnx2x_cl45_write(bp, params->port, ext_phy_type,
-				       ext_phy_addr,
-				       MDIO_XS_DEVAD,
-				       MDIO_XS_SFX7101_XGXS_TEST1,
-				       0x100);
-			DP(NETIF_MSG_LINK,
-			   "ext_phy_loopback: set ext phy loopback\n");
-			break;
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-
-			break;
-		} /* switch external PHY type */
-	} else {
-		/* serdes */
-		ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
-		ext_phy_addr = (params->ext_phy_config &
-				PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK)
-				>> PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT;
-	}
-}
+/******************************************************************/
+/*			SFX7101 PHY SECTION			  */
+/******************************************************************/
+static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
+				       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	/* SFX7101_XGXS_TEST1 */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
+}
+
+static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	u16 fw_ver1, fw_ver2, val;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
+
+	/* Restore normal power mode*/
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_wait_reset_complete(bp, phy);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
+	DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
+
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+	/* Restart autoneg */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
+	val |= 0x200;
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
+
+	/* Save spirom version */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
+	bnx2x_save_spirom_version(bp, params->port,
+				  (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
+	return 0;
+}
+
+static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 link_up;
+	u16 val1, val2;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
+	   val2, val1);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
+	   val2, val1);
+	link_up = ((val1 & 4) == 4);
+	/* if link is up
+	 * print the AN outcome of the SFX7101 PHY
+	 */
+	if (link_up) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
+				&val2);
+		vars->line_speed = SPEED_10000;
+		DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
+		   val2, (val2 & (1<<14)));
+		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	}
+	return link_up;
+}
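The back-to-back reads of MDIO_PMA_REG_LASI_STATUS above are deliberate: the status is latched, so the first read returns (and clears) the sticky value while the second returns the live state, which is why the debug print shows an old->new pair. A small self-contained model of that clear-on-read behavior (the struct and names are illustrative, not from the driver):

    #include <stdio.h>
    #include <stdint.h>

    struct latched_reg {
        uint16_t latched;  /* sticky bits, cleared on read */
        uint16_t live;     /* current signal state */
    };

    /* model of a clear-on-read latched status register */
    static uint16_t reg_read(struct latched_reg *r)
    {
        uint16_t v = r->latched;
        r->latched = r->live;  /* re-latch from the live state */
        return v;
    }

    int main(void)
    {
        struct latched_reg lasi = { .latched = 0x0001, .live = 0x0000 };
        uint16_t old = reg_read(&lasi); /* 0x0001: a fault was latched */
        uint16_t now = reg_read(&lasi); /* 0x0000: condition has cleared */
        printf("LASI status 0x%x->0x%x\n", old, now);
        return 0;
    }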
-
-/*
- *------------------------------------------------------------------------
- * bnx2x_override_led_value -
- *
- * Override the led value of the requested led
- *
- *------------------------------------------------------------------------
- */
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
-			    u32 led_idx, u32 value)
-{
-	u32 reg_val;
-
-	/* If port 0 then use EMAC0, else use EMAC1*/
-	u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
-	DP(NETIF_MSG_LINK,
-	   "bnx2x_override_led_value() port %x led_idx %d value %d\n",
-	   port, led_idx, value);
-
-	switch (led_idx) {
-	case 0: /* 10MB led */
-		/* Read the current value of the LED register in
-		   the EMAC block */
-		reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-		/* Set the OVERRIDE bit to 1 */
-		reg_val |= EMAC_LED_OVERRIDE;
-		/* If value is 1, set the 10M_OVERRIDE bit,
-		   otherwise reset it.*/
-		reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
-			(reg_val & ~EMAC_LED_10MB_OVERRIDE);
-		REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-		break;
-	case 1: /* 100MB led */
-		/* Read the current value of the LED register in
-		   the EMAC block */
-		reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-		/* Set the OVERRIDE bit to 1 */
-		reg_val |= EMAC_LED_OVERRIDE;
-		/* If value is 1, set the 100M_OVERRIDE bit,
-		   otherwise reset it.*/
-		reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
-			(reg_val & ~EMAC_LED_100MB_OVERRIDE);
-		REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-		break;
-	case 2: /* 1000MB led */
-		/* Read the current value of the LED register in the
-		   EMAC block */
-		reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-		/* Set the OVERRIDE bit to 1 */
-		reg_val |= EMAC_LED_OVERRIDE;
-		/* If value is 1, set the 1000M_OVERRIDE bit, otherwise
-		   reset it. */
-		reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
-			(reg_val & ~EMAC_LED_1000MB_OVERRIDE);
-		REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-		break;
-	case 3: /* 2500MB led */
-		/* Read the current value of the LED register in the
-		   EMAC block*/
-		reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-		/* Set the OVERRIDE bit to 1 */
-		reg_val |= EMAC_LED_OVERRIDE;
-		/* If value is 1, set the 2500M_OVERRIDE bit, otherwise
-		   reset it.*/
-		reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
-			(reg_val & ~EMAC_LED_2500MB_OVERRIDE);
-		REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-		break;
-	case 4: /* 10G led */
-		if (port == 0) {
-			REG_WR(bp, NIG_REG_LED_10G_P0,
-			       value);
-		} else {
-			REG_WR(bp, NIG_REG_LED_10G_P1,
-			       value);
-		}
-		break;
-	case 5: /* TRAFFIC led */
-		/* Find if the traffic control is via BMAC or EMAC */
-		if (port == 0)
-			reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
-		else
-			reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
-
-		/* Override the traffic led in the EMAC:*/
-		if (reg_val == 1) {
-			/* Read the current value of the LED register in
-			   the EMAC block */
-			reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
-			/* Set the TRAFFIC_OVERRIDE bit to 1 */
-			reg_val |= EMAC_LED_OVERRIDE;
-			/* If value is 1, set the TRAFFIC bit, otherwise
-			   reset it.*/
-			reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
-				(reg_val & ~EMAC_LED_TRAFFIC);
-			REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
-		} else { /* Override the traffic led in the BMAC: */
-			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
-			       + port*4, 1);
-			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
-			       value);
-		}
-		break;
-	default:
-		DP(NETIF_MSG_LINK,
-		   "bnx2x_override_led_value() unknown led index %d "
-		   "(should be 0-5)\n", led_idx);
-		return -EINVAL;
-	}
-
-	return 0;
-}
+
+static u8 bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+	if (*len < 5)
+		return -EINVAL;
+	str[0] = (spirom_ver & 0xFF);
+	str[1] = (spirom_ver & 0xFF00) >> 8;
+	str[2] = (spirom_ver & 0xFF0000) >> 16;
+	str[3] = (spirom_ver & 0xFF000000) >> 24;
+	str[4] = '\0';
+	*len -= 5;
+	return 0;
+}
+
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	u16 val, cnt;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_RESET, &val);
+
+	for (cnt = 0; cnt < 10; cnt++) {
+		msleep(50);
+		/* Writes a self-clearing reset */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_RESET,
+				 (val | (1<<15)));
+		/* Wait for clear */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_RESET, &val);
+
+		if ((val & (1<<15)) == 0)
+			break;
+	}
+}
+
+static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params) {
+	/* Low power mode is controlled by GPIO 2 */
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+	/* The PHY reset is controlled by GPIO 1 */
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+}
+
+static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
+				    struct link_params *params, u8 mode)
+{
+	u16 val = 0;
+	struct bnx2x *bp = params->bp;
+	switch (mode) {
+	case LED_MODE_FRONT_PANEL_OFF:
+	case LED_MODE_OFF:
+		val = 2;
+		break;
+	case LED_MODE_ON:
+		val = 1;
+		break;
+	case LED_MODE_OPER:
+		val = 0;
+		break;
+	}
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LINK_LED_CNTL,
+			 val);
+}
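bnx2x_7101_format_ver above copies the four bytes of the 32-bit SPI-ROM word straight into the output buffer, low byte first, and shrinks the remaining length. A standalone sketch of that unpacking (buffer handling simplified, names mine):

    #include <stdio.h>
    #include <stdint.h>

    /* split a 32-bit version word into 4 bytes, low byte first, as above */
    static int unpack_ver(uint32_t v, uint8_t *str, uint16_t *len)
    {
        if (*len < 5)
            return -1;
        str[0] = v & 0xFF;
        str[1] = (v & 0xFF00) >> 8;
        str[2] = (v & 0xFF0000) >> 16;
        str[3] = (v & 0xFF000000) >> 24;
        str[4] = '\0';
        *len -= 5;
        return 0;
    }

    int main(void)
    {
        uint8_t buf[8];
        uint16_t len = sizeof(buf);
        unpack_ver(0x04030201, buf, &len);
        printf("%u.%u.%u.%u (len left %u)\n",
               buf[0], buf[1], buf[2], buf[3], len);
        return 0;
    }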
+/******************************************************************/
+/*			STATIC PHY DECLARATION			  */
+/******************************************************************/
+
+static struct bnx2x_phy phy_null = {
+	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
+	.addr = 0,
+	.flags = FLAGS_INIT_XGXS_FIRST,
+	.def_md_devad = 0,
+	.reserved = 0,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = 0,
+	.media_type = ETH_PHY_NOT_PRESENT,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)NULL,
+	.read_status = (read_status_t)NULL,
+	.link_reset = (link_reset_t)NULL,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver = (format_fw_ver_t)NULL,
+	.hw_reset = (hw_reset_t)NULL,
+	.set_link_led = (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_serdes = {
+	.type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
+	.addr = 0xff,
+	.flags = 0,
+	.def_md_devad = 0,
+	.reserved = 0,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+		      SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+		      SUPPORTED_1000baseT_Full | SUPPORTED_2500baseX_Full |
+		      SUPPORTED_TP | SUPPORTED_Autoneg |
+		      SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.media_type = ETH_PHY_UNSPECIFIED,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)bnx2x_init_serdes,
+	.read_status = (read_status_t)bnx2x_link_settings_status,
+	.link_reset = (link_reset_t)bnx2x_int_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver = (format_fw_ver_t)NULL,
+	.hw_reset = (hw_reset_t)NULL,
+	.set_link_led = (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_xgxs = {
+	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+	.addr = 0xff,
+	.flags = 0,
+	.def_md_devad = 0,
+	.reserved = 0,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+		      SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+		      SUPPORTED_1000baseT_Full | SUPPORTED_2500baseX_Full |
+		      SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
+		      SUPPORTED_Autoneg |
+		      SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.media_type = ETH_PHY_UNSPECIFIED,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)bnx2x_init_xgxs,
+	.read_status = (read_status_t)bnx2x_link_settings_status,
+	.link_reset = (link_reset_t)bnx2x_int_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
+	.format_fw_ver = (format_fw_ver_t)NULL,
+	.hw_reset = (hw_reset_t)NULL,
+	.set_link_led = (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_7101 = {
+	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+	.addr = 0xff,
+	.flags = FLAGS_FAN_FAILURE_DET_REQ,
+	.def_md_devad = 0,
+	.reserved = 0,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP |
+		      SUPPORTED_Autoneg |
+		      SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.media_type = ETH_PHY_BASE_T,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)bnx2x_7101_config_init,
+	.read_status = (read_status_t)bnx2x_7101_read_status,
+	.link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
+	.format_fw_ver = (format_fw_ver_t)bnx2x_7101_format_ver,
+	.hw_reset = (hw_reset_t)bnx2x_7101_hw_reset,
+	.set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8073 = {
+	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+	.addr = 0xff,
+	.flags = FLAGS_HW_LOCK_REQUIRED,
+	.def_md_devad = 0,
+	.reserved = 0,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = (SUPPORTED_10000baseT_Full | SUPPORTED_2500baseX_Full |
+		      SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE |
+		      SUPPORTED_Autoneg |
+		      SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.media_type = ETH_PHY_UNSPECIFIED,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)bnx2x_8073_config_init,
+	.read_status = (read_status_t)bnx2x_8073_read_status,
+	.link_reset = (link_reset_t)bnx2x_8073_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset = (hw_reset_t)NULL,
+	.set_link_led = (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8705 = {
+	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
+	.addr = 0xff,
+	.flags = FLAGS_INIT_XGXS_FIRST,
+	.def_md_devad = 0,
+	.reserved = 0,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
+		      SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.media_type = ETH_PHY_XFP_FIBER,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)bnx2x_8705_config_init,
+	.read_status = (read_status_t)bnx2x_8705_read_status,
+	.link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver = (format_fw_ver_t)bnx2x_null_format_ver,
+	.hw_reset = (hw_reset_t)NULL,
+	.set_link_led = (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8706 = {
+	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
+	.addr = 0xff,
+	.flags = FLAGS_INIT_XGXS_FIRST,
+	.def_md_devad = 0,
+	.reserved = 0,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
+		      SUPPORTED_FIBRE |
+		      SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.media_type = ETH_PHY_SFP_FIBER,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)bnx2x_8706_config_init,
+	.read_status = (read_status_t)bnx2x_8706_read_status,
+	.link_reset = (link_reset_t)bnx2x_common_ext_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset = (hw_reset_t)NULL,
+	.set_link_led = (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8726 = {
+	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
+	.addr = 0xff,
+	.flags = (FLAGS_HW_LOCK_REQUIRED |
+		  FLAGS_INIT_XGXS_FIRST),
+	.def_md_devad = 0,
+	.reserved = 0,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
+		      SUPPORTED_Autoneg | SUPPORTED_FIBRE |
+		      SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.media_type = ETH_PHY_SFP_FIBER,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)bnx2x_8726_config_init,
+	.read_status = (read_status_t)bnx2x_8726_read_status,
+	.link_reset = (link_reset_t)bnx2x_8726_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
+	.format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset = (hw_reset_t)NULL,
+	.set_link_led = (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8727 = {
+	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
+	.addr = 0xff,
+	.flags = FLAGS_FAN_FAILURE_DET_REQ,
+	.def_md_devad = 0,
+	.reserved = 0,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
+		      SUPPORTED_FIBRE |
+		      SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.media_type = ETH_PHY_SFP_FIBER,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)bnx2x_8727_config_init,
+	.read_status = (read_status_t)bnx2x_8727_read_status,
+	.link_reset = (link_reset_t)bnx2x_8727_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver = (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset = (hw_reset_t)bnx2x_8727_hw_reset,
+	.set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
+};
+
+static struct bnx2x_phy phy_8481 = {
+	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+	.addr = 0xff,
+	.flags = FLAGS_FAN_FAILURE_DET_REQ |
+		 FLAGS_REARM_LATCH_SIGNAL,
+	.def_md_devad = 0,
+	.reserved = 0,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+		      SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+		      SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full |
+		      SUPPORTED_TP | SUPPORTED_Autoneg |
+		      SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.media_type = ETH_PHY_BASE_T,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)bnx2x_8481_config_init,
+	.read_status = (read_status_t)bnx2x_848xx_read_status,
+	.link_reset = (link_reset_t)bnx2x_8481_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset = (hw_reset_t)bnx2x_8481_hw_reset,
+	.set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_84823 = {
+	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
+	.addr = 0xff,
+	.flags = FLAGS_FAN_FAILURE_DET_REQ |
+		 FLAGS_REARM_LATCH_SIGNAL,
+	.def_md_devad = 0,
+	.reserved = 0,
+	.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl = 0,
+	.supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+		      SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+		      SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full |
+		      SUPPORTED_TP | SUPPORTED_Autoneg |
+		      SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.media_type = ETH_PHY_BASE_T,
+	.ver_addr = 0,
+	.req_flow_ctrl = 0,
+	.req_line_speed = 0,
+	.speed_cap_mask = 0,
+	.req_duplex = 0,
+	.rsrv = 0,
+	.config_init = (config_init_t)bnx2x_848x3_config_init,
+	.read_status = (read_status_t)bnx2x_848xx_read_status,
+	.link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset = (hw_reset_t)NULL,
+	.set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+/*****************************************************************/
+/*                                                               */
+/* Populate the phy according to the configuration.              */
+/* Main function: bnx2x_populate_phy                             */
+/*                                                               */
+/*****************************************************************/
+
+static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
+				       struct bnx2x_phy *phy, u8 port,
+				       u8 phy_index)
+{
+	/* Get the 4 lanes xgxs config rx and tx */
+	u32 rx = 0, tx = 0, i;
+	for (i = 0; i < 2; i++) {
+		/**
+		 * INT_PHY and EXT_PHY1 share the same value location in the
+		 * shmem. When num_phys is greater than 1, then this value
+		 * applies only to EXT_PHY1
+		 */
+		if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
+			rx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+
+			tx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+		} else {
+			rx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+
+			tx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+		}
+
+		phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
+		phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
+
+		phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
+		phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
+	}
+}
+
+static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
+				    u8 phy_index, u8 port)
+{
+	u32 ext_phy_config = 0;
+	switch (phy_index) {
+	case EXT_PHY1:
+		ext_phy_config = REG_RD(bp, shmem_base +
+					offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].external_phy_config));
+		break;
+	case EXT_PHY2:
+		ext_phy_config = REG_RD(bp, shmem_base +
+					offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].external_phy_config2));
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
+		return -EINVAL;
+	}
+
+	return ext_phy_config;
+}
+static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+				 struct bnx2x_phy *phy)
+{
+	u32 phy_addr;
+	u32 chip_id;
+	u32 switch_cfg = (REG_RD(bp, shmem_base +
+				 offsetof(struct shmem_region,
+			dev_info.port_feature_config[port].link_config)) &
+			  PORT_FEATURE_CONNECTED_SWITCH_MASK);
+	chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+	switch (switch_cfg) {
+	case SWITCH_CFG_1G:
+		phy_addr = REG_RD(bp,
+				  NIG_REG_SERDES0_CTRL_PHY_ADDR +
+				  port * 0x10);
+		*phy = phy_serdes;
+		break;
+	case SWITCH_CFG_10G:
+		phy_addr = REG_RD(bp,
+				  NIG_REG_XGXS0_CTRL_PHY_ADDR +
+				  port * 0x18);
+		*phy = phy_xgxs;
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
+		return -EINVAL;
+	}
+	phy->addr = (u8)phy_addr;
+	phy->mdio_ctrl = bnx2x_get_emac_base(bp,
+					SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
+					port);
+	phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
+
+	DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
+	   port, phy->addr, phy->mdio_ctrl);
+
+	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
+	return 0;
+}
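Each shmem word read in bnx2x_populate_preemphasis packs two 16-bit lane values, so one 32-bit read fills two preemphasis slots. A hedged standalone illustration of that split (the array and sample words are mine):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t rx_preemphasis[4];
        uint32_t words[2] = { 0x11112222, 0x33334444 }; /* sample shmem words */
        unsigned i;

        /* each 32-bit word carries the values for two lanes */
        for (i = 0; i < 2; i++) {
            rx_preemphasis[i << 1] = (words[i] >> 16) & 0xffff;
            rx_preemphasis[(i << 1) + 1] = words[i] & 0xffff;
        }
        for (i = 0; i < 4; i++)
            printf("lane %u: 0x%04x\n", i, rx_preemphasis[i]);
        return 0;
    }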
 
-
-u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed)
+static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
+				 u8 phy_index,
+				 u32 shmem_base,
+				 u32 shmem2_base,
+				 u8 port,
+				 struct bnx2x_phy *phy)
 {
-	u8 port = params->port;
-	u16 hw_led_mode = params->hw_led_mode;
-	u8 rc = 0;
-	u32 tmp;
-	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-	struct bnx2x *bp = params->bp;
-	DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
-	DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
-	   speed, hw_led_mode);
-	switch (mode) {
-	case LED_MODE_OFF:
-		REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
-		REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-		       SHARED_HW_CFG_LED_MAC1);
-
-		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-		EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
+	u32 ext_phy_config, phy_type, config2;
+	u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
+	ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
+						  phy_index, port);
+	phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+	/* Select the phy type */
+	switch (phy_type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
+		*phy = phy_8073;
 		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+		*phy = phy_8705;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+		*phy = phy_8706;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+		*phy = phy_8726;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+		/* BCM8727_NOC => BCM8727 no over current */
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+		*phy = phy_8727;
+		phy->flags |= FLAGS_NOC;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+		*phy = phy_8727;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+		*phy = phy_8481;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+		*phy = phy_84823;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+		*phy = phy_7101;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+		*phy = phy_null;
+		return -EINVAL;
+	default:
+		*phy = phy_null;
+		return 0;
+	}
 
-	case LED_MODE_OPER:
-		if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
-			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
-			REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
-		} else {
-			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-			       hw_led_mode);
-		}
+	phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
+	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
 
-		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
-		       port*4, 0);
-		/* Set blinking rate to ~15.9Hz */
-		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
-		       LED_BLINK_RATE_VAL);
-		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
-		       port*4, 1);
-		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-		EMAC_WR(bp, EMAC_REG_EMAC_LED,
-			(tmp & (~EMAC_LED_OVERRIDE)));
+	/**
+	 * The shmem address of the phy version is located on different
+	 * structures. In case this structure is too old, do not set
+	 * the address
+	 */
+	config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
+					dev_info.shared_hw_config.config2));
+	if (phy_index == EXT_PHY1) {
+		phy->ver_addr = shmem_base + offsetof(struct shmem_region,
+				port_mb[port].ext_phy_fw_version);
+
+		/* Check specific mdc mdio settings */
+		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+			mdc_mdio_access = config2 &
+			SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+	} else {
+		u32 size = REG_RD(bp, shmem2_base);
 
-		if (CHIP_IS_E1(bp) &&
-		    ((speed == SPEED_2500) ||
-		     (speed == SPEED_1000) ||
-		     (speed == SPEED_100) ||
-		     (speed == SPEED_10))) {
-			/* On Everest 1 Ax chip versions for speeds less than
-			   10G LED scheme is different */
-			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
-			       + port*4, 1);
-			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
-			       port*4, 0);
-			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
-			       port*4, 1);
-		}
+		if (size >
+		    offsetof(struct shmem2_region, ext_phy_fw_version2)) {
+			phy->ver_addr = shmem2_base +
+				offsetof(struct shmem2_region,
+					 ext_phy_fw_version2[port]);
+		}
-		break;
-
-	default:
-		rc = -EINVAL;
-		DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
-		   mode);
-		break;
+		/* Check specific mdc mdio settings */
+		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
+			mdc_mdio_access = (config2 &
+			SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
+			(SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
+			 SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
 	}
-	return rc;
+	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
+	/**
+	 * In case mdc/mdio_access of the external phy is different than the
+	 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
+	 * to prevent one port interfere with another port's CL45 operations.
+	 */
+	if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
+		phy->flags |= FLAGS_HW_LOCK_REQUIRED;
+	DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
+	   phy_type, port, phy_index);
+	DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n",
+	   phy->addr, phy->mdio_ctrl);
+	return 0;
 }
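The ACCESS2 handling in bnx2x_populate_ext_phy() re-bases a bitfield: shifting right by the difference between the two field shifts moves the ACCESS2 value into the bit positions of ACCESS1, which is what bnx2x_get_emac_base() consumes. A self-contained sketch with made-up field positions (the ACC* constants are stand-ins, not the real shmem definitions):

	#define ACC1_SHIFT	0
	#define ACC1_MASK	(0x7 << ACC1_SHIFT)
	#define ACC2_SHIFT	4
	#define ACC2_MASK	(0x7 << ACC2_SHIFT)

	static unsigned int rebase_access2(unsigned int config2)
	{
		/* e.g. config2 = 0x50 -> field value 5, returned at ACC1's bit position */
		return (config2 & ACC2_MASK) >> (ACC2_SHIFT - ACC1_SHIFT);
	}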
 
-u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars)
+static u8 bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
+			     u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
 {
-	struct bnx2x *bp = params->bp;
-	u16 gp_status = 0;
-
-	CL45_RD_OVER_CL22(bp, params->port,
-			  params->phy_addr,
-			  MDIO_REG_BANK_GP_STATUS,
-			  MDIO_GP_STATUS_TOP_AN_STATUS1,
-			  &gp_status);
-	/* link is up only if both local phy and external phy are up */
-	if ((gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) &&
-	    bnx2x_ext_phy_is_link_up(params, vars, 1))
-		return 0;
-
-	return -ESRCH;
+	u8 status = 0;
+	phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
+	if (phy_index == INT_PHY)
+		return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
+	status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+					port, phy);
+	return status;
 }
 
-static u8 bnx2x_link_initialize(struct link_params *params,
-				struct link_vars *vars)
+static void bnx2x_phy_def_cfg(struct link_params *params,
+			      struct bnx2x_phy *phy,
+			      u8 phy_index)
 {
 	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
-	u8 rc = 0;
-	u8 non_ext_phy;
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
-	/* Activate the external PHY */
-	bnx2x_ext_phy_reset(params, vars);
-
-	bnx2x_set_aer_mmd(params, vars);
+	u32 link_config;
+	/* Populate the default phy configuration for MF mode */
+	if (phy_index == EXT_PHY2) {
+		link_config = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region, dev_info.
+			port_feature_config[params->port].link_config2));
+		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region, dev_info.
+			port_hw_config[params->port].speed_capability_mask2));
+	} else {
+		link_config = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region, dev_info.
+			port_feature_config[params->port].link_config));
+		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region, dev_info.
+			port_hw_config[params->port].speed_capability_mask));
+	}
+	DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
+	   " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
+
+	phy->req_duplex = DUPLEX_FULL;
+	switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+	case PORT_FEATURE_LINK_SPEED_10M_HALF:
+		phy->req_duplex = DUPLEX_HALF;
+	case PORT_FEATURE_LINK_SPEED_10M_FULL:
+		phy->req_line_speed = SPEED_10;
+		break;
+	case PORT_FEATURE_LINK_SPEED_100M_HALF:
+		phy->req_duplex = DUPLEX_HALF;
+	case PORT_FEATURE_LINK_SPEED_100M_FULL:
+		phy->req_line_speed = SPEED_100;
+		break;
+	case PORT_FEATURE_LINK_SPEED_1G:
+		phy->req_line_speed = SPEED_1000;
+		break;
+	case PORT_FEATURE_LINK_SPEED_2_5G:
+		phy->req_line_speed = SPEED_2500;
+		break;
+	case PORT_FEATURE_LINK_SPEED_10G_CX4:
+		phy->req_line_speed = SPEED_10000;
+		break;
+	default:
+		phy->req_line_speed = SPEED_AUTO_NEG;
+		break;
+	}
 
-	if (vars->phy_flags & PHY_XGXS_FLAG)
-		bnx2x_set_master_ln(params);
+	switch (link_config & PORT_FEATURE_FLOW_CONTROL_MASK) {
+	case PORT_FEATURE_FLOW_CONTROL_AUTO:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_TX:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_RX:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_BOTH:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+		break;
+	default:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+		break;
+	}
+}
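Note the deliberate fallthrough in the speed switch of bnx2x_phy_def_cfg(): the *_HALF cases only set DUPLEX_HALF and then continue into the matching *_FULL case, which supplies the line speed. Condensed illustration of one pair:

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		phy->req_duplex = DUPLEX_HALF;
		/* deliberate fallthrough: the speed is shared with the FULL case */
	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		phy->req_line_speed = SPEED_10;
		break;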
 
-	rc = bnx2x_reset_unicore(params);
-	/* reset the SerDes and wait for reset bit return low */
-	if (rc != 0)
-		return rc;
-
-	bnx2x_set_aer_mmd(params, vars);
+u32 bnx2x_phy_selection(struct link_params *params)
+{
+	u32 phy_config_swapped, prio_cfg;
+	u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
+
+	phy_config_swapped = params->multi_phy_config &
+		PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+	prio_cfg = params->multi_phy_config &
+			PORT_HW_CFG_PHY_SELECTION_MASK;
+
+	if (phy_config_swapped) {
+		switch (prio_cfg) {
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+			return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
+			break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+			return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
+			break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+			return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+			break;
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+			return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+			break;
+		}
+	} else
+		return_cfg = prio_cfg;
+
+	return return_cfg;
+}
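A hedged usage sketch of bnx2x_phy_selection(): when the PHY-swapped strap is set, FIRST/SECOND selections are mirrored, so a caller asking for the "first" phy is pointed at the physical device currently playing that role. Values below are illustrative:

	params->multi_phy_config = PORT_HW_CFG_PHY_SWAPPED_ENABLED |
				   PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
	/* bnx2x_phy_selection(params) now yields
	   PORT_HW_CFG_PHY_SELECTION_SECOND_PHY */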
 
-	/* setting the masterLn_def again after the reset */
-	if (vars->phy_flags & PHY_XGXS_FLAG) {
-		bnx2x_set_master_ln(params);
-		bnx2x_set_swap_lanes(params);
-	}
 
-	if (vars->phy_flags & PHY_XGXS_FLAG) {
-		if ((params->req_line_speed &&
-		     ((params->req_line_speed == SPEED_100) ||
-		      (params->req_line_speed == SPEED_10))) ||
-		    (!params->req_line_speed &&
-		     (params->speed_cap_mask >=
-		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
-		     (params->speed_cap_mask <
-		      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
-		     )) {
-			vars->phy_flags |= PHY_SGMII_FLAG;
-		} else {
-			vars->phy_flags &= ~PHY_SGMII_FLAG;
+u8 bnx2x_phy_probe(struct link_params *params)
+{
+	u8 phy_index, actual_phy_idx, link_cfg_idx;
+	u32 phy_config_swapped;
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_phy *phy;
+	params->num_phys = 0;
+	DP(NETIF_MSG_LINK, "Begin phy probe\n");
+	phy_config_swapped = params->multi_phy_config &
+		PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	     phy_index++) {
+		link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+		actual_phy_idx = phy_index;
+		if (phy_config_swapped) {
+			if (phy_index == EXT_PHY1)
+				actual_phy_idx = EXT_PHY2;
+			else if (phy_index == EXT_PHY2)
+				actual_phy_idx = EXT_PHY1;
 		}
+		DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
+		   " actual_phy_idx %x\n", phy_config_swapped,
+		   phy_index, actual_phy_idx);
+		phy = &params->phy[actual_phy_idx];
+		if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
+				       params->shmem2_base, params->port,
+				       phy) != 0) {
+			params->num_phys = 0;
+			DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
+			   phy_index);
+			for (phy_index = INT_PHY;
+			      phy_index < MAX_PHYS;
+			      phy_index++)
+				*phy = phy_null;
+			return -EINVAL;
+		}
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
+			break;
+
+		bnx2x_phy_def_cfg(params, phy, phy_index);
+		params->num_phys++;
 	}
-	/* In case of external phy existance, the line speed would be the
-	 line speed linked up by the external phy. In case it is direct only,
-	 then the line_speed during initialization will be equal to the
-	 req_line_speed*/
-	vars->line_speed = params->req_line_speed;
 
-	bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
+	DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
+	return 0;
+}
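An equivalent, compressed formulation of the index swap performed inside the probe loop (illustration only; the driver spells it out with if/else):

	static u8 swapped_phy_idx(u8 phy_index, int phy_config_swapped)
	{
		if (phy_config_swapped && phy_index != INT_PHY)
			return (phy_index == EXT_PHY1) ? EXT_PHY2 : EXT_PHY1;
		return phy_index;	/* INT_PHY never moves */
	}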
 
-	/* init ext phy and enable link state int */
-	non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
-		       (params->loopback_mode == LOOPBACK_XGXS_10));
+u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
+{
+	if (phy_idx < params->num_phys)
+		return params->phy[phy_idx].supported;
+	return 0;
+}
 
-	if (non_ext_phy ||
-	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
-	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
-	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) ||
-	    (params->loopback_mode == LOOPBACK_EXT_PHY)) {
-		if (params->req_line_speed == SPEED_AUTO_NEG)
-			bnx2x_set_parallel_detection(params, vars->phy_flags);
-		bnx2x_init_internal_phy(params, vars, non_ext_phy);
-	}
+static void set_phy_vars(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 actual_phy_idx, phy_index, link_cfg_idx;
+	u8 phy_config_swapped = params->multi_phy_config &
+			PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+	for (phy_index = INT_PHY; phy_index < params->num_phys;
+	      phy_index++) {
+		link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+		actual_phy_idx = phy_index;
+		if (phy_config_swapped) {
+			if (phy_index == EXT_PHY1)
+				actual_phy_idx = EXT_PHY2;
+			else if (phy_index == EXT_PHY2)
+				actual_phy_idx = EXT_PHY1;
+		}
+		params->phy[actual_phy_idx].req_flow_ctrl =
+			params->req_flow_ctrl[link_cfg_idx];
 
-	if (!non_ext_phy)
-		rc |= bnx2x_ext_phy_init(params, vars);
+		params->phy[actual_phy_idx].req_line_speed =
+			params->req_line_speed[link_cfg_idx];
 
-	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-		       (NIG_STATUS_XGXS0_LINK10G |
-			NIG_STATUS_XGXS0_LINK_STATUS |
-			NIG_STATUS_SERDES0_LINK_STATUS));
+		params->phy[actual_phy_idx].speed_cap_mask =
+			params->speed_cap_mask[link_cfg_idx];
 
-	return rc;
+		params->phy[actual_phy_idx].req_duplex =
+			params->req_duplex[link_cfg_idx];
 
+		DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
+		   " speed_cap_mask %x\n",
+		   params->phy[actual_phy_idx].req_flow_ctrl,
+		   params->phy[actual_phy_idx].req_line_speed,
+		   params->phy[actual_phy_idx].speed_cap_mask);
+	}
 }
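set_phy_vars() copies each per-link request (speed, duplex, flow control) through LINK_CONFIG_IDX(), which folds INT_PHY and EXT_PHY1 onto the same request slot. A standalone userspace check of that mapping, with the macros restated locally for the sketch:

	#include <assert.h>
	#define INT_PHY  0
	#define EXT_PHY1 1
	#define EXT_PHY2 2
	#define LINK_CONFIG_IDX(i) (((i) == INT_PHY) ? 0 : ((i) - 1))

	int main(void)
	{
		assert(LINK_CONFIG_IDX(INT_PHY)  == 0); /* XGXS shares slot 0... */
		assert(LINK_CONFIG_IDX(EXT_PHY1) == 0); /* ...with the first ext phy */
		assert(LINK_CONFIG_IDX(EXT_PHY2) == 1); /* second ext phy is slot 1 */
		return 0;
	}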
 
-
 u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u32 val;
-
 	DP(NETIF_MSG_LINK, "Phy Initialization started\n");
-	DP(NETIF_MSG_LINK, "req_speed %d, req_flowctrl %d\n",
-	   params->req_line_speed, params->req_flow_ctrl);
+	DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
+	   params->req_line_speed[0], params->req_flow_ctrl[0]);
+	DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
+	   params->req_line_speed[1], params->req_flow_ctrl[1]);
 	vars->link_status = 0;
 	vars->phy_link_up = 0;
 	vars->link_up = 0;
@@ -5966,11 +6705,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 	vars->duplex = DUPLEX_FULL;
 	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 	vars->mac_type = MAC_TYPE_NONE;
-
-	if (params->switch_cfg == SWITCH_CFG_1G)
-		vars->phy_flags = PHY_SERDES_FLAG;
-	else
-		vars->phy_flags = PHY_XGXS_FLAG;
+	vars->phy_flags = 0;
 
 	/* disable attentions */
 	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
@@ -5981,6 +6716,13 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 
 	bnx2x_emac_init(params, vars);
 
+	if (params->num_phys == 0) {
+		DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
+		return -EINVAL;
+	}
+	set_phy_vars(params);
+
+	DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
 	if (CHIP_REV_IS_FPGA(bp)) {
 
 		vars->link_up = 1;
@@ -6040,7 +6782,8 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 
 	vars->phy_flags = PHY_XGXS_FLAG;
 
-	bnx2x_phy_deassert(params, vars->phy_flags);
+	bnx2x_xgxs_deassert(params);
+
 	/* set bmac loopback */
 	bnx2x_bmac_enable(params, vars, 1);
 
@@ -6057,80 +6800,66 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 
 	vars->phy_flags = PHY_XGXS_FLAG;
 
-	bnx2x_phy_deassert(params, vars->phy_flags);
+	bnx2x_xgxs_deassert(params);
 	/* set bmac loopback */
 	bnx2x_emac_enable(params, vars, 1);
-	bnx2x_emac_program(params, vars->line_speed,
-			   vars->duplex);
+	bnx2x_emac_program(params, vars);
 	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
 	       params->port*4, 0);
 
-	} else if ((params->loopback_mode == LOOPBACK_XGXS_10) ||
+	} else if ((params->loopback_mode == LOOPBACK_XGXS) ||
 		   (params->loopback_mode == LOOPBACK_EXT_PHY)) {
 
 		vars->link_up = 1;
-		vars->line_speed = SPEED_10000;
-		vars->duplex = DUPLEX_FULL;
 		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+		vars->duplex = DUPLEX_FULL;
+		if (params->req_line_speed[0] == SPEED_1000) {
+			vars->line_speed = SPEED_1000;
+			vars->mac_type = MAC_TYPE_EMAC;
+		} else {
+			vars->line_speed = SPEED_10000;
+			vars->mac_type = MAC_TYPE_BMAC;
+		}
 
-		vars->phy_flags = PHY_XGXS_FLAG;
-
-		val = REG_RD(bp,
-			     NIG_REG_XGXS0_CTRL_PHY_ADDR+
-			     params->port*0x18);
-		params->phy_addr = (u8)val;
-
-		bnx2x_phy_deassert(params, vars->phy_flags);
+		bnx2x_xgxs_deassert(params);
 		bnx2x_link_initialize(params, vars);
 
-		vars->mac_type = MAC_TYPE_BMAC;
-
+		if (params->req_line_speed[0] == SPEED_1000) {
+			bnx2x_emac_program(params, vars);
+			bnx2x_emac_enable(params, vars, 0);
+		} else
 		bnx2x_bmac_enable(params, vars, 0);
 
-		if (params->loopback_mode == LOOPBACK_XGXS_10) {
+		if (params->loopback_mode == LOOPBACK_XGXS) {
 			/* set 10G XGXS loopback */
-			bnx2x_set_xgxs_loopback(params, vars, 1);
+			params->phy[INT_PHY].config_loopback(
+				&params->phy[INT_PHY],
+				params);
+
 		} else {
 			/* set external phy loopback */
-			bnx2x_ext_phy_loopback(params);
+			u8 phy_index;
+			for (phy_index = EXT_PHY1;
+			      phy_index < params->num_phys; phy_index++) {
+				if (params->phy[phy_index].config_loopback)
+					params->phy[phy_index].config_loopback(
+						&params->phy[phy_index],
+						params);
+			}
 		}
+
 		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
 		       params->port*4, 0);
 
-		bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
+		bnx2x_set_led(params, vars,
+			      LED_MODE_OPER, vars->line_speed);
 	} else
 	/* No loopback */
 	{
-		bnx2x_phy_deassert(params, vars->phy_flags);
-		switch (params->switch_cfg) {
-		case SWITCH_CFG_1G:
-			vars->phy_flags |= PHY_SERDES_FLAG;
-			if ((params->ext_phy_config &
-			     PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK) ==
-			     PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482) {
-				vars->phy_flags |= PHY_SGMII_FLAG;
-			}
-
-			val = REG_RD(bp,
-				     NIG_REG_SERDES0_CTRL_PHY_ADDR+
-				     params->port*0x10);
-
-			params->phy_addr = (u8)val;
-
-			break;
-		case SWITCH_CFG_10G:
-			vars->phy_flags |= PHY_XGXS_FLAG;
-			val = REG_RD(bp,
-				     NIG_REG_XGXS0_CTRL_PHY_ADDR+
-				     params->port*0x18);
-			params->phy_addr = (u8)val;
-
-			break;
-		default:
-			DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
-			return -EINVAL;
-		}
-		DP(NETIF_MSG_LINK, "Phy address = 0x%x\n", params->phy_addr);
-
+		if (params->switch_cfg == SWITCH_CFG_10G)
+			bnx2x_xgxs_deassert(params);
+		else
+			bnx2x_serdes_deassert(bp, params->port);
 		bnx2x_link_initialize(params, vars);
 		msleep(30);
@@ -6138,29 +6867,11 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 	}
 	return 0;
 }
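The loopback branches above are the first users of the new per-phy callback table: phys that cannot loop back presumably leave config_loopback unset (as phy_null entries would), so generic code tests the pointer before calling. Sketch of the guard pattern, assuming a populated params:

	u8 i;
	for (i = EXT_PHY1; i < params->num_phys; i++)
		if (params->phy[i].config_loopback)	/* skip phys without the op */
			params->phy[i].config_loopback(&params->phy[i], params);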
-
-static void bnx2x_8726_reset_phy(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
-{
-	DP(NETIF_MSG_LINK, "bnx2x_8726_reset_phy port %d\n", port);
-
-	/* Set serial boot control for external load */
-	bnx2x_cl45_write(bp, port,
-		       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, ext_phy_addr,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL, 0x0001);
-}
-
 u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
 		    u8 reset_ext_phy)
 {
 	struct bnx2x *bp = params->bp;
-	u32 ext_phy_config = params->ext_phy_config;
-	u8 port = params->port;
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
-	u32 val = REG_RD(bp, params->shmem_base +
-			 offsetof(struct shmem_region, dev_info.
-				  port_feature_config[params->port].
-				  config));
+	u8 phy_index, port = params->port;
 	DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
 	/* disable attentions */
 	vars->link_status = 0;
@@ -6189,73 +6900,21 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
 	 * Hold it as vars low
 	 */
 	/* clear link led */
-	bnx2x_set_led(params, LED_MODE_OFF, 0);
-	if (reset_ext_phy) {
-		switch (ext_phy_type) {
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
-			break;
+	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
 
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
-		{
-
-			/* Disable Transmitter */
-			u8 ext_phy_addr =
-				XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-			if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
-			    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-				bnx2x_sfp_set_transmitter(bp, port,
-					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-					ext_phy_addr, 0);
-			break;
-		}
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-			DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
-			   "low power mode\n",
-			   port);
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW,
-				       port);
-			break;
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-		{
-			u8 ext_phy_addr =
-				XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-			/* Set soft reset */
-			bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
-			break;
-		}
-		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
-		{
-			u8 ext_phy_addr =
-				XGXS_EXT_PHY_ADDR(params->ext_phy_config);
-			bnx2x_cl45_write(bp, port,
-				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
-				ext_phy_addr,
-				MDIO_AN_DEVAD,
-				MDIO_AN_REG_CTRL, 0x0000);
-			bnx2x_cl45_write(bp, port,
-				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
-				ext_phy_addr,
-				MDIO_PMA_DEVAD,
-				MDIO_PMA_REG_CTRL, 1);
-			break;
-		}
-		default:
-			/* HW reset */
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW,
-				       port);
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				       MISC_REGISTERS_GPIO_OUTPUT_LOW,
-				       port);
-			DP(NETIF_MSG_LINK, "reset external PHY\n");
+	if (reset_ext_phy) {
+		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+		      phy_index++) {
+			if (params->phy[phy_index].link_reset)
+				params->phy[phy_index].link_reset(
+					&params->phy[phy_index],
+					params);
 		}
 	}
-	/* reset the SerDes/XGXS */
-	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
-	       (0x1ff << (port*16)));
 
+	if (params->phy[INT_PHY].link_reset)
+		params->phy[INT_PHY].link_reset(
+			&params->phy[INT_PHY], params);
 	/* reset BigMac */
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -6269,183 +6928,25 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
 	return 0;
 }
 
-static u8 bnx2x_update_link_down(struct link_params *params,
-				 struct link_vars *vars)
+/****************************************************************************/
+/* Common function */
+/****************************************************************************/
+static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base, u8 phy_index)
 {
-	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
-
-	DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
-	bnx2x_set_led(params, LED_MODE_OFF, 0);
-
-	/* indicate no mac active */
-	vars->mac_type = MAC_TYPE_NONE;
-
-	/* update shared memory */
-	vars->link_status = 0;
-	vars->line_speed = 0;
-	bnx2x_update_mng(params, vars->link_status);
-
-	/* activate nig drain */
-	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
-
-	/* disable emac */
-	REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
-
-	msleep(10);
-
-	/* reset BigMac */
-	bnx2x_bmac_rx_disable(bp, params->port);
-	REG_WR(bp, GRCBASE_MISC +
-	       MISC_REGISTERS_RESET_REG_2_CLEAR,
-	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-	return 0;
-}
-
-static u8 bnx2x_update_link_up(struct link_params *params,
-			       struct link_vars *vars,
-			       u8 link_10g, u32 gp_status)
-{
-	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
-	u8 rc = 0;
-
-	vars->link_status |= LINK_STATUS_LINK_UP;
-	if (link_10g) {
-		bnx2x_bmac_enable(params, vars, 0);
-		bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
-	} else {
-		rc = bnx2x_emac_program(params, vars->line_speed,
-					vars->duplex);
-
-		bnx2x_emac_enable(params, vars, 0);
-
-		/* AN complete? */
-		if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
-			if (!(vars->phy_flags &
-			      PHY_SGMII_FLAG))
-				bnx2x_set_gmii_tx_driver(params);
-		}
-	}
-
-	/* PBF - link up */
-	rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
-			       vars->line_speed);
-
-	/* disable drain */
-	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
-
-	/* update shared memory */
-	bnx2x_update_mng(params, vars->link_status);
-	msleep(20);
-	return rc;
-}
-/* This function should called upon link interrupt */
-/* In case vars->link_up, driver needs to
-	1. Update the pbf
-	2. Disable drain
-	3. Update the shared memory
-	4. Indicate link up
-	5. Set LEDs
-   Otherwise,
-	1. Update shared memory
-	2. Reset BigMac
-	3. Report link down
-	4. Unset LEDs
-*/
-u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
-{
-	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
-	u16 gp_status;
-	u8 link_10g;
-	u8 ext_phy_link_up, rc = 0;
-	u32 ext_phy_type;
-	u8 is_mi_int = 0;
-
-	DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
-	   port, (vars->phy_flags & PHY_XGXS_FLAG),
-	   REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
-
-	is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
-				port*0x18) > 0);
-	DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
-	   REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
-	   is_mi_int,
-	   REG_RD(bp,
-		  NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
-
-	DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
-	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
-	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
-
-	/* disable emac */
-	REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
-
-	ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
-
-	/* Check external link change only for non-direct */
-	ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars, is_mi_int);
-
-	/* Read gp_status */
-	CL45_RD_OVER_CL22(bp, port, params->phy_addr,
-			  MDIO_REG_BANK_GP_STATUS,
-			  MDIO_GP_STATUS_TOP_AN_STATUS1,
-			  &gp_status);
-
-	rc = bnx2x_link_settings_status(params, vars, gp_status,
-					ext_phy_link_up);
-	if (rc != 0)
-		return rc;
-
-	/* anything 10 and over uses the bmac */
-	link_10g = ((vars->line_speed == SPEED_10000) ||
-		    (vars->line_speed == SPEED_12000) ||
-		    (vars->line_speed == SPEED_12500) ||
-		    (vars->line_speed == SPEED_13000) ||
-		    (vars->line_speed == SPEED_15000) ||
-		    (vars->line_speed == SPEED_16000));
-
-	bnx2x_link_int_ack(params, vars, link_10g, is_mi_int);
-
-	/* In case external phy link is up, and internal link is down
-	 ( not initialized yet probably after link initialization, it needs
-	 to be initialized.
-	 Note that after link down-up as result of cable plug,
-	 the xgxs link would probably become up again without the need to
-	 initialize it*/
-
-	if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
-	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
-	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
-	    (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
-	    (ext_phy_link_up && !vars->phy_link_up))
-		bnx2x_init_internal_phy(params, vars, 0);
-
-	/* link is up only if both local phy and external phy are up */
-	vars->link_up = (ext_phy_link_up && vars->phy_link_up);
-
-	if (vars->link_up)
-		rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
-	else
-		rc = bnx2x_update_link_down(params, vars);
-
-	return rc;
-}
-
-static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
-{
-	u8 ext_phy_addr[PORT_MAX];
+	struct bnx2x_phy phy[PORT_MAX];
+	struct bnx2x_phy *phy_blk[PORT_MAX];
 	u16 val;
 	s8 port;
 
 	/* PART1 - Reset both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 		/* Extract the ext phy address for the port */
-		u32 ext_phy_config = REG_RD(bp, shmem_base +
-			offsetof(struct shmem_region,
-			dev_info.port_hw_config[port].external_phy_config));
-
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy[port]) !=
+		    0) {
+			DP(NETIF_MSG_LINK, "populate_phy failed\n");
+			return -EINVAL;
+		}
 		/* disable attentions */
 		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
 			       (NIG_MASK_XGXS0_LINK_STATUS |
@@ -6453,17 +6954,13 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 				NIG_MASK_SERDES0_LINK_STATUS |
 				NIG_MASK_MI_INT));
 
-		ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
-
 		/* Need to take the phy out of low power mode in order
 			to write to access its registers */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
 			       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 
 		/* Reset the phy */
-		bnx2x_cl45_write(bp, port,
-			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-			       ext_phy_addr[port],
+		bnx2x_cl45_write(bp, &phy[port],
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_CTRL,
 			       1<<15);
@@ -6472,15 +6969,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 	/* Add delay of 150ms after reset */
 	msleep(150);
 
+	if (phy[PORT_0].addr & 0x1) {
+		phy_blk[PORT_0] = &(phy[PORT_1]);
+		phy_blk[PORT_1] = &(phy[PORT_0]);
+	} else {
+		phy_blk[PORT_0] = &(phy[PORT_0]);
+		phy_blk[PORT_1] = &(phy[PORT_1]);
+	}
+
 	/* PART2 - Download firmware to both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 		u16 fw_ver1;
 
-		bnx2x_bcm8073_external_rom_boot(bp, port,
-						ext_phy_addr[port], shmem_base);
+		bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						  port);
 
-		bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-			      ext_phy_addr[port],
+		bnx2x_cl45_read(bp, phy_blk[port],
 			      MDIO_PMA_DEVAD,
 			      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
 		if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
@@ -6492,16 +6996,12 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 		}
 
 		/* Only set bit 10 = 1 (Tx power down) */
-		bnx2x_cl45_read(bp, port,
-			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-			      ext_phy_addr[port],
+		bnx2x_cl45_read(bp, phy_blk[port],
 			      MDIO_PMA_DEVAD,
 			      MDIO_PMA_REG_TX_POWER_DOWN, &val);
 
 		/* Phase1 of TX_POWER_DOWN reset */
-		bnx2x_cl45_write(bp, port,
-			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-			       ext_phy_addr[port],
+		bnx2x_cl45_write(bp, phy_blk[port],
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_TX_POWER_DOWN,
 			       (val | 1<<10));
@@ -6515,28 +7015,20 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 		/* Phase2 of POWER_DOWN_RESET */
 		/* Release bit 10 (Release Tx power down) */
-		bnx2x_cl45_read(bp, port,
-			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-			      ext_phy_addr[port],
+		bnx2x_cl45_read(bp, phy_blk[port],
 			      MDIO_PMA_DEVAD,
 			      MDIO_PMA_REG_TX_POWER_DOWN, &val);
 
-		bnx2x_cl45_write(bp, port,
-			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-			       ext_phy_addr[port],
+		bnx2x_cl45_write(bp, phy_blk[port],
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
 		msleep(15);
 
 		/* Read modify write the SPI-ROM version select register */
-		bnx2x_cl45_read(bp, port,
-			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-			      ext_phy_addr[port],
+		bnx2x_cl45_read(bp, phy_blk[port],
 			      MDIO_PMA_DEVAD,
 			      MDIO_PMA_REG_EDC_FFE_MAIN, &val);
-		bnx2x_cl45_write(bp, port,
-			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
-			       ext_phy_addr[port],
+		bnx2x_cl45_write(bp, phy_blk[port],
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
 
@@ -6545,33 +7037,74 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 		MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 	}
 	return 0;
-
 }
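One non-obvious step in the rewritten 8073 init: before firmware download, the per-port phy structs are remapped through phy_blk[] based on the low bit of the PORT_0 MDIO address. A plausible reading is that an odd address indicates the probe order is inverted relative to the physical ports, so the pointers are cross-wired; the excerpt in isolation:

	if (phy[PORT_0].addr & 0x1) {	/* odd address: treat ports as swapped */
		phy_blk[PORT_0] = &phy[PORT_1];
		phy_blk[PORT_1] = &phy[PORT_0];
	} else {			/* identity mapping */
		phy_blk[PORT_0] = &phy[PORT_0];
		phy_blk[PORT_1] = &phy[PORT_1];
	}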
 
-static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base,
+				     u32 shmem2_base, u8 phy_index)
+{
+	u32 val;
+	s8 port;
+	struct bnx2x_phy phy;
+	/* Use port1 because of the static port-swap */
+	/* Enable the module detection interrupt */
+	val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+	val |= ((1<<MISC_REGISTERS_GPIO_3)|
+		(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
+	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+
+	bnx2x_ext_phy_hw_reset(bp, 1);
+	msleep(5);
+	for (port = 0; port < PORT_MAX; port++) {
+		/* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy) !=
+		    0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return -EINVAL;
+		}
+
+		/* Reset phy*/
+		bnx2x_cl45_write(bp, &phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
+
+		/* Set fault module detected LED on */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+			       MISC_REGISTERS_GPIO_HIGH,
+			       port);
+	}
+
+	return 0;
+}
+static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
+				     u32 shmem2_base, u8 phy_index)
 {
-	u8 ext_phy_addr[PORT_MAX];
-	s8 port, first_port, i;
+	s8 port;
 	u32 swap_val, swap_override;
+	struct bnx2x_phy phy[PORT_MAX];
+	struct bnx2x_phy *phy_blk[PORT_MAX];
 	DP(NETIF_MSG_LINK, "Executing BCM8727 common init\n");
 	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
 	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 
-	bnx2x_ext_phy_hw_reset(bp, 1 ^ (swap_val && swap_override));
-	msleep(5);
+	port = 1;
 
-	if (swap_val && swap_override)
-		first_port = PORT_0;
-	else
-		first_port = PORT_1;
+	bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override));
+
+	/* Calculate the port based on port swap */
+	port ^= (swap_val && swap_override);
+
+	msleep(5);
 
 	/* PART1 - Reset both phys */
-	for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 		/* Extract the ext phy address for the port */
-		u32 ext_phy_config = REG_RD(bp, shmem_base +
-			offsetof(struct shmem_region,
-			dev_info.port_hw_config[port].external_phy_config));
-
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy[port]) !=
+		    0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return -EINVAL;
+		}
 		/* disable attentions */
 		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
 			       (NIG_MASK_XGXS0_LINK_STATUS |
@@ -6579,12 +7112,9 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 				NIG_MASK_SERDES0_LINK_STATUS |
 				NIG_MASK_MI_INT));
 
-		ext_phy_addr[port] = XGXS_EXT_PHY_ADDR(ext_phy_config);
 
 		/* Reset the phy */
-		bnx2x_cl45_write(bp, port,
-			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-			       ext_phy_addr[port],
+		bnx2x_cl45_write(bp, &phy[port],
 			       MDIO_PMA_DEVAD,
 			       MDIO_PMA_REG_CTRL,
 			       1<<15);
@@ -6592,16 +7122,20 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 
 	/* Add delay of 150ms after reset */
 	msleep(150);
-
+	if (phy[PORT_0].addr & 0x1) {
+		phy_blk[PORT_0] = &(phy[PORT_1]);
+		phy_blk[PORT_1] = &(phy[PORT_0]);
+	} else {
+		phy_blk[PORT_0] = &(phy[PORT_0]);
+		phy_blk[PORT_1] = &(phy[PORT_1]);
+	}
 	/* PART2 - Download firmware to both phys */
-	for (i = 0, port = first_port; i < PORT_MAX; i++, port = !port) {
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 		u16 fw_ver1;
 
-		bnx2x_bcm8727_external_rom_boot(bp, port,
-						ext_phy_addr[port], shmem_base);
-
-		bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
-			      ext_phy_addr[port],
+		bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						  port);
+		bnx2x_cl45_read(bp, phy_blk[port],
 			      MDIO_PMA_DEVAD,
 			      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
 		if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
@@ -6616,82 +7150,32 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 	return 0;
 }
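The 8727 path replaces the old first_port bookkeeping with a small XOR: port starts at 1 and flips to 0 only when both the swap strap and its override are set. Worked through with illustrative values:

	u32 swap_val = 1, swap_override = 1;	/* both straps set */
	s8 port = 1;
	/* (swap_val && swap_override) evaluates to 1, so: */
	port ^= (swap_val && swap_override);	/* port becomes 0 */
	/* with either strap clear the XOR is with 0 and port stays 1 */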
 
-
-static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base)
-{
-	u8 ext_phy_addr;
-	u32 val;
-	s8 port;
-
-	/* Use port1 because of the static port-swap */
-	/* Enable the module detection interrupt */
-	val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
-	val |= ((1<<MISC_REGISTERS_GPIO_3)|
-		(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
-	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
-
-	bnx2x_ext_phy_hw_reset(bp, 1);
-	msleep(5);
-	for (port = 0; port < PORT_MAX; port++) {
-		/* Extract the ext phy address for the port */
-		u32 ext_phy_config = REG_RD(bp, shmem_base +
-			offsetof(struct shmem_region,
-			dev_info.port_hw_config[port].external_phy_config));
-
-		ext_phy_addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
-		DP(NETIF_MSG_LINK, "8726_common_init : ext_phy_addr = 0x%x\n",
-		   ext_phy_addr);
-
-		bnx2x_8726_reset_phy(bp, port, ext_phy_addr);
-
-		/* Set fault module detected LED on */
-		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-			       MISC_REGISTERS_GPIO_HIGH,
-			       port);
-	}
-
-	return 0;
-}
-
-
-static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
-{
-	/* HW reset */
-	bnx2x_ext_phy_hw_reset(bp, 1);
-	return 0;
-}
-u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
+static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base,
+				    u32 shmem2_base, u8 phy_index,
+				    u32 ext_phy_type)
 {
 	u8 rc = 0;
-	u32 ext_phy_type;
-
-	DP(NETIF_MSG_LINK, "Begin common phy init\n");
-
-	/* Read the ext_phy_type for arbitrary port(0) */
-	ext_phy_type = XGXS_EXT_PHY_TYPE(
-		REG_RD(bp, shmem_base +
-		       offsetof(struct shmem_region,
-				dev_info.port_hw_config[0].external_phy_config)));
 
 	switch (ext_phy_type) {
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
-	{
-		rc = bnx2x_8073_common_init_phy(bp, shmem_base);
+		rc = bnx2x_8073_common_init_phy(bp, shmem_base,
+						shmem2_base, phy_index);
 		break;
-	}
 
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
-		rc = bnx2x_8727_common_init_phy(bp, shmem_base);
+		rc = bnx2x_8727_common_init_phy(bp, shmem_base,
+						shmem2_base, phy_index);
 		break;
 
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
 		/* GPIO1 affects both ports, so there's need to pull
 		   it for single port alone */
-		rc = bnx2x_8726_common_init_phy(bp, shmem_base);
+		rc = bnx2x_8726_common_init_phy(bp, shmem_base,
+						shmem2_base, phy_index);
 		break;
-	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
-		rc = bnx2x_84823_common_init_phy(bp, shmem_base);
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+		rc = -EINVAL;
 		break;
 	default:
 		DP(NETIF_MSG_LINK,
@@ -6703,33 +7187,80 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
 	return rc;
 }
 
-void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
+u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base,
+			 u32 shmem2_base)
 {
-	u16 val, cnt;
+	u8 rc = 0;
+	u8 phy_index;
+	u32 ext_phy_type, ext_phy_config;
+	DP(NETIF_MSG_LINK, "Begin common phy init\n");
 
-	bnx2x_cl45_read(bp, port,
-		      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
-		      phy_addr,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_7101_RESET, &val);
+	if (CHIP_REV_IS_EMUL(bp))
+		return 0;
 
-	for (cnt = 0; cnt < 10; cnt++) {
-		msleep(50);
-		/* Writes a self-clearing reset */
-		bnx2x_cl45_write(bp, port,
-			       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
-			       phy_addr,
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_7101_RESET,
-			       (val | (1<<15)));
-		/* Wait for clear */
-		bnx2x_cl45_read(bp, port,
-			      PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
-			      phy_addr,
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_7101_RESET, &val);
-
-		if ((val & (1<<15)) == 0)
-			break;
+	/* Read the ext_phy_type for arbitrary port(0) */
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		ext_phy_config = bnx2x_get_ext_phy_config(bp,
+							  shmem_base,
+							  phy_index, 0);
+		ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+		rc |= bnx2x_ext_phy_common_init(bp, shmem_base,
+						shmem2_base,
+						phy_index, ext_phy_type);
+	}
+	return rc;
+}
+
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
+{
+	u8 phy_index;
+	struct bnx2x_phy phy;
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       0, &phy) != 0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return 0;
+		}
+
+		if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
+			return 1;
+	}
+	return 0;
+}
+
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
+			     u32 shmem_base,
+			     u32 shmem2_base,
+			     u8 port)
+{
+	u8 phy_index, fan_failure_det_req = 0;
+	struct bnx2x_phy phy;
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy)
+		    != 0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return 0;
+		}
+		fan_failure_det_req |= (phy.flags &
+					FLAGS_FAN_FAILURE_DET_REQ);
+	}
+	return fan_failure_det_req;
+}
+
+void bnx2x_hw_reset_phy(struct link_params *params)
+{
+	u8 phy_index;
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (params->phy[phy_index].hw_reset) {
+			params->phy[phy_index].hw_reset(
+				&params->phy[phy_index],
+				params);
+			params->phy[phy_index] = phy_null;
+		}
 	}
 }
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 40c2981de8ed..e98ea3d19471 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2010 Broadcom Corporation
  *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
@@ -46,9 +46,137 @@
 #define SFP_EEPROM_PART_NO_ADDR		0x28
 #define SFP_EEPROM_PART_NO_SIZE		16
 #define PWR_FLT_ERR_MSG_LEN		250
+
+#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
+	((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
+	(((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
+	 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
+#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
+	((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+
+/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
+#define SINGLE_MEDIA_DIRECT(params)	(params->num_phys == 1)
+/* Single Media board contains single external phy */
+#define SINGLE_MEDIA(params)		(params->num_phys == 2)
+/* Dual Media board contains two external phy with different media */
+#define DUAL_MEDIA(params)		(params->num_phys == 3)
+#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
+	(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
 /***********************************************************/
 /*                         Structs                         */
 /***********************************************************/
+#define INT_PHY		0
+#define EXT_PHY1	1
+#define EXT_PHY2	2
+#define MAX_PHYS	3
+
+/* Same configuration is shared between the XGXS and the first external phy */
+#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
+#define LINK_CONFIG_IDX(_phy_idx)	((_phy_idx == INT_PHY) ? \
+					 0 : (_phy_idx - 1))
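FW_PARAM_SET() packs three values into one u32 handed to the firmware: phy address and phy type occupy the low 16 bits, and the mdc/mdio access mode is shifted up by FW_PARAM_MDIO_CTRL_OFFSET. A worked example with made-up field values:

	u32 fw_param = FW_PARAM_SET(0x11,	/* phy_addr */
				    0x0100,	/* phy_type */
				    0x2);	/* mdio_access */
	/* 0x11 | 0x0100 | (0x2 << 16) == 0x00020111 */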
+/***********************************************************/
+/*                     bnx2x_phy struct                    */
+/* Defines the required arguments and function per phy     */
+/***********************************************************/
+struct link_vars;
+struct link_params;
+struct bnx2x_phy;
+
+typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+			    struct link_vars *vars);
+typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
+			    struct link_vars *vars);
+typedef void (*link_reset_t)(struct bnx2x_phy *phy,
+			     struct link_params *params);
+typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
+				  struct link_params *params);
+typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
+typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
+			       struct link_params *params, u8 mode);
+typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
+				    struct link_params *params, u32 action);
+
+struct bnx2x_phy {
+	u32 type;
+
+	/* Loaded during init */
+	u8 addr;
+
+	u8 flags;
+	/* Require HW lock */
+#define FLAGS_HW_LOCK_REQUIRED		(1<<0)
+	/* No Over-Current detection */
+#define FLAGS_NOC			(1<<1)
+	/* Fan failure detection required */
+#define FLAGS_FAN_FAILURE_DET_REQ	(1<<2)
+	/* Initialize first the XGXS and only then the phy itself */
+#define FLAGS_INIT_XGXS_FIRST		(1<<3)
+#define FLAGS_REARM_LATCH_SIGNAL	(1<<6)
+#define FLAGS_SFP_NOT_APPROVED		(1<<7)
+
+	u8 def_md_devad;
+	u8 reserved;
+	/* preemphasis values for the rx side */
+	u16 rx_preemphasis[4];
+
+	/* preemphasis values for the tx side */
+	u16 tx_preemphasis[4];
+
+	/* EMAC address for access MDIO */
+	u32 mdio_ctrl;
+
+	u32 supported;
+
+	u32 media_type;
+#define	ETH_PHY_UNSPECIFIED	0x0
+#define	ETH_PHY_SFP_FIBER	0x1
+#define	ETH_PHY_XFP_FIBER	0x2
+#define	ETH_PHY_DA_TWINAX	0x3
+#define	ETH_PHY_BASE_T		0x4
+#define	ETH_PHY_NOT_PRESENT	0xff
+
+	/* The address in which version is located*/
+	u32 ver_addr;
+
+	u16 req_flow_ctrl;
+
+	u16 req_line_speed;
+
+	u32 speed_cap_mask;
+
+	u16 req_duplex;
+	u16 rsrv;
+	/* Called per phy/port init, and it configures LASI, speed, autoneg,
+	   duplex, flow control negotiation, etc. */
+	config_init_t config_init;
+
+	/* Called due to interrupt. It determines the link, speed */
+	read_status_t read_status;
+
+	/* Called when driver is unloading. Should reset the phy */
+	link_reset_t link_reset;
+
+	/* Set the loopback configuration for the phy */
+	config_loopback_t config_loopback;
+
+	/* Format the given raw number into str up to len */
+	format_fw_ver_t format_fw_ver;
+
+	/* Reset the phy (both ports) */
+	hw_reset_t hw_reset;
+
+	/* Set link led mode (on/off/oper)*/
+	set_link_led_t set_link_led;
+
+	/* PHY Specific tasks */
+	phy_specific_func_t phy_specific_func;
+#define DISABLE_TX	1
+#define ENABLE_TX	2
+};
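A hedged sketch of how a struct bnx2x_phy entry is meant to be driven once probed: generic link code touches the hardware only through these pointers, so a stub can stand in for illustration (dummy_read_status and poll_example are invented here, not driver symbols):

	static u8 dummy_read_status(struct bnx2x_phy *phy,
				    struct link_params *params,
				    struct link_vars *vars)
	{
		return 0;	/* stub: report nothing to do */
	}

	static void poll_example(struct link_params *params,
				 struct link_vars *vars)
	{
		struct bnx2x_phy *phy = &params->phy[EXT_PHY1];

		phy->read_status = dummy_read_status;	/* normally set by probe */
		if (phy->read_status)			/* callbacks may be absent */
			phy->read_status(phy, params, vars);
	}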
179
52/* Inputs parameters to the CLC */ 180/* Inputs parameters to the CLC */
53struct link_params { 181struct link_params {
54 182
@@ -59,56 +187,50 @@ struct link_params {
59#define LOOPBACK_NONE 0 187#define LOOPBACK_NONE 0
60#define LOOPBACK_EMAC 1 188#define LOOPBACK_EMAC 1
61#define LOOPBACK_BMAC 2 189#define LOOPBACK_BMAC 2
62#define LOOPBACK_XGXS_10 3 190#define LOOPBACK_XGXS 3
63#define LOOPBACK_EXT_PHY 4 191#define LOOPBACK_EXT_PHY 4
64#define LOOPBACK_EXT 5 192#define LOOPBACK_EXT 5
65 193
66 u16 req_duplex;
67 u16 req_flow_ctrl;
68 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
69 req_flow_ctrl is set to AUTO */
70 u16 req_line_speed; /* Also determine AutoNeg */
71
72 /* Device parameters */ 194 /* Device parameters */
73 u8 mac_addr[6]; 195 u8 mac_addr[6];
74 196
197 u16 req_duplex[LINK_CONFIG_SIZE];
198 u16 req_flow_ctrl[LINK_CONFIG_SIZE];
199
200 u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
201
75 /* shmem parameters */ 202 /* shmem parameters */
76 u32 shmem_base; 203 u32 shmem_base;
77 u32 speed_cap_mask; 204 u32 shmem2_base;
205 u32 speed_cap_mask[LINK_CONFIG_SIZE];
78 u32 switch_cfg; 206 u32 switch_cfg;
79#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH 207#define SWITCH_CFG_1G PORT_FEATURE_CON_SWITCH_1G_SWITCH
80#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH 208#define SWITCH_CFG_10G PORT_FEATURE_CON_SWITCH_10G_SWITCH
81#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT 209#define SWITCH_CFG_AUTO_DETECT PORT_FEATURE_CON_SWITCH_AUTO_DETECT
82 210
83 u16 hw_led_mode; /* part of the hw_config read from the shmem */
84
85 /* phy_addr populated by the phy_init function */
86 u8 phy_addr;
87 /*u8 reserved1;*/
88
89 u32 lane_config; 211 u32 lane_config;
90 u32 ext_phy_config;
91#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
92 ((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
93#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
94 (((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
95 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
96#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
97 ((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
98 212
99 /* Phy register parameter */ 213 /* Phy register parameter */
100 u32 chip_id; 214 u32 chip_id;
101 215
102 u16 xgxs_config_rx[4]; /* preemphasis values for the rx side */
103 u16 xgxs_config_tx[4]; /* preemphasis values for the tx side */
104
105 u32 feature_config_flags; 216 u32 feature_config_flags;
106#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) 217#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
107#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) 218#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
108#define FEATURE_CONFIG_BCM8727_NOC (1<<3) 219#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
220 /* Will be populated during common init */
221 struct bnx2x_phy phy[MAX_PHYS];
222
223 /* Will be populated during common init */
224 u8 num_phys;
225
226 u8 rsrv;
227 u16 hw_led_mode; /* part of the hw_config read from the shmem */
228 u32 multi_phy_config;
109 229
110 /* Device pointer passed to all callback functions */ 230 /* Device pointer passed to all callback functions */
111 struct bnx2x *bp; 231 struct bnx2x *bp;
232 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
233 req_flow_ctrl is set to AUTO */
112}; 234};
113 235
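The scalar request fields (req_line_speed, req_duplex, req_flow_ctrl,
speed_cap_mask) became LINK_CONFIG_SIZE-sized arrays so that each media of
a dual-media board carries its own configuration. A hypothetical accessor,
assuming LINK_CONFIG_SIZE == 2 and using the index helper added in
bnx2x_main.c below:

    static u16 demo_req_speed(struct bnx2x *bp)
    {
            u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);

            return bp->link_params.req_line_speed[cfg_idx];
    }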
114/* Output parameters */ 236/* Output parameters */
@@ -129,12 +251,6 @@ struct link_vars {
129 u16 flow_ctrl; 251 u16 flow_ctrl;
130 u16 ieee_fc; 252 u16 ieee_fc;
131 253
132 u32 autoneg;
133#define AUTO_NEG_DISABLED 0x0
134#define AUTO_NEG_ENABLED 0x1
135#define AUTO_NEG_COMPLETE 0x2
136#define AUTO_NEG_PARALLEL_DETECTION_USED 0x3
137
138 /* The same definitions as the shmem parameter */ 254 /* The same definitions as the shmem parameter */
139 u32 link_status; 255 u32 link_status;
140}; 256};
@@ -142,8 +258,6 @@ struct link_vars {
142/***********************************************************/ 258/***********************************************************/
143/* Functions */ 259/* Functions */
144/***********************************************************/ 260/***********************************************************/
145
146/* Initialize the phy */
147u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output); 261u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output);
148 262
149/* Reset the link. Should be called when driver or interface goes down 263/* Reset the link. Should be called when driver or interface goes down
@@ -155,17 +269,21 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
155/* bnx2x_link_update should be called upon link interrupt */ 269/* bnx2x_link_update should be called upon link interrupt */
156u8 bnx2x_link_update(struct link_params *input, struct link_vars *output); 270u8 bnx2x_link_update(struct link_params *input, struct link_vars *output);
157 271
 158/* Use the following cl45 functions to read/write from the external phy 272/* Use the following phy functions to read/write from the external phy
 159 To read/write internal phy registers, use 273 To read/write internal phy registers, use
160 DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as 274 DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as
161 Use ext_phy_type of 0 in case of cl22 over cl45
162 the register */ 275 the register */
163u8 bnx2x_cl45_read(struct bnx2x *bp, u8 port, u32 ext_phy_type, 276u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
164 u8 phy_addr, u8 devad, u16 reg, u16 *ret_val); 277 u8 devad, u16 reg, u16 *ret_val);
278
279u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
280 u8 devad, u16 reg, u16 val);
165 281
166u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type, 282u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
167 u8 phy_addr, u8 devad, u16 reg, u16 val); 283 u8 devad, u16 reg, u16 *ret_val);
168 284
285u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
286 u8 devad, u16 reg, u16 val);
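A usage sketch modelled on the mdio ioctl path converted later in this
patch; phy_addr/devad/reg are caller-supplied, and the bit set in the
write-back is purely illustrative:

    static int demo_phy_rmw(struct bnx2x *bp, u8 phy_addr, u8 devad,
                            u16 reg)
    {
            u16 val;
            u8 rc;

            bnx2x_acquire_phy_lock(bp);
            rc = bnx2x_phy_read(&bp->link_params, phy_addr, devad, reg,
                                &val);
            if (!rc)
                    rc = bnx2x_phy_write(&bp->link_params, phy_addr,
                                         devad, reg, val | (1 << 15));
            bnx2x_release_phy_lock(bp);
            return rc;
    }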
169/* Reads the link_status from the shmem, 287/* Reads the link_status from the shmem,
 170 and updates the link vars accordingly */ 288 and updates the link vars accordingly */
171void bnx2x_link_status_update(struct link_params *input, 289void bnx2x_link_status_update(struct link_params *input,
@@ -178,9 +296,12 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
178 Basically, the CLC takes care of the led for the link, but in case one needs 296 Basically, the CLC takes care of the led for the link, but in case one needs
 179 to set/unset the led manually, set the "mode" to LED_MODE_OPER to 297 to set/unset the led manually, set the "mode" to LED_MODE_OPER to
 180 blink the led, and LED_MODE_OFF to set the led off. */ 298 blink the led, and LED_MODE_OFF to set the led off. */
181u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed); 299u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
182#define LED_MODE_OFF 0 300 u8 mode, u32 speed);
183#define LED_MODE_OPER 2 301#define LED_MODE_OFF 0
302#define LED_MODE_ON 1
303#define LED_MODE_OPER 2
304#define LED_MODE_FRONT_PANEL_OFF 3
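An example call with the widened signature (illustrative; LED_MODE_OPER
hands LED control back to the CLC so it follows link state):

    bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
                  SPEED_10000);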
184 305
185u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value); 306u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
186 307
@@ -190,17 +311,38 @@ void bnx2x_handle_module_detect_int(struct link_params *params);
190 311
191/* Get the actual link status. In case it returns 0, link is up, 312/* Get the actual link status. In case it returns 0, link is up,
 192 otherwise link is down */ 313 otherwise link is down */
193u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars); 314u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars,
315 u8 is_serdes);
194 316
195/* One-time initialization for external phy after power up */ 317/* One-time initialization for external phy after power up */
196u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base); 318u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base);
197 319
198/* Reset the external PHY using GPIO */ 320/* Reset the external PHY using GPIO */
199void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); 321void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
200 322
 201void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr); 323/* Reset the external PHY of the SFX7101 */
324void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
202 325
203u8 bnx2x_read_sfp_module_eeprom(struct link_params *params, u16 addr, 326u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
327 struct link_params *params, u16 addr,
204 u8 byte_cnt, u8 *o_buf); 328 u8 byte_cnt, u8 *o_buf);
205 329
330void bnx2x_hw_reset_phy(struct link_params *params);
331
332/* Checks if HW lock is required for this phy/board type */
333u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
334 u32 shmem2_base);
335
 336/* Returns the aggregated supported attributes of the phys on board */
337u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
338
339/* Check swap bit and adjust PHY order */
340u32 bnx2x_phy_selection(struct link_params *params);
341
342/* Probe the phys on board, and populate them in "params" */
343u8 bnx2x_phy_probe(struct link_params *params);
344/* Checks if fan failure detection is required on one of the phys on board */
345u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
346 u32 shmem2_base, u8 port);
347
206#endif /* BNX2X_LINK_H */ 348#endif /* BNX2X_LINK_H */
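Pieced together from the bnx2x_main.c hunks that follow, the dual-media
bring-up order becomes (condensed sketch, error handling omitted):

    bp->link_params.shmem_base  = bp->common.shmem_base;
    bp->link_params.shmem2_base = bp->common.shmem2_base;
    bp->link_params.multi_phy_config =
            SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
    bnx2x_phy_probe(&bp->link_params);      /* fill phy[] and num_phys */
    bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
    bnx2x_link_settings_requested(bp);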
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index f8c3f08e4ce7..67587fe9e358 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -781,7 +781,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
781 DP(NETIF_MSG_HW, 781 DP(NETIF_MSG_HW,
782 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", 782 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
783 resource, HW_LOCK_MAX_RESOURCE_VALUE); 783 resource, HW_LOCK_MAX_RESOURCE_VALUE);
784 return -EINVAL; 784 return false;
785 } 785 }
786 786
787 if (func <= 5) 787 if (func <= 5)
@@ -1227,26 +1227,66 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1227 return 0; 1227 return 0;
1228} 1228}
1229 1229
1230int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1231{
1232 u32 sel_phy_idx = 0;
1233 if (bp->link_vars.link_up) {
1234 sel_phy_idx = EXT_PHY1;
1235 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1236 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1237 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1238 sel_phy_idx = EXT_PHY2;
1239 } else {
1240
1241 switch (bnx2x_phy_selection(&bp->link_params)) {
1242 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1243 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1244 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1245 sel_phy_idx = EXT_PHY1;
1246 break;
1247 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1248 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1249 sel_phy_idx = EXT_PHY2;
1250 break;
1251 }
1252 }
1253 /*
 1254 * The selected active PHY is always after swapping (in case PHY
 1255 * swapping is enabled). So when swapping is enabled, we need to reverse
 1256 * the configuration.
1257 */
1258
1259 if (bp->link_params.multi_phy_config &
1260 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1261 if (sel_phy_idx == EXT_PHY1)
1262 sel_phy_idx = EXT_PHY2;
1263 else if (sel_phy_idx == EXT_PHY2)
1264 sel_phy_idx = EXT_PHY1;
1265 }
1266 return LINK_CONFIG_IDX(sel_phy_idx);
1267}
1268
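The returned index addresses the per-media arrays, assuming (not shown in
this excerpt) that LINK_CONFIG_IDX(EXT_PHY1) == 0 and
LINK_CONFIG_IDX(EXT_PHY2) == 1:

    u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);

    bp->port.advertising[cfg_idx] |= ADVERTISED_Pause;
    bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;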
1230void bnx2x_calc_fc_adv(struct bnx2x *bp) 1269void bnx2x_calc_fc_adv(struct bnx2x *bp)
1231{ 1270{
1271 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1232 switch (bp->link_vars.ieee_fc & 1272 switch (bp->link_vars.ieee_fc &
1233 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 1273 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1234 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 1274 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1235 bp->port.advertising &= ~(ADVERTISED_Asym_Pause | 1275 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1236 ADVERTISED_Pause); 1276 ADVERTISED_Pause);
1237 break; 1277 break;
1238 1278
1239 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 1279 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1240 bp->port.advertising |= (ADVERTISED_Asym_Pause | 1280 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1241 ADVERTISED_Pause); 1281 ADVERTISED_Pause);
1242 break; 1282 break;
1243 1283
1244 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 1284 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1245 bp->port.advertising |= ADVERTISED_Asym_Pause; 1285 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
1246 break; 1286 break;
1247 1287
1248 default: 1288 default:
1249 bp->port.advertising &= ~(ADVERTISED_Asym_Pause | 1289 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1250 ADVERTISED_Pause); 1290 ADVERTISED_Pause);
1251 break; 1291 break;
1252 } 1292 }
@@ -1257,7 +1297,8 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1257{ 1297{
1258 if (!BP_NOMCP(bp)) { 1298 if (!BP_NOMCP(bp)) {
1259 u8 rc; 1299 u8 rc;
1260 1300 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1301 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
1261 /* Initialize link parameters structure variables */ 1302 /* Initialize link parameters structure variables */
1262 /* It is recommended to turn off RX FC for jumbo frames 1303 /* It is recommended to turn off RX FC for jumbo frames
1263 for better performance */ 1304 for better performance */
@@ -1268,8 +1309,10 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1268 1309
1269 bnx2x_acquire_phy_lock(bp); 1310 bnx2x_acquire_phy_lock(bp);
1270 1311
1271 if (load_mode == LOAD_DIAG) 1312 if (load_mode == LOAD_DIAG) {
1272 bp->link_params.loopback_mode = LOOPBACK_XGXS_10; 1313 bp->link_params.loopback_mode = LOOPBACK_XGXS;
1314 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1315 }
1273 1316
1274 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 1317 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1275 1318
@@ -1281,7 +1324,7 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1281 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 1324 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1282 bnx2x_link_report(bp); 1325 bnx2x_link_report(bp);
1283 } 1326 }
1284 1327 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
1285 return rc; 1328 return rc;
1286 } 1329 }
1287 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 1330 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
@@ -1292,6 +1335,7 @@ void bnx2x_link_set(struct bnx2x *bp)
1292{ 1335{
1293 if (!BP_NOMCP(bp)) { 1336 if (!BP_NOMCP(bp)) {
1294 bnx2x_acquire_phy_lock(bp); 1337 bnx2x_acquire_phy_lock(bp);
1338 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
1295 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 1339 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1296 bnx2x_release_phy_lock(bp); 1340 bnx2x_release_phy_lock(bp);
1297 1341
@@ -1310,13 +1354,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
1310 BNX2X_ERR("Bootcode is missing - can not reset link\n"); 1354 BNX2X_ERR("Bootcode is missing - can not reset link\n");
1311} 1355}
1312 1356
1313u8 bnx2x_link_test(struct bnx2x *bp) 1357u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
1314{ 1358{
1315 u8 rc = 0; 1359 u8 rc = 0;
1316 1360
1317 if (!BP_NOMCP(bp)) { 1361 if (!BP_NOMCP(bp)) {
1318 bnx2x_acquire_phy_lock(bp); 1362 bnx2x_acquire_phy_lock(bp);
1319 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars); 1363 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1364 is_serdes);
1320 bnx2x_release_phy_lock(bp); 1365 bnx2x_release_phy_lock(bp);
1321 } else 1366 } else
1322 BNX2X_ERR("Bootcode is missing - can not test link\n"); 1367 BNX2X_ERR("Bootcode is missing - can not test link\n");
@@ -1585,7 +1630,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
1585 */ 1630 */
1586 1631
1587/* send the MCP a request, block until there is a reply */ 1632/* send the MCP a request, block until there is a reply */
1588u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) 1633u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
1589{ 1634{
1590 int func = BP_FUNC(bp); 1635 int func = BP_FUNC(bp);
1591 u32 seq = ++bp->fw_seq; 1636 u32 seq = ++bp->fw_seq;
@@ -1594,6 +1639,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
1594 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 1639 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1595 1640
1596 mutex_lock(&bp->fw_mb_mutex); 1641 mutex_lock(&bp->fw_mb_mutex);
1642 SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
1597 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 1643 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1598 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 1644 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1599 1645
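The new third argument lands in drv_mb_param just before the command
header is written, so the firmware sees the parameter when it handles the
command. Every call site converted in this patch passes 0:

    u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);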
@@ -1715,9 +1761,9 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1715 1761
1716 /* Report results to MCP */ 1762 /* Report results to MCP */
1717 if (dcc_event) 1763 if (dcc_event)
1718 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE); 1764 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
1719 else 1765 else
1720 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK); 1766 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
1721} 1767}
1722 1768
1723/* must be called under the spq lock */ 1769/* must be called under the spq lock */
@@ -1959,12 +2005,16 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1959static inline void bnx2x_fan_failure(struct bnx2x *bp) 2005static inline void bnx2x_fan_failure(struct bnx2x *bp)
1960{ 2006{
1961 int port = BP_PORT(bp); 2007 int port = BP_PORT(bp);
1962 2008 u32 ext_phy_config;
1963 /* mark the failure */ 2009 /* mark the failure */
1964 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 2010 ext_phy_config =
1965 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; 2011 SHMEM_RD(bp,
2012 dev_info.port_hw_config[port].external_phy_config);
2013
2014 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2015 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1966 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config, 2016 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1967 bp->link_params.ext_phy_config); 2017 ext_phy_config);
1968 2018
1969 /* log the failure */ 2019 /* log the failure */
1970 netdev_err(bp->dev, "Fan Failure on Network Controller has caused" 2020 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
@@ -1976,7 +2026,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1976{ 2026{
1977 int port = BP_PORT(bp); 2027 int port = BP_PORT(bp);
1978 int reg_offset; 2028 int reg_offset;
1979 u32 val, swap_val, swap_override; 2029 u32 val;
1980 2030
1981 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 2031 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1982 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 2032 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -1990,30 +2040,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1990 BNX2X_ERR("SPIO5 hw attention\n"); 2040 BNX2X_ERR("SPIO5 hw attention\n");
1991 2041
1992 /* Fan failure attention */ 2042 /* Fan failure attention */
1993 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { 2043 bnx2x_hw_reset_phy(&bp->link_params);
1994 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1995 /* Low power mode is controlled by GPIO 2 */
1996 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1997 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1998 /* The PHY reset is controlled by GPIO 1 */
1999 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2000 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2001 break;
2002
2003 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2004 /* The PHY reset is controlled by GPIO 1 */
2005 /* fake the port number to cancel the swap done in
2006 set_gpio() */
2007 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2008 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2009 port = (swap_val && swap_override) ^ 1;
2010 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2011 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2012 break;
2013
2014 default:
2015 break;
2016 }
2017 bnx2x_fan_failure(bp); 2044 bnx2x_fan_failure(bp);
2018 } 2045 }
2019 2046
@@ -3803,10 +3830,9 @@ static const struct {
3803 3830
3804static void enable_blocks_parity(struct bnx2x *bp) 3831static void enable_blocks_parity(struct bnx2x *bp)
3805{ 3832{
3806 int i, mask_arr_len = 3833 int i;
3807 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
3808 3834
3809 for (i = 0; i < mask_arr_len; i++) 3835 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
3810 REG_WR(bp, bnx2x_parity_mask[i].addr, 3836 REG_WR(bp, bnx2x_parity_mask[i].addr,
3811 bnx2x_parity_mask[i].mask); 3837 bnx2x_parity_mask[i].mask);
3812} 3838}
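ARRAY_SIZE() comes from <linux/kernel.h>; roughly:

    #define ARRAY_SIZE(arr) \
            (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

Besides being shorter, the __must_be_array() term makes the build fail if
arr decays to a pointer, which the open-coded division removed above could
not catch.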
@@ -3862,17 +3888,12 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3862 */ 3888 */
3863 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) 3889 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3864 for (port = PORT_0; port < PORT_MAX; port++) { 3890 for (port = PORT_0; port < PORT_MAX; port++) {
3865 u32 phy_type =
3866 SHMEM_RD(bp, dev_info.port_hw_config[port].
3867 external_phy_config) &
3868 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3869 is_required |= 3891 is_required |=
3870 ((phy_type == 3892 bnx2x_fan_failure_det_req(
3871 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) || 3893 bp,
3872 (phy_type == 3894 bp->common.shmem_base,
3873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || 3895 bp->common.shmem2_base,
3874 (phy_type == 3896 port);
3875 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3876 } 3897 }
3877 3898
3878 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required); 3899 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
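A hedged reconstruction of what bnx2x_fan_failure_det_req() checks, based
on the open-coded test deleted above (the real body lives in the link
code, not in this excerpt; it presumably also consults shmem2 for the
second dual-media PHY, which is why the extra arguments exist):

    u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
                                 u32 shmem2_base, u8 port)
    {
            u32 phy_type = SHMEM_RD(bp, dev_info.port_hw_config[port].
                                    external_phy_config) &
                           PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;

            return (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 ||
                    phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 ||
                    phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481);
    }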
@@ -4139,17 +4160,9 @@ static int bnx2x_init_common(struct bnx2x *bp)
4139 return -EBUSY; 4160 return -EBUSY;
4140 } 4161 }
4141 4162
4142 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { 4163 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4143 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 4164 bp->common.shmem_base,
4144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 4165 bp->common.shmem2_base);
4145 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4147 bp->port.need_hw_lock = 1;
4148 break;
4149
4150 default:
4151 break;
4152 }
4153 4166
4154 bnx2x_setup_fan_failure_detection(bp); 4167 bnx2x_setup_fan_failure_detection(bp);
4155 4168
@@ -4162,7 +4175,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
4162 4175
4163 if (!BP_NOMCP(bp)) { 4176 if (!BP_NOMCP(bp)) {
4164 bnx2x_acquire_phy_lock(bp); 4177 bnx2x_acquire_phy_lock(bp);
4165 bnx2x_common_init_phy(bp, bp->common.shmem_base); 4178 bnx2x_common_init_phy(bp, bp->common.shmem_base,
4179 bp->common.shmem2_base);
4166 bnx2x_release_phy_lock(bp); 4180 bnx2x_release_phy_lock(bp);
4167 } else 4181 } else
4168 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 4182 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
@@ -4297,60 +4311,17 @@ static int bnx2x_init_port(struct bnx2x *bp)
4297 4311
4298 bnx2x_init_block(bp, MCP_BLOCK, init_stage); 4312 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4299 bnx2x_init_block(bp, DMAE_BLOCK, init_stage); 4313 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4300 4314 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4301 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) { 4315 bp->common.shmem_base,
4302 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 4316 bp->common.shmem2_base);
4303 { 4317 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
4304 u32 swap_val, swap_override, aeu_gpio_mask, offset; 4318 bp->common.shmem2_base, port)) {
4305
4306 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4307 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4308
4309 /* The GPIO should be swapped if the swap register is
4310 set and active */
4311 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4312 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4313
4314 /* Select function upon port-swap configuration */
4315 if (port == 0) {
4316 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4317 aeu_gpio_mask = (swap_val && swap_override) ?
4318 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4319 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4320 } else {
4321 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4322 aeu_gpio_mask = (swap_val && swap_override) ?
4323 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4324 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4325 }
4326 val = REG_RD(bp, offset);
4327 /* add GPIO3 to group */
4328 val |= aeu_gpio_mask;
4329 REG_WR(bp, offset, val);
4330 }
4331 bp->port.need_hw_lock = 1;
4332 break;
4333
4334 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4335 bp->port.need_hw_lock = 1;
4336 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4337 /* add SPIO 5 to group 0 */
4338 {
4339 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4319 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4340 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 4320 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4341 val = REG_RD(bp, reg_addr); 4321 val = REG_RD(bp, reg_addr);
4342 val |= AEU_INPUTS_ATTN_BITS_SPIO5; 4322 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4343 REG_WR(bp, reg_addr, val); 4323 REG_WR(bp, reg_addr, val);
4344 }
4345 break;
4346 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4347 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4348 bp->port.need_hw_lock = 1;
4349 break;
4350 default:
4351 break;
4352 } 4324 }
4353
4354 bnx2x__link_reset(bp); 4325 bnx2x__link_reset(bp);
4355 4326
4356 return 0; 4327 return 0;
@@ -4480,7 +4451,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
4480 /* Reset PCIE errors for debug */ 4451 /* Reset PCIE errors for debug */
4481 REG_WR(bp, 0x2114, 0xffffffff); 4452 REG_WR(bp, 0x2114, 0xffffffff);
4482 REG_WR(bp, 0x2120, 0xffffffff); 4453 REG_WR(bp, 0x2120, 0xffffffff);
4483 4454 bnx2x_phy_probe(&bp->link_params);
4484 return 0; 4455 return 0;
4485} 4456}
4486 4457
@@ -5302,7 +5273,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5302 5273
5303unload_error: 5274unload_error:
5304 if (!BP_NOMCP(bp)) 5275 if (!BP_NOMCP(bp))
5305 reset_code = bnx2x_fw_command(bp, reset_code); 5276 reset_code = bnx2x_fw_command(bp, reset_code, 0);
5306 else { 5277 else {
5307 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n", 5278 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
5308 load_count[0], load_count[1], load_count[2]); 5279 load_count[0], load_count[1], load_count[2]);
@@ -5327,7 +5298,7 @@ unload_error:
5327 5298
5328 /* Report UNLOAD_DONE to MCP */ 5299 /* Report UNLOAD_DONE to MCP */
5329 if (!BP_NOMCP(bp)) 5300 if (!BP_NOMCP(bp))
5330 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 5301 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
5331 5302
5332} 5303}
5333 5304
@@ -5892,13 +5863,14 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5892 bp->fw_seq = 5863 bp->fw_seq =
5893 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 5864 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5894 DRV_MSG_SEQ_NUMBER_MASK); 5865 DRV_MSG_SEQ_NUMBER_MASK);
5895 reset_code = bnx2x_fw_command(bp, reset_code); 5866 reset_code = bnx2x_fw_command(bp, reset_code, 0);
5896 5867
5897 /* if UNDI is loaded on the other port */ 5868 /* if UNDI is loaded on the other port */
5898 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { 5869 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5899 5870
5900 /* send "DONE" for previous unload */ 5871 /* send "DONE" for previous unload */
5901 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 5872 bnx2x_fw_command(bp,
5873 DRV_MSG_CODE_UNLOAD_DONE, 0);
5902 5874
5903 /* unload UNDI on port 1 */ 5875 /* unload UNDI on port 1 */
5904 bp->func = 1; 5876 bp->func = 1;
@@ -5907,7 +5879,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5907 DRV_MSG_SEQ_NUMBER_MASK); 5879 DRV_MSG_SEQ_NUMBER_MASK);
5908 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 5880 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5909 5881
5910 bnx2x_fw_command(bp, reset_code); 5882 bnx2x_fw_command(bp, reset_code, 0);
5911 } 5883 }
5912 5884
5913 /* now it's safe to release the lock */ 5885 /* now it's safe to release the lock */
@@ -5949,7 +5921,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5949 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en); 5921 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5950 5922
5951 /* send unload done to the MCP */ 5923 /* send unload done to the MCP */
5952 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 5924 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
5953 5925
5954 /* restore our func and fw_seq */ 5926 /* restore our func and fw_seq */
5955 bp->func = func; 5927 bp->func = func;
@@ -5997,6 +5969,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5997 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 5969 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5998 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0); 5970 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
5999 bp->link_params.shmem_base = bp->common.shmem_base; 5971 bp->link_params.shmem_base = bp->common.shmem_base;
5972 bp->link_params.shmem2_base = bp->common.shmem2_base;
6000 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 5973 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
6001 bp->common.shmem_base, bp->common.shmem2_base); 5974 bp->common.shmem_base, bp->common.shmem2_base);
6002 5975
@@ -6039,8 +6012,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6039 "please upgrade BC\n", BNX2X_BC_VER, val); 6012 "please upgrade BC\n", BNX2X_BC_VER, val);
6040 } 6013 }
6041 bp->link_params.feature_config_flags |= 6014 bp->link_params.feature_config_flags |=
6042 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ? 6015 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
6043 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 6016 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6017 bp->link_params.feature_config_flags |=
6018 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
6019 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
6044 6020
6045 if (BP_E1HVN(bp) == 0) { 6021 if (BP_E1HVN(bp) == 0) {
6046 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); 6022 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
@@ -6064,194 +6040,55 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6064static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 6040static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6065 u32 switch_cfg) 6041 u32 switch_cfg)
6066{ 6042{
6067 int port = BP_PORT(bp); 6043 int cfg_size = 0, idx, port = BP_PORT(bp);
6068 u32 ext_phy_type;
6069
6070 switch (switch_cfg) {
6071 case SWITCH_CFG_1G:
6072 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6073
6074 ext_phy_type =
6075 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6076 switch (ext_phy_type) {
6077 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6078 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6079 ext_phy_type);
6080
6081 bp->port.supported |= (SUPPORTED_10baseT_Half |
6082 SUPPORTED_10baseT_Full |
6083 SUPPORTED_100baseT_Half |
6084 SUPPORTED_100baseT_Full |
6085 SUPPORTED_1000baseT_Full |
6086 SUPPORTED_2500baseX_Full |
6087 SUPPORTED_TP |
6088 SUPPORTED_FIBRE |
6089 SUPPORTED_Autoneg |
6090 SUPPORTED_Pause |
6091 SUPPORTED_Asym_Pause);
6092 break;
6093 6044
6094 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 6045 /* Aggregation of supported attributes of all external phys */
6095 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n", 6046 bp->port.supported[0] = 0;
6096 ext_phy_type); 6047 bp->port.supported[1] = 0;
6097 6048 switch (bp->link_params.num_phys) {
6098 bp->port.supported |= (SUPPORTED_10baseT_Half | 6049 case 1:
6099 SUPPORTED_10baseT_Full | 6050 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
6100 SUPPORTED_100baseT_Half | 6051 cfg_size = 1;
6101 SUPPORTED_100baseT_Full | 6052 break;
6102 SUPPORTED_1000baseT_Full | 6053 case 2:
6103 SUPPORTED_TP | 6054 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
6104 SUPPORTED_FIBRE | 6055 cfg_size = 1;
6105 SUPPORTED_Autoneg | 6056 break;
6106 SUPPORTED_Pause | 6057 case 3:
6107 SUPPORTED_Asym_Pause); 6058 if (bp->link_params.multi_phy_config &
6108 break; 6059 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
6060 bp->port.supported[1] =
6061 bp->link_params.phy[EXT_PHY1].supported;
6062 bp->port.supported[0] =
6063 bp->link_params.phy[EXT_PHY2].supported;
6064 } else {
6065 bp->port.supported[0] =
6066 bp->link_params.phy[EXT_PHY1].supported;
6067 bp->port.supported[1] =
6068 bp->link_params.phy[EXT_PHY2].supported;
6069 }
6070 cfg_size = 2;
6071 break;
6072 }
6109 6073
6110 default: 6074 if (!(bp->port.supported[0] || bp->port.supported[1])) {
 6111 BNX2X_ERR("NVRAM config error. " 6075 BNX2X_ERR("NVRAM config error. BAD phy config. "
6112 "BAD SerDes ext_phy_config 0x%x\n", 6076 "PHY1 config 0x%x, PHY2 config 0x%x\n",
6113 bp->link_params.ext_phy_config); 6077 SHMEM_RD(bp,
6078 dev_info.port_hw_config[port].external_phy_config),
6079 SHMEM_RD(bp,
6080 dev_info.port_hw_config[port].external_phy_config2));
6114 return; 6081 return;
6115 } 6082 }
6116 6083
6084 switch (switch_cfg) {
6085 case SWITCH_CFG_1G:
6117 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + 6086 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6118 port*0x10); 6087 port*0x10);
6119 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 6088 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6120 break; 6089 break;
6121 6090
6122 case SWITCH_CFG_10G: 6091 case SWITCH_CFG_10G:
6123 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6124
6125 ext_phy_type =
6126 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6127 switch (ext_phy_type) {
6128 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6129 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6130 ext_phy_type);
6131
6132 bp->port.supported |= (SUPPORTED_10baseT_Half |
6133 SUPPORTED_10baseT_Full |
6134 SUPPORTED_100baseT_Half |
6135 SUPPORTED_100baseT_Full |
6136 SUPPORTED_1000baseT_Full |
6137 SUPPORTED_2500baseX_Full |
6138 SUPPORTED_10000baseT_Full |
6139 SUPPORTED_TP |
6140 SUPPORTED_FIBRE |
6141 SUPPORTED_Autoneg |
6142 SUPPORTED_Pause |
6143 SUPPORTED_Asym_Pause);
6144 break;
6145
6146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6147 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6148 ext_phy_type);
6149
6150 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6151 SUPPORTED_1000baseT_Full |
6152 SUPPORTED_FIBRE |
6153 SUPPORTED_Autoneg |
6154 SUPPORTED_Pause |
6155 SUPPORTED_Asym_Pause);
6156 break;
6157
6158 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6159 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6160 ext_phy_type);
6161
6162 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6163 SUPPORTED_2500baseX_Full |
6164 SUPPORTED_1000baseT_Full |
6165 SUPPORTED_FIBRE |
6166 SUPPORTED_Autoneg |
6167 SUPPORTED_Pause |
6168 SUPPORTED_Asym_Pause);
6169 break;
6170
6171 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6172 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6173 ext_phy_type);
6174
6175 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6176 SUPPORTED_FIBRE |
6177 SUPPORTED_Pause |
6178 SUPPORTED_Asym_Pause);
6179 break;
6180
6181 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6182 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6183 ext_phy_type);
6184
6185 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6186 SUPPORTED_1000baseT_Full |
6187 SUPPORTED_FIBRE |
6188 SUPPORTED_Pause |
6189 SUPPORTED_Asym_Pause);
6190 break;
6191
6192 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6193 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
6194 ext_phy_type);
6195
6196 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6197 SUPPORTED_1000baseT_Full |
6198 SUPPORTED_Autoneg |
6199 SUPPORTED_FIBRE |
6200 SUPPORTED_Pause |
6201 SUPPORTED_Asym_Pause);
6202 break;
6203
6204 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6205 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
6206 ext_phy_type);
6207
6208 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6209 SUPPORTED_1000baseT_Full |
6210 SUPPORTED_Autoneg |
6211 SUPPORTED_FIBRE |
6212 SUPPORTED_Pause |
6213 SUPPORTED_Asym_Pause);
6214 break;
6215
6216 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6217 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6218 ext_phy_type);
6219
6220 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6221 SUPPORTED_TP |
6222 SUPPORTED_Autoneg |
6223 SUPPORTED_Pause |
6224 SUPPORTED_Asym_Pause);
6225 break;
6226
6227 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6228 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
6229 ext_phy_type);
6230
6231 bp->port.supported |= (SUPPORTED_10baseT_Half |
6232 SUPPORTED_10baseT_Full |
6233 SUPPORTED_100baseT_Half |
6234 SUPPORTED_100baseT_Full |
6235 SUPPORTED_1000baseT_Full |
6236 SUPPORTED_10000baseT_Full |
6237 SUPPORTED_TP |
6238 SUPPORTED_Autoneg |
6239 SUPPORTED_Pause |
6240 SUPPORTED_Asym_Pause);
6241 break;
6242
6243 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6244 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6245 bp->link_params.ext_phy_config);
6246 break;
6247
6248 default:
6249 BNX2X_ERR("NVRAM config error. "
6250 "BAD XGXS ext_phy_config 0x%x\n",
6251 bp->link_params.ext_phy_config);
6252 return;
6253 }
6254
6255 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + 6092 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6256 port*0x18); 6093 port*0x18);
6257 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 6094 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
@@ -6260,164 +6097,183 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6260 6097
6261 default: 6098 default:
6262 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n", 6099 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6263 bp->port.link_config); 6100 bp->port.link_config[0]);
6264 return; 6101 return;
6265 } 6102 }
6266 bp->link_params.phy_addr = bp->port.phy_addr; 6103 /* mask what we support according to speed_cap_mask per configuration */
6267 6104 for (idx = 0; idx < cfg_size; idx++) {
6268 /* mask what we support according to speed_cap_mask */ 6105 if (!(bp->link_params.speed_cap_mask[idx] &
6269 if (!(bp->link_params.speed_cap_mask &
6270 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) 6106 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6271 bp->port.supported &= ~SUPPORTED_10baseT_Half; 6107 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
6272 6108
6273 if (!(bp->link_params.speed_cap_mask & 6109 if (!(bp->link_params.speed_cap_mask[idx] &
6274 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) 6110 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6275 bp->port.supported &= ~SUPPORTED_10baseT_Full; 6111 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
6276 6112
6277 if (!(bp->link_params.speed_cap_mask & 6113 if (!(bp->link_params.speed_cap_mask[idx] &
6278 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) 6114 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6279 bp->port.supported &= ~SUPPORTED_100baseT_Half; 6115 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
6280 6116
6281 if (!(bp->link_params.speed_cap_mask & 6117 if (!(bp->link_params.speed_cap_mask[idx] &
6282 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) 6118 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6283 bp->port.supported &= ~SUPPORTED_100baseT_Full; 6119 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
6284 6120
6285 if (!(bp->link_params.speed_cap_mask & 6121 if (!(bp->link_params.speed_cap_mask[idx] &
6286 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 6122 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6287 bp->port.supported &= ~(SUPPORTED_1000baseT_Half | 6123 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
6288 SUPPORTED_1000baseT_Full); 6124 SUPPORTED_1000baseT_Full);
6289 6125
6290 if (!(bp->link_params.speed_cap_mask & 6126 if (!(bp->link_params.speed_cap_mask[idx] &
6291 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 6127 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6292 bp->port.supported &= ~SUPPORTED_2500baseX_Full; 6128 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
6293 6129
6294 if (!(bp->link_params.speed_cap_mask & 6130 if (!(bp->link_params.speed_cap_mask[idx] &
6295 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 6131 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6296 bp->port.supported &= ~SUPPORTED_10000baseT_Full; 6132 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
6133
6134 }
6297 6135
6298 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported); 6136 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
6137 bp->port.supported[1]);
6299} 6138}
6300 6139
6301static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) 6140static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6302{ 6141{
6303 bp->link_params.req_duplex = DUPLEX_FULL; 6142 u32 link_config, idx, cfg_size = 0;
6304 6143 bp->port.advertising[0] = 0;
6305 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) { 6144 bp->port.advertising[1] = 0;
6145 switch (bp->link_params.num_phys) {
6146 case 1:
6147 case 2:
6148 cfg_size = 1;
6149 break;
6150 case 3:
6151 cfg_size = 2;
6152 break;
6153 }
6154 for (idx = 0; idx < cfg_size; idx++) {
6155 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
6156 link_config = bp->port.link_config[idx];
6157 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6306 case PORT_FEATURE_LINK_SPEED_AUTO: 6158 case PORT_FEATURE_LINK_SPEED_AUTO:
6307 if (bp->port.supported & SUPPORTED_Autoneg) { 6159 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
6308 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 6160 bp->link_params.req_line_speed[idx] =
6309 bp->port.advertising = bp->port.supported; 6161 SPEED_AUTO_NEG;
6162 bp->port.advertising[idx] |=
6163 bp->port.supported[idx];
6310 } else { 6164 } else {
6311 u32 ext_phy_type = 6165 /* force 10G, no AN */
6312 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); 6166 bp->link_params.req_line_speed[idx] =
6313 6167 SPEED_10000;
6314 if ((ext_phy_type == 6168 bp->port.advertising[idx] |=
6315 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || 6169 (ADVERTISED_10000baseT_Full |
6316 (ext_phy_type ==
6317 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6318 /* force 10G, no AN */
6319 bp->link_params.req_line_speed = SPEED_10000;
6320 bp->port.advertising =
6321 (ADVERTISED_10000baseT_Full |
6322 ADVERTISED_FIBRE); 6170 ADVERTISED_FIBRE);
6323 break; 6171 continue;
6324 }
6325 BNX2X_ERR("NVRAM config error. "
6326 "Invalid link_config 0x%x"
6327 " Autoneg not supported\n",
6328 bp->port.link_config);
6329 return;
6330 } 6172 }
6331 break; 6173 break;
6332 6174
6333 case PORT_FEATURE_LINK_SPEED_10M_FULL: 6175 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6334 if (bp->port.supported & SUPPORTED_10baseT_Full) { 6176 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
6335 bp->link_params.req_line_speed = SPEED_10; 6177 bp->link_params.req_line_speed[idx] =
6336 bp->port.advertising = (ADVERTISED_10baseT_Full | 6178 SPEED_10;
6179 bp->port.advertising[idx] |=
6180 (ADVERTISED_10baseT_Full |
6337 ADVERTISED_TP); 6181 ADVERTISED_TP);
6338 } else { 6182 } else {
6339 BNX2X_ERROR("NVRAM config error. " 6183 BNX2X_ERROR("NVRAM config error. "
6340 "Invalid link_config 0x%x" 6184 "Invalid link_config 0x%x"
6341 " speed_cap_mask 0x%x\n", 6185 " speed_cap_mask 0x%x\n",
6342 bp->port.link_config, 6186 link_config,
6343 bp->link_params.speed_cap_mask); 6187 bp->link_params.speed_cap_mask[idx]);
6344 return; 6188 return;
6345 } 6189 }
6346 break; 6190 break;
6347 6191
6348 case PORT_FEATURE_LINK_SPEED_10M_HALF: 6192 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6349 if (bp->port.supported & SUPPORTED_10baseT_Half) { 6193 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
6350 bp->link_params.req_line_speed = SPEED_10; 6194 bp->link_params.req_line_speed[idx] =
6351 bp->link_params.req_duplex = DUPLEX_HALF; 6195 SPEED_10;
6352 bp->port.advertising = (ADVERTISED_10baseT_Half | 6196 bp->link_params.req_duplex[idx] =
6197 DUPLEX_HALF;
6198 bp->port.advertising[idx] |=
6199 (ADVERTISED_10baseT_Half |
6353 ADVERTISED_TP); 6200 ADVERTISED_TP);
6354 } else { 6201 } else {
6355 BNX2X_ERROR("NVRAM config error. " 6202 BNX2X_ERROR("NVRAM config error. "
6356 "Invalid link_config 0x%x" 6203 "Invalid link_config 0x%x"
6357 " speed_cap_mask 0x%x\n", 6204 " speed_cap_mask 0x%x\n",
6358 bp->port.link_config, 6205 link_config,
6359 bp->link_params.speed_cap_mask); 6206 bp->link_params.speed_cap_mask[idx]);
6360 return; 6207 return;
6361 } 6208 }
6362 break; 6209 break;
6363 6210
6364 case PORT_FEATURE_LINK_SPEED_100M_FULL: 6211 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6365 if (bp->port.supported & SUPPORTED_100baseT_Full) { 6212 if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
6366 bp->link_params.req_line_speed = SPEED_100; 6213 bp->link_params.req_line_speed[idx] =
6367 bp->port.advertising = (ADVERTISED_100baseT_Full | 6214 SPEED_100;
6215 bp->port.advertising[idx] |=
6216 (ADVERTISED_100baseT_Full |
6368 ADVERTISED_TP); 6217 ADVERTISED_TP);
6369 } else { 6218 } else {
6370 BNX2X_ERROR("NVRAM config error. " 6219 BNX2X_ERROR("NVRAM config error. "
6371 "Invalid link_config 0x%x" 6220 "Invalid link_config 0x%x"
6372 " speed_cap_mask 0x%x\n", 6221 " speed_cap_mask 0x%x\n",
6373 bp->port.link_config, 6222 link_config,
6374 bp->link_params.speed_cap_mask); 6223 bp->link_params.speed_cap_mask[idx]);
6375 return; 6224 return;
6376 } 6225 }
6377 break; 6226 break;
6378 6227
6379 case PORT_FEATURE_LINK_SPEED_100M_HALF: 6228 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6380 if (bp->port.supported & SUPPORTED_100baseT_Half) { 6229 if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
6381 bp->link_params.req_line_speed = SPEED_100; 6230 bp->link_params.req_line_speed[idx] = SPEED_100;
6382 bp->link_params.req_duplex = DUPLEX_HALF; 6231 bp->link_params.req_duplex[idx] = DUPLEX_HALF;
6383 bp->port.advertising = (ADVERTISED_100baseT_Half | 6232 bp->port.advertising[idx] |=
6233 (ADVERTISED_100baseT_Half |
6384 ADVERTISED_TP); 6234 ADVERTISED_TP);
6385 } else { 6235 } else {
6386 BNX2X_ERROR("NVRAM config error. " 6236 BNX2X_ERROR("NVRAM config error. "
6387 "Invalid link_config 0x%x" 6237 "Invalid link_config 0x%x"
6388 " speed_cap_mask 0x%x\n", 6238 " speed_cap_mask 0x%x\n",
6389 bp->port.link_config, 6239 link_config,
6390 bp->link_params.speed_cap_mask); 6240 bp->link_params.speed_cap_mask[idx]);
6391 return; 6241 return;
6392 } 6242 }
6393 break; 6243 break;
6394 6244
6395 case PORT_FEATURE_LINK_SPEED_1G: 6245 case PORT_FEATURE_LINK_SPEED_1G:
6396 if (bp->port.supported & SUPPORTED_1000baseT_Full) { 6246 if (bp->port.supported[idx] &
6397 bp->link_params.req_line_speed = SPEED_1000; 6247 SUPPORTED_1000baseT_Full) {
6398 bp->port.advertising = (ADVERTISED_1000baseT_Full | 6248 bp->link_params.req_line_speed[idx] =
6249 SPEED_1000;
6250 bp->port.advertising[idx] |=
6251 (ADVERTISED_1000baseT_Full |
6399 ADVERTISED_TP); 6252 ADVERTISED_TP);
6400 } else { 6253 } else {
6401 BNX2X_ERROR("NVRAM config error. " 6254 BNX2X_ERROR("NVRAM config error. "
6402 "Invalid link_config 0x%x" 6255 "Invalid link_config 0x%x"
6403 " speed_cap_mask 0x%x\n", 6256 " speed_cap_mask 0x%x\n",
6404 bp->port.link_config, 6257 link_config,
6405 bp->link_params.speed_cap_mask); 6258 bp->link_params.speed_cap_mask[idx]);
6406 return; 6259 return;
6407 } 6260 }
6408 break; 6261 break;
6409 6262
6410 case PORT_FEATURE_LINK_SPEED_2_5G: 6263 case PORT_FEATURE_LINK_SPEED_2_5G:
6411 if (bp->port.supported & SUPPORTED_2500baseX_Full) { 6264 if (bp->port.supported[idx] &
6412 bp->link_params.req_line_speed = SPEED_2500; 6265 SUPPORTED_2500baseX_Full) {
6413 bp->port.advertising = (ADVERTISED_2500baseX_Full | 6266 bp->link_params.req_line_speed[idx] =
6267 SPEED_2500;
6268 bp->port.advertising[idx] |=
6269 (ADVERTISED_2500baseX_Full |
6414 ADVERTISED_TP); 6270 ADVERTISED_TP);
6415 } else { 6271 } else {
6416 BNX2X_ERROR("NVRAM config error. " 6272 BNX2X_ERROR("NVRAM config error. "
6417 "Invalid link_config 0x%x" 6273 "Invalid link_config 0x%x"
6418 " speed_cap_mask 0x%x\n", 6274 " speed_cap_mask 0x%x\n",
6419 bp->port.link_config, 6275 link_config,
6420 bp->link_params.speed_cap_mask); 6276 bp->link_params.speed_cap_mask[idx]);
6421 return; 6277 return;
6422 } 6278 }
6423 break; 6279 break;
@@ -6425,16 +6281,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6425 case PORT_FEATURE_LINK_SPEED_10G_CX4: 6281 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6426 case PORT_FEATURE_LINK_SPEED_10G_KX4: 6282 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6427 case PORT_FEATURE_LINK_SPEED_10G_KR: 6283 case PORT_FEATURE_LINK_SPEED_10G_KR:
6428 if (bp->port.supported & SUPPORTED_10000baseT_Full) { 6284 if (bp->port.supported[idx] &
6429 bp->link_params.req_line_speed = SPEED_10000; 6285 SUPPORTED_10000baseT_Full) {
6430 bp->port.advertising = (ADVERTISED_10000baseT_Full | 6286 bp->link_params.req_line_speed[idx] =
6287 SPEED_10000;
6288 bp->port.advertising[idx] |=
6289 (ADVERTISED_10000baseT_Full |
6431 ADVERTISED_FIBRE); 6290 ADVERTISED_FIBRE);
6432 } else { 6291 } else {
6433 BNX2X_ERROR("NVRAM config error. " 6292 BNX2X_ERROR("NVRAM config error. "
6434 "Invalid link_config 0x%x" 6293 "Invalid link_config 0x%x"
6435 " speed_cap_mask 0x%x\n", 6294 " speed_cap_mask 0x%x\n",
6436 bp->port.link_config, 6295 link_config,
6437 bp->link_params.speed_cap_mask); 6296 bp->link_params.speed_cap_mask[idx]);
6438 return; 6297 return;
6439 } 6298 }
6440 break; 6299 break;
@@ -6442,23 +6301,28 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6442 default: 6301 default:
6443 BNX2X_ERROR("NVRAM config error. " 6302 BNX2X_ERROR("NVRAM config error. "
6444 "BAD link speed link_config 0x%x\n", 6303 "BAD link speed link_config 0x%x\n",
6445 bp->port.link_config); 6304 link_config);
6446 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 6305 bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
6447 bp->port.advertising = bp->port.supported; 6306 bp->port.advertising[idx] = bp->port.supported[idx];
6448 break; 6307 break;
6449 } 6308 }
6450 6309
6451 bp->link_params.req_flow_ctrl = (bp->port.link_config & 6310 bp->link_params.req_flow_ctrl[idx] = (link_config &
6452 PORT_FEATURE_FLOW_CONTROL_MASK); 6311 PORT_FEATURE_FLOW_CONTROL_MASK);
6453 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) && 6312 if ((bp->link_params.req_flow_ctrl[idx] ==
6454 !(bp->port.supported & SUPPORTED_Autoneg)) 6313 BNX2X_FLOW_CTRL_AUTO) &&
6455 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE; 6314 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
6315 bp->link_params.req_flow_ctrl[idx] =
6316 BNX2X_FLOW_CTRL_NONE;
6317 }
6456 6318
6457 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" 6319 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
6458 " advertising 0x%x\n", 6320 " 0x%x advertising 0x%x\n",
6459 bp->link_params.req_line_speed, 6321 bp->link_params.req_line_speed[idx],
6460 bp->link_params.req_duplex, 6322 bp->link_params.req_duplex[idx],
6461 bp->link_params.req_flow_ctrl, bp->port.advertising); 6323 bp->link_params.req_flow_ctrl[idx],
6324 bp->port.advertising[idx]);
6325 }
6462} 6326}
6463 6327
6464static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 6328static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
@@ -6474,48 +6338,28 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6474 int port = BP_PORT(bp); 6338 int port = BP_PORT(bp);
6475 u32 val, val2; 6339 u32 val, val2;
6476 u32 config; 6340 u32 config;
 6477 u16 i; 6341 u32 ext_phy_type, ext_phy_config;
6478 u32 ext_phy_type;
6479 6342
6480 bp->link_params.bp = bp; 6343 bp->link_params.bp = bp;
6481 bp->link_params.port = port; 6344 bp->link_params.port = port;
6482 6345
6483 bp->link_params.lane_config = 6346 bp->link_params.lane_config =
6484 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 6347 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6485 bp->link_params.ext_phy_config =
6486 SHMEM_RD(bp,
6487 dev_info.port_hw_config[port].external_phy_config);
6488 /* BCM8727_NOC => BCM8727 no over current */
6489 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6490 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6491 bp->link_params.ext_phy_config &=
6492 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6493 bp->link_params.ext_phy_config |=
6494 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6495 bp->link_params.feature_config_flags |=
6496 FEATURE_CONFIG_BCM8727_NOC;
6497 }
6498 6348
6499 bp->link_params.speed_cap_mask = 6349 bp->link_params.speed_cap_mask[0] =
6500 SHMEM_RD(bp, 6350 SHMEM_RD(bp,
6501 dev_info.port_hw_config[port].speed_capability_mask); 6351 dev_info.port_hw_config[port].speed_capability_mask);
6502 6352 bp->link_params.speed_cap_mask[1] =
6503 bp->port.link_config = 6353 SHMEM_RD(bp,
6354 dev_info.port_hw_config[port].speed_capability_mask2);
6355 bp->port.link_config[0] =
6504 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 6356 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6505 6357
6506 /* Get the 4 lanes xgxs config rx and tx */ 6358 bp->port.link_config[1] =
6507 for (i = 0; i < 2; i++) { 6359 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
6508 val = SHMEM_RD(bp,
6509 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6510 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6511 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6512
6513 val = SHMEM_RD(bp,
6514 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6515 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6516 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6517 }
6518 6360
6361 bp->link_params.multi_phy_config =
6362 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
6519 /* If the device is capable of WoL, set the default state according 6363 /* If the device is capable of WoL, set the default state according
6520 * to the HW 6364 * to the HW
6521 */ 6365 */
@@ -6523,14 +6367,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6523 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 6367 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6524 (config & PORT_FEATURE_WOL_ENABLED)); 6368 (config & PORT_FEATURE_WOL_ENABLED));
6525 6369
6526 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x" 6370 BNX2X_DEV_INFO("lane_config 0x%08x"
 6527 " speed_cap_mask 0x%08x link_config 0x%08x\n", 6371 " speed_cap_mask0 0x%08x link_config0 0x%08x\n",
6528 bp->link_params.lane_config, 6372 bp->link_params.lane_config,
6529 bp->link_params.ext_phy_config, 6373 bp->link_params.speed_cap_mask[0],
6530 bp->link_params.speed_cap_mask, bp->port.link_config); 6374 bp->port.link_config[0]);
6531 6375
6532 bp->link_params.switch_cfg |= (bp->port.link_config & 6376 bp->link_params.switch_cfg = (bp->port.link_config[0] &
6533 PORT_FEATURE_CONNECTED_SWITCH_MASK); 6377 PORT_FEATURE_CONNECTED_SWITCH_MASK);
6378 bnx2x_phy_probe(&bp->link_params);
6534 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 6379 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6535 6380
6536 bnx2x_link_settings_requested(bp); 6381 bnx2x_link_settings_requested(bp);
@@ -6539,14 +6384,17 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6539 * If connected directly, work with the internal PHY, otherwise, work 6384 * If connected directly, work with the internal PHY, otherwise, work
6540 * with the external PHY 6385 * with the external PHY
6541 */ 6386 */
6542 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); 6387 ext_phy_config =
6388 SHMEM_RD(bp,
6389 dev_info.port_hw_config[port].external_phy_config);
6390 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6543 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) 6391 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6544 bp->mdio.prtad = bp->link_params.phy_addr; 6392 bp->mdio.prtad = bp->port.phy_addr;
6545 6393
6546 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && 6394 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6547 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 6395 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6548 bp->mdio.prtad = 6396 bp->mdio.prtad =
6549 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config); 6397 XGXS_EXT_PHY_ADDR(ext_phy_config);
6550 6398
6551 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 6399 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6552 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 6400 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
@@ -6771,7 +6619,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6771 bp->mrrs = mrrs; 6619 bp->mrrs = mrrs;
6772 6620
6773 bp->tx_ring_size = MAX_TX_AVAIL; 6621 bp->tx_ring_size = MAX_TX_AVAIL;
6774 bp->rx_ring_size = MAX_RX_AVAIL;
6775 6622
6776 bp->rx_csum = 1; 6623 bp->rx_csum = 1;
6777 6624
@@ -6982,23 +6829,15 @@ static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6982 struct bnx2x *bp = netdev_priv(netdev); 6829 struct bnx2x *bp = netdev_priv(netdev);
6983 u16 value; 6830 u16 value;
6984 int rc; 6831 int rc;
6985 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6986 6832
6987 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n", 6833 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6988 prtad, devad, addr); 6834 prtad, devad, addr);
6989 6835
6990 if (prtad != bp->mdio.prtad) {
6991 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
6992 prtad, bp->mdio.prtad);
6993 return -EINVAL;
6994 }
6995
6996 /* The HW expects different devad if CL22 is used */ 6836 /* The HW expects different devad if CL22 is used */
6997 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 6837 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6998 6838
6999 bnx2x_acquire_phy_lock(bp); 6839 bnx2x_acquire_phy_lock(bp);
7000 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad, 6840 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
7001 devad, addr, &value);
7002 bnx2x_release_phy_lock(bp); 6841 bnx2x_release_phy_lock(bp);
7003 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc); 6842 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
7004 6843
@@ -7012,24 +6851,16 @@ static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7012 u16 addr, u16 value) 6851 u16 addr, u16 value)
7013{ 6852{
7014 struct bnx2x *bp = netdev_priv(netdev); 6853 struct bnx2x *bp = netdev_priv(netdev);
7015 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7016 int rc; 6854 int rc;
7017 6855
7018 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x," 6856 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7019 " value 0x%x\n", prtad, devad, addr, value); 6857 " value 0x%x\n", prtad, devad, addr, value);
7020 6858
7021 if (prtad != bp->mdio.prtad) {
7022 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
7023 prtad, bp->mdio.prtad);
7024 return -EINVAL;
7025 }
7026
7027 /* The HW expects different devad if CL22 is used */ 6859 /* The HW expects different devad if CL22 is used */
7028 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad; 6860 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7029 6861
7030 bnx2x_acquire_phy_lock(bp); 6862 bnx2x_acquire_phy_lock(bp);
7031 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad, 6863 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
7032 devad, addr, value);
7033 bnx2x_release_phy_lock(bp); 6864 bnx2x_release_phy_lock(bp);
7034 return rc; 6865 return rc;
7035} 6866}
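
The CL22 fallback kept in both accessors is the mdio45 convention: MDIO_DEVAD_NONE marks a Clause 22 request, which this hardware expects at a fixed device address. A minimal sketch of that normalization, assuming only linux/mdio.h; the helper name and default address parameter below are illustrative, not part of this patch:

#include <linux/mdio.h>

/* Illustrative helper: MDIO_DEVAD_NONE (-1) means the caller asked for
 * a Clause 22 access, so substitute the fixed device address the PHY
 * expects in that mode; Clause 45 requests pass their devad through. */
static inline int sketch_normalize_devad(int devad, int cl22_dev_addr)
{
	return (devad == MDIO_DEVAD_NONE) ? cl22_dev_addr : devad;
}
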
@@ -7259,7 +7090,7 @@ static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7259 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; 7090 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7260} 7091}
7261 7092
7262static int __devinit bnx2x_check_firmware(struct bnx2x *bp) 7093static int bnx2x_check_firmware(struct bnx2x *bp)
7263{ 7094{
7264 const struct firmware *firmware = bp->firmware; 7095 const struct firmware *firmware = bp->firmware;
7265 struct bnx2x_fw_file_hdr *fw_hdr; 7096 struct bnx2x_fw_file_hdr *fw_hdr;
@@ -7370,7 +7201,7 @@ do { \
7370 (u8 *)bp->arr, len); \ 7201 (u8 *)bp->arr, len); \
7371} while (0) 7202} while (0)
7372 7203
7373static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev) 7204int bnx2x_init_firmware(struct bnx2x *bp)
7374{ 7205{
7375 const char *fw_file_name; 7206 const char *fw_file_name;
7376 struct bnx2x_fw_file_hdr *fw_hdr; 7207 struct bnx2x_fw_file_hdr *fw_hdr;
@@ -7381,21 +7212,21 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
7381 else if (CHIP_IS_E1H(bp)) 7212 else if (CHIP_IS_E1H(bp))
7382 fw_file_name = FW_FILE_NAME_E1H; 7213 fw_file_name = FW_FILE_NAME_E1H;
7383 else { 7214 else {
7384 dev_err(dev, "Unsupported chip revision\n"); 7215 BNX2X_ERR("Unsupported chip revision\n");
7385 return -EINVAL; 7216 return -EINVAL;
7386 } 7217 }
7387 7218
7388 dev_info(dev, "Loading %s\n", fw_file_name); 7219 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
7389 7220
7390 rc = request_firmware(&bp->firmware, fw_file_name, dev); 7221 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
7391 if (rc) { 7222 if (rc) {
7392 dev_err(dev, "Can't load firmware file %s\n", fw_file_name); 7223 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
7393 goto request_firmware_exit; 7224 goto request_firmware_exit;
7394 } 7225 }
7395 7226
7396 rc = bnx2x_check_firmware(bp); 7227 rc = bnx2x_check_firmware(bp);
7397 if (rc) { 7228 if (rc) {
7398 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name); 7229 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
7399 goto request_firmware_exit; 7230 goto request_firmware_exit;
7400 } 7231 }
7401 7232
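
bnx2x_init_firmware() now derives the device from bp->pdev rather than a caller-supplied struct device, and, per the init_one/remove_one hunks below, loading moves out of probe entirely. A minimal sketch of the request_firmware()/release_firmware() lifecycle this relies on; the file name and the synchronous style are assumptions for illustration:

#include <linux/firmware.h>

/* Illustrative loader: request_firmware() blocks until the named blob
 * is found; fw->data and fw->size stay valid until release_firmware()
 * is called, which must always happen exactly once per request. */
static int sketch_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, "sketch/fw.bin", dev);
	if (rc)
		return rc;

	/* ... validate fw->data, copy out what the driver keeps ... */

	release_firmware(fw);
	return 0;
}
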
@@ -7473,13 +7304,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7473 if (rc) 7304 if (rc)
7474 goto init_one_exit; 7305 goto init_one_exit;
7475 7306
7476 /* Set init arrays */
7477 rc = bnx2x_init_firmware(bp, &pdev->dev);
7478 if (rc) {
7479 dev_err(&pdev->dev, "Error loading firmware\n");
7480 goto init_one_exit;
7481 }
7482
7483 rc = register_netdev(dev); 7307 rc = register_netdev(dev);
7484 if (rc) { 7308 if (rc) {
7485 dev_err(&pdev->dev, "Cannot register net device\n"); 7309 dev_err(&pdev->dev, "Cannot register net device\n");
@@ -7530,11 +7354,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7530 /* Make sure RESET task is not scheduled before continuing */ 7354 /* Make sure RESET task is not scheduled before continuing */
7531 cancel_delayed_work_sync(&bp->reset_task); 7355 cancel_delayed_work_sync(&bp->reset_task);
7532 7356
7533 kfree(bp->init_ops_offsets);
7534 kfree(bp->init_ops);
7535 kfree(bp->init_data);
7536 release_firmware(bp->firmware);
7537
7538 if (bp->regview) 7357 if (bp->regview)
7539 iounmap(bp->regview); 7358 iounmap(bp->regview);
7540 7359
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index a1f3bf0cd630..6be0d09ad3fd 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -4964,6 +4964,8 @@
4964#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001 4964#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN 0x0001
4965#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040 4965#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 0x0040
4966#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14 4966#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1 0x14
4967#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII 0x0001
4968#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK 0x0002
4967#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004 4969#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX 0x0004
4968#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018 4970#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK 0x0018
4969#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3 4971#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 3
@@ -5135,28 +5137,35 @@ The other bits are reserved and should be zero*/
5135#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005 5137#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR 0x8005
5136#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007 5138#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF 0x8007
5137#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff 5139#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
5138#define MDIO_PMA_REG_8727_MISC_CTRL 0x8309
5139#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02 5140#define MDIO_PMA_REG_8727_TX_CTRL1 0xca02
5140#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05 5141#define MDIO_PMA_REG_8727_TX_CTRL2 0xca05
5141#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 5142#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
5142#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e 5143#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
5144#define MDIO_PMA_REG_8727_PCS_GP 0xc842
5145
5146#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
5143 5147
5144#define MDIO_PMA_REG_8073_CHIP_REV 0xc801 5148#define MDIO_PMA_REG_8073_CHIP_REV 0xc801
5145#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820 5149#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS 0xc820
5146#define MDIO_PMA_REG_8073_XAUI_WA 0xc841 5150#define MDIO_PMA_REG_8073_XAUI_WA 0xc841
5151#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL 0xcd08
5147 5152
5148#define MDIO_PMA_REG_7101_RESET 0xc000 5153#define MDIO_PMA_REG_7101_RESET 0xc000
5149#define MDIO_PMA_REG_7107_LED_CNTL 0xc007 5154#define MDIO_PMA_REG_7107_LED_CNTL 0xc007
5155#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
5150#define MDIO_PMA_REG_7101_VER1 0xc026 5156#define MDIO_PMA_REG_7101_VER1 0xc026
5151#define MDIO_PMA_REG_7101_VER2 0xc027 5157#define MDIO_PMA_REG_7101_VER2 0xc027
5152 5158
5153#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811 5159#define MDIO_PMA_REG_8481_PMD_SIGNAL 0xa811
5154#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c 5160#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
5155#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f 5161#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
5156#define MDIO_PMA_REG_8481_LED3_MASK 0xa832 5162#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
5157#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834 5163#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
5158#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835 5164#define MDIO_PMA_REG_8481_LED5_MASK 0xa838
5159#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b 5165#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
5166#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
5167#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK 0x800
5168#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
5160 5169
5161 5170
5162#define MDIO_WIS_DEVAD 0x2 5171#define MDIO_WIS_DEVAD 0x2
@@ -5188,6 +5197,8 @@ The other bits are reserved and should be zero*/
5188#define MDIO_XS_8706_REG_BANK_RX3 0x80ec 5197#define MDIO_XS_8706_REG_BANK_RX3 0x80ec
5189#define MDIO_XS_8706_REG_BANK_RXA 0x80fc 5198#define MDIO_XS_8706_REG_BANK_RXA 0x80fc
5190 5199
5200#define MDIO_XS_REG_8073_RX_CTRL_PCIE 0x80FA
5201
5191#define MDIO_AN_DEVAD 0x7 5202#define MDIO_AN_DEVAD 0x7
5192/*ieee*/ 5203/*ieee*/
5193#define MDIO_AN_REG_CTRL 0x0000 5204#define MDIO_AN_REG_CTRL 0x0000
@@ -5210,14 +5221,40 @@ The other bits are reserved and should be zero*/
5210#define MDIO_AN_REG_CL37_FC_LP 0xffe5 5221#define MDIO_AN_REG_CL37_FC_LP 0xffe5
5211 5222
5212#define MDIO_AN_REG_8073_2_5G 0x8329 5223#define MDIO_AN_REG_8073_2_5G 0x8329
5224#define MDIO_AN_REG_8073_BAM 0x8350
5213 5225
5226#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL 0x0020
5214#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0 5227#define MDIO_AN_REG_8481_LEGACY_MII_CTRL 0xffe0
5228#define MDIO_AN_REG_8481_LEGACY_MII_STATUS 0xffe1
5215#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4 5229#define MDIO_AN_REG_8481_LEGACY_AN_ADV 0xffe4
5230#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION 0xffe6
5216#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9 5231#define MDIO_AN_REG_8481_1000T_CTRL 0xffe9
5217#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5 5232#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW 0xfff5
5218#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7 5233#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS 0xfff7
5234#define MDIO_AN_REG_8481_AUX_CTRL 0xfff8
5219#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc 5235#define MDIO_AN_REG_8481_LEGACY_SHADOW 0xfffc
5220 5236
5237/* BCM84823 only */
5238#define MDIO_CTL_DEVAD 0x1e
5239#define MDIO_CTL_REG_84823_MEDIA 0x401a
5240#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK 0x0018
5241 /* These pins configure the BCM84823 interface to MAC after reset. */
5242#define MDIO_CTL_REG_84823_CTRL_MAC_XFI 0x0008
5243#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M 0x0010
5244 /* These pins configure the BCM84823 interface to Line after reset. */
5245#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK 0x0060
5246#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L 0x0020
5247#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI 0x0040
 5248 /* When this pin is active high during reset, the 10GBASE-T core is
 5249 * powered down; when it is active low, the 10GBASE-T core is powered up.
 5250 */
5251#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN 0x0080
5252#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK 0x0100
5253#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
5254#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
5255#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
5256
5257
5221#define IGU_FUNC_BASE 0x0400 5258#define IGU_FUNC_BASE 0x0400
5222 5259
5223#define IGU_ADDR_MSIX 0x0000 5260#define IGU_ADDR_MSIX 0x0000
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index c74724461020..efa1403ebf82 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -969,6 +969,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
969{ 969{
970 struct bnx2x_eth_stats *estats = &bp->eth_stats; 970 struct bnx2x_eth_stats *estats = &bp->eth_stats;
971 struct net_device_stats *nstats = &bp->dev->stats; 971 struct net_device_stats *nstats = &bp->dev->stats;
972 unsigned long tmp;
972 int i; 973 int i;
973 974
974 nstats->rx_packets = 975 nstats->rx_packets =
@@ -985,10 +986,10 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
985 986
986 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); 987 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
987 988
988 nstats->rx_dropped = estats->mac_discard; 989 tmp = estats->mac_discard;
989 for_each_queue(bp, i) 990 for_each_queue(bp, i)
990 nstats->rx_dropped += 991 tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
991 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); 992 nstats->rx_dropped = tmp;
992 993
993 nstats->tx_dropped = 0; 994 nstats->tx_dropped = 0;
994 995
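
The rewrite sums into a local unsigned long and stores the result once, so concurrent readers of nstats->rx_dropped never see a partially accumulated value mid-loop. A minimal sketch of the accumulate-then-publish pattern; the array argument stands in for the per-queue tclient counters:

#include <linux/netdevice.h>

/* Sum per-queue drop counters locally, then publish with one store;
 * readers of the stats field never observe an intermediate total. */
static void sketch_update_rx_dropped(struct net_device_stats *nstats,
				     const u32 *per_queue_drops, int nqueues)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; i < nqueues; i++)
		tmp += per_queue_drops[i];
	nstats->rx_dropped = tmp;
}
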
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 0ddf4c66afe2..079b9d1eead5 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -252,7 +252,7 @@ static inline void __enable_port(struct port *port)
252 */ 252 */
253static inline int __port_is_enabled(struct port *port) 253static inline int __port_is_enabled(struct port *port)
254{ 254{
255 return(port->slave->state == BOND_STATE_ACTIVE); 255 return port->slave->state == BOND_STATE_ACTIVE;
256} 256}
257 257
258/** 258/**
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3b16f62d5606..fb70c3e12927 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4678,6 +4678,10 @@ static void bond_setup(struct net_device *bond_dev)
4678 NETIF_F_HW_VLAN_RX | 4678 NETIF_F_HW_VLAN_RX |
4679 NETIF_F_HW_VLAN_FILTER); 4679 NETIF_F_HW_VLAN_FILTER);
4680 4680
4681 /* By default, we enable GRO on bonding devices.
 4682 * Actual support requires that the low-level slave drivers be GRO-ready.
4683 */
4684 bond_dev->features |= NETIF_F_GRO;
4681} 4685}
4682 4686
4683static void bond_work_cancel_all(struct bonding *bond) 4687static void bond_work_cancel_all(struct bonding *bond)
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
index 88edb986691a..6e99d80ec409 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/bsd_comp.c
@@ -429,7 +429,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
429 if (!db->lens) 429 if (!db->lens)
430 { 430 {
431 bsd_free (db); 431 bsd_free (db);
432 return (NULL); 432 return NULL;
433 } 433 }
434 } 434 }
435/* 435/*
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index b1bdc909090f..312b9c8f4f3b 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -143,12 +143,12 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
143 np_clock = of_find_matching_node(NULL, mpc512x_clock_ids); 143 np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
144 if (!np_clock) { 144 if (!np_clock) {
145 dev_err(&ofdev->dev, "couldn't find clock node\n"); 145 dev_err(&ofdev->dev, "couldn't find clock node\n");
146 return -ENODEV; 146 return 0;
147 } 147 }
148 clockctl = of_iomap(np_clock, 0); 148 clockctl = of_iomap(np_clock, 0);
149 if (!clockctl) { 149 if (!clockctl) {
150 dev_err(&ofdev->dev, "couldn't map clock registers\n"); 150 dev_err(&ofdev->dev, "couldn't map clock registers\n");
151 return 0; 151 goto exit_put;
152 } 152 }
153 153
154 /* Determine the MSCAN device index from the physical address */ 154 /* Determine the MSCAN device index from the physical address */
@@ -233,9 +233,9 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
233 clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv); 233 clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
234 234
235exit_unmap: 235exit_unmap:
236 of_node_put(np_clock);
237 iounmap(clockctl); 236 iounmap(clockctl);
238 237exit_put:
238 of_node_put(np_clock);
239 return freq; 239 return freq;
240} 240}
241#else /* !CONFIG_PPC_MPC512x */ 241#else /* !CONFIG_PPC_MPC512x */
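
Beyond returning 0 (the error value for a frequency), the reordered labels make the cleanup unwind in reverse order of acquisition: the node reference taken by of_find_matching_node() must be dropped whether or not of_iomap() succeeded. A minimal sketch of that goto-unwind shape; the function and its arguments are invented for illustration:

#include <linux/of.h>
#include <linux/of_address.h>

/* Illustrative: acquire a node reference, then a mapping; on failure,
 * unwind only what was actually acquired, in reverse order. */
static void __iomem *sketch_map_clock(const struct of_device_id *ids,
				      struct device_node **np_out)
{
	struct device_node *np;
	void __iomem *regs;

	np = of_find_matching_node(NULL, ids);	/* takes a reference */
	if (!np)
		return NULL;

	regs = of_iomap(np, 0);
	if (!regs)
		goto err_put;

	*np_out = np;				/* caller now owns the ref */
	return regs;

err_put:
	of_node_put(np);
	return NULL;
}
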
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 28c88eeec757..d6b6d6aa565a 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -419,7 +419,7 @@ static u16 cas_phy_read(struct cas *cp, int reg)
419 udelay(10); 419 udelay(10);
420 cmd = readl(cp->regs + REG_MIF_FRAME); 420 cmd = readl(cp->regs + REG_MIF_FRAME);
421 if (cmd & MIF_FRAME_TURN_AROUND_LSB) 421 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
422 return (cmd & MIF_FRAME_DATA_MASK); 422 return cmd & MIF_FRAME_DATA_MASK;
423 } 423 }
424 return 0xFFFF; /* -1 */ 424 return 0xFFFF; /* -1 */
425} 425}
@@ -804,7 +804,7 @@ static int cas_reset_mii_phy(struct cas *cp)
804 break; 804 break;
805 udelay(10); 805 udelay(10);
806 } 806 }
807 return (limit <= 0); 807 return limit <= 0;
808} 808}
809 809
810static int cas_saturn_firmware_init(struct cas *cp) 810static int cas_saturn_firmware_init(struct cas *cp)
@@ -2149,7 +2149,7 @@ end_copy_pkt:
2149 skb->csum = csum_unfold(~csum); 2149 skb->csum = csum_unfold(~csum);
2150 skb->ip_summed = CHECKSUM_COMPLETE; 2150 skb->ip_summed = CHECKSUM_COMPLETE;
2151 } else 2151 } else
2152 skb->ip_summed = CHECKSUM_NONE; 2152 skb_checksum_none_assert(skb);
2153 return len; 2153 return len;
2154} 2154}
2155 2155
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index f01cfdb995de..340b537dc97e 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1388,7 +1388,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1388 ++st->rx_cso_good; 1388 ++st->rx_cso_good;
1389 skb->ip_summed = CHECKSUM_UNNECESSARY; 1389 skb->ip_summed = CHECKSUM_UNNECESSARY;
1390 } else 1390 } else
1391 skb->ip_summed = CHECKSUM_NONE; 1391 skb_checksum_none_assert(skb);
1392 1392
1393 if (unlikely(adapter->vlan_grp && p->vlan_valid)) { 1393 if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
1394 st->vlan_xtract++; 1394 st->vlan_xtract++;
@@ -1551,7 +1551,7 @@ static inline int responses_pending(const struct adapter *adapter)
1551 const struct respQ *Q = &adapter->sge->respQ; 1551 const struct respQ *Q = &adapter->sge->respQ;
1552 const struct respQ_e *e = &Q->entries[Q->cidx]; 1552 const struct respQ_e *e = &Q->entries[Q->cidx];
1553 1553
1554 return (e->GenerationBit == Q->genbit); 1554 return e->GenerationBit == Q->genbit;
1555} 1555}
1556 1556
1557/* 1557/*
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 599d178df62d..63ebf76d2390 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -314,14 +314,12 @@ static int mi1_mdio_write(struct net_device *dev, int phy_addr, int mmd_addr,
314 return 0; 314 return 0;
315} 315}
316 316
317#if defined(CONFIG_CHELSIO_T1_1G)
318static const struct mdio_ops mi1_mdio_ops = { 317static const struct mdio_ops mi1_mdio_ops = {
319 .init = mi1_mdio_init, 318 .init = mi1_mdio_init,
320 .read = mi1_mdio_read, 319 .read = mi1_mdio_read,
321 .write = mi1_mdio_write, 320 .write = mi1_mdio_write,
322 .mode_support = MDIO_SUPPORTS_C22 321 .mode_support = MDIO_SUPPORTS_C22
323}; 322};
324#endif
325 323
326#endif 324#endif
327 325
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
index c844111cffeb..106a590f0d9a 100644
--- a/drivers/net/chelsio/vsc7326.c
+++ b/drivers/net/chelsio/vsc7326.c
@@ -255,7 +255,7 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
255 else if ((result & (1 << 8)) != 0x0) 255 else if ((result & (1 << 8)) != 0x0)
256 pr_err("bist read error: 0x%x\n", result); 256 pr_err("bist read error: 0x%x\n", result);
257 257
258 return (result & 0xff); 258 return result & 0xff;
259} 259}
260 260
261static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) 261static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 09610323a948..2ab6a7c4ffc1 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1022,7 +1022,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1022 if (blks > cp->ethdev->ctx_tbl_len) 1022 if (blks > cp->ethdev->ctx_tbl_len)
1023 return -ENOMEM; 1023 return -ENOMEM;
1024 1024
1025 cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL); 1025 cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1026 if (cp->ctx_arr == NULL) 1026 if (cp->ctx_arr == NULL)
1027 return -ENOMEM; 1027 return -ENOMEM;
1028 1028
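
kcalloc(n, size, flags) is preferred over kzalloc(n * size, flags) for zeroed arrays because it refuses the allocation when n * size would overflow, instead of silently wrapping to a short buffer. A minimal sketch; the element type is illustrative:

#include <linux/slab.h>
#include <linux/types.h>

struct sketch_ctx {
	void *addr;
	dma_addr_t mapping;
};

/* Returns NULL both on allocation failure and when blks * element size
 * would overflow; kzalloc(blks * size, ...) has no overflow check. */
static struct sketch_ctx *sketch_alloc_ctx_array(unsigned int blks)
{
	return kcalloc(blks, sizeof(struct sketch_ctx), GFP_KERNEL);
}
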
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index e1f6156b3710..fec939f8f65f 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -38,7 +38,7 @@
38#include <linux/platform_device.h> 38#include <linux/platform_device.h>
39#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
40#include <linux/clk.h> 40#include <linux/clk.h>
41#include <asm/gpio.h> 41#include <linux/gpio.h>
42#include <asm/atomic.h> 42#include <asm/atomic.h>
43 43
44MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); 44MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
@@ -108,7 +108,7 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
108#define CPMAC_RX_INT_CLEAR 0x019c 108#define CPMAC_RX_INT_CLEAR 0x019c
109#define CPMAC_MAC_INT_ENABLE 0x01a8 109#define CPMAC_MAC_INT_ENABLE 0x01a8
110#define CPMAC_MAC_INT_CLEAR 0x01ac 110#define CPMAC_MAC_INT_CLEAR 0x01ac
111#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4) 111#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
112#define CPMAC_MAC_ADDR_MID 0x01d0 112#define CPMAC_MAC_ADDR_MID 0x01d0
113#define CPMAC_MAC_ADDR_HI 0x01d4 113#define CPMAC_MAC_ADDR_HI 0x01d4
114#define CPMAC_MAC_HASH_LO 0x01d8 114#define CPMAC_MAC_HASH_LO 0x01d8
@@ -227,7 +227,7 @@ static void cpmac_dump_regs(struct net_device *dev)
227 for (i = 0; i < CPMAC_REG_END; i += 4) { 227 for (i = 0; i < CPMAC_REG_END; i += 4) {
228 if (i % 16 == 0) { 228 if (i % 16 == 0) {
229 if (i) 229 if (i)
230 printk("\n"); 230 pr_cont("\n");
231 printk(KERN_DEBUG "%s: reg[%p]:", dev->name, 231 printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
232 priv->regs + i); 232 priv->regs + i);
233 } 233 }
@@ -262,7 +262,7 @@ static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
262 for (i = 0; i < skb->len; i++) { 262 for (i = 0; i < skb->len; i++) {
263 if (i % 16 == 0) { 263 if (i % 16 == 0) {
264 if (i) 264 if (i)
265 printk("\n"); 265 pr_cont("\n");
266 printk(KERN_DEBUG "%s: data[%p]:", dev->name, 266 printk(KERN_DEBUG "%s: data[%p]:", dev->name,
267 skb->data + i); 267 skb->data + i);
268 } 268 }
@@ -391,7 +391,7 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
391 if (likely(skb)) { 391 if (likely(skb)) {
392 skb_put(desc->skb, desc->datalen); 392 skb_put(desc->skb, desc->datalen);
393 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); 393 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
394 desc->skb->ip_summed = CHECKSUM_NONE; 394 skb_checksum_none_assert(desc->skb);
395 priv->dev->stats.rx_packets++; 395 priv->dev->stats.rx_packets++;
396 priv->dev->stats.rx_bytes += desc->datalen; 396 priv->dev->stats.rx_bytes += desc->datalen;
397 result = desc->skb; 397 result = desc->skb;
@@ -506,7 +506,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
506 "restart rx from a descriptor that's " 506 "restart rx from a descriptor that's "
507 "not free: %p\n", 507 "not free: %p\n",
508 priv->dev->name, restart); 508 priv->dev->name, restart);
509 goto fatal_error; 509 goto fatal_error;
510 } 510 }
511 511
512 cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); 512 cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
@@ -873,7 +873,8 @@ static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
873 return -EINVAL; 873 return -EINVAL;
874} 874}
875 875
876static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 876static void cpmac_get_ringparam(struct net_device *dev,
877 struct ethtool_ringparam *ring)
877{ 878{
878 struct cpmac_priv *priv = netdev_priv(dev); 879 struct cpmac_priv *priv = netdev_priv(dev);
879 880
@@ -888,7 +889,8 @@ static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam
888 ring->tx_pending = 1; 889 ring->tx_pending = 1;
889} 890}
890 891
891static int cpmac_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) 892static int cpmac_set_ringparam(struct net_device *dev,
893 struct ethtool_ringparam *ring)
892{ 894{
893 struct cpmac_priv *priv = netdev_priv(dev); 895 struct cpmac_priv *priv = netdev_priv(dev);
894 896
@@ -1012,8 +1014,8 @@ static int cpmac_open(struct net_device *dev)
1012 1014
1013 priv->rx_head->prev->hw_next = (u32)0; 1015 priv->rx_head->prev->hw_next = (u32)0;
1014 1016
1015 if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, 1017 res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
1016 dev->name, dev))) { 1018 if (res) {
1017 if (netif_msg_drv(priv)) 1019 if (netif_msg_drv(priv))
1018 printk(KERN_ERR "%s: failed to obtain irq\n", 1020 printk(KERN_ERR "%s: failed to obtain irq\n",
1019 dev->name); 1021 dev->name);
@@ -1133,7 +1135,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1133 } 1135 }
1134 1136
1135 if (phy_id == PHY_MAX_ADDR) { 1137 if (phy_id == PHY_MAX_ADDR) {
1136 dev_err(&pdev->dev, "no PHY present, falling back to switch on MDIO bus 0\n"); 1138 dev_err(&pdev->dev, "no PHY present, falling back "
1139 "to switch on MDIO bus 0\n");
1137 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */ 1140 strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
1138 phy_id = pdev->id; 1141 phy_id = pdev->id;
1139 } 1142 }
@@ -1169,9 +1172,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1169 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1172 priv->msg_enable = netif_msg_init(debug_level, 0xff);
1170 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr)); 1173 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
1171 1174
1172 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 1175 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
1176 mdio_bus_id, phy_id);
1173 1177
1174 priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0, 1178 priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
1175 PHY_INTERFACE_MODE_MII); 1179 PHY_INTERFACE_MODE_MII);
1176 1180
1177 if (IS_ERR(priv->phy)) { 1181 if (IS_ERR(priv->phy)) {
@@ -1182,7 +1186,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1182 goto fail; 1186 goto fail;
1183 } 1187 }
1184 1188
1185 if ((rc = register_netdev(dev))) { 1189 rc = register_netdev(dev);
1190 if (rc) {
1186 printk(KERN_ERR "cpmac: error %i registering device %s\n", rc, 1191 printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
1187 dev->name); 1192 dev->name);
1188 goto fail; 1193 goto fail;
@@ -1248,11 +1253,13 @@ int __devinit cpmac_init(void)
1248 1253
1249 cpmac_mii->reset(cpmac_mii); 1254 cpmac_mii->reset(cpmac_mii);
1250 1255
1251 for (i = 0; i < 300; i++) 1256 for (i = 0; i < 300; i++) {
1252 if ((mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE))) 1257 mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
1258 if (mask)
1253 break; 1259 break;
1254 else 1260 else
1255 msleep(10); 1261 msleep(10);
1262 }
1256 1263
1257 mask &= 0x7fffffff; 1264 mask &= 0x7fffffff;
1258 if (mask & (mask - 1)) { 1265 if (mask & (mask - 1)) {
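
Several cpmac hunks apply the same checkpatch cleanup: an assignment buried in an if condition becomes a separate statement plus a plain test. A minimal sketch of the preferred form; the surrounding function is invented for illustration:

#include <linux/interrupt.h>

static int sketch_open(unsigned int irq, irq_handler_t handler,
		       const char *name, void *dev)
{
	int res;

	/* Preferred over "if ((res = request_irq(...)))": the assignment
	 * and the test read as two steps, and the extra parentheses that
	 * suppress the compiler warning are no longer needed. */
	res = request_irq(irq, handler, IRQF_SHARED, name, dev);
	if (res)
		return res;

	return 0;
}
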
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index f208712c0b90..f9eede0a4b86 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1286,7 +1286,7 @@ irq_err:
1286/* 1286/*
1287 * Release resources when all the ports and offloading have been stopped. 1287 * Release resources when all the ports and offloading have been stopped.
1288 */ 1288 */
1289static void cxgb_down(struct adapter *adapter) 1289static void cxgb_down(struct adapter *adapter, int on_wq)
1290{ 1290{
1291 t3_sge_stop(adapter); 1291 t3_sge_stop(adapter);
1292 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */ 1292 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
@@ -1296,7 +1296,8 @@ static void cxgb_down(struct adapter *adapter)
1296 free_irq_resources(adapter); 1296 free_irq_resources(adapter);
1297 quiesce_rx(adapter); 1297 quiesce_rx(adapter);
1298 t3_sge_stop(adapter); 1298 t3_sge_stop(adapter);
1299 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */ 1299 if (!on_wq)
1300 flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
1300} 1301}
1301 1302
1302static void schedule_chk_task(struct adapter *adap) 1303static void schedule_chk_task(struct adapter *adap)
@@ -1374,7 +1375,7 @@ static int offload_close(struct t3cdev *tdev)
1374 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map); 1375 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1375 1376
1376 if (!adapter->open_device_map) 1377 if (!adapter->open_device_map)
1377 cxgb_down(adapter); 1378 cxgb_down(adapter, 0);
1378 1379
1379 cxgb3_offload_deactivate(adapter); 1380 cxgb3_offload_deactivate(adapter);
1380 return 0; 1381 return 0;
@@ -1409,7 +1410,7 @@ static int cxgb_open(struct net_device *dev)
1409 return 0; 1410 return 0;
1410} 1411}
1411 1412
1412static int cxgb_close(struct net_device *dev) 1413static int __cxgb_close(struct net_device *dev, int on_wq)
1413{ 1414{
1414 struct port_info *pi = netdev_priv(dev); 1415 struct port_info *pi = netdev_priv(dev);
1415 struct adapter *adapter = pi->adapter; 1416 struct adapter *adapter = pi->adapter;
@@ -1436,12 +1437,17 @@ static int cxgb_close(struct net_device *dev)
1436 cancel_delayed_work_sync(&adapter->adap_check_task); 1437 cancel_delayed_work_sync(&adapter->adap_check_task);
1437 1438
1438 if (!adapter->open_device_map) 1439 if (!adapter->open_device_map)
1439 cxgb_down(adapter); 1440 cxgb_down(adapter, on_wq);
1440 1441
1441 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id); 1442 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1442 return 0; 1443 return 0;
1443} 1444}
1444 1445
1446static int cxgb_close(struct net_device *dev)
1447{
1448 return __cxgb_close(dev, 0);
1449}
1450
1445static struct net_device_stats *cxgb_get_stats(struct net_device *dev) 1451static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1446{ 1452{
1447 struct port_info *pi = netdev_priv(dev); 1453 struct port_info *pi = netdev_priv(dev);
@@ -2864,7 +2870,7 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2864 spin_unlock(&adapter->work_lock); 2870 spin_unlock(&adapter->work_lock);
2865} 2871}
2866 2872
2867static int t3_adapter_error(struct adapter *adapter, int reset) 2873static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2868{ 2874{
2869 int i, ret = 0; 2875 int i, ret = 0;
2870 2876
@@ -2879,7 +2885,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
2879 struct net_device *netdev = adapter->port[i]; 2885 struct net_device *netdev = adapter->port[i];
2880 2886
2881 if (netif_running(netdev)) 2887 if (netif_running(netdev))
2882 cxgb_close(netdev); 2888 __cxgb_close(netdev, on_wq);
2883 } 2889 }
2884 2890
2885 /* Stop SGE timers */ 2891 /* Stop SGE timers */
@@ -2950,7 +2956,7 @@ static void fatal_error_task(struct work_struct *work)
2950 int err = 0; 2956 int err = 0;
2951 2957
2952 rtnl_lock(); 2958 rtnl_lock();
2953 err = t3_adapter_error(adapter, 1); 2959 err = t3_adapter_error(adapter, 1, 1);
2954 if (!err) 2960 if (!err)
2955 err = t3_reenable_adapter(adapter); 2961 err = t3_reenable_adapter(adapter);
2956 if (!err) 2962 if (!err)
@@ -3000,7 +3006,7 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3000 if (state == pci_channel_io_perm_failure) 3006 if (state == pci_channel_io_perm_failure)
3001 return PCI_ERS_RESULT_DISCONNECT; 3007 return PCI_ERS_RESULT_DISCONNECT;
3002 3008
3003 ret = t3_adapter_error(adapter, 0); 3009 ret = t3_adapter_error(adapter, 0, 0);
3004 3010
3005 /* Request a slot reset. */ 3011 /* Request a slot reset. */
3006 return PCI_ERS_RESULT_NEED_RESET; 3012 return PCI_ERS_RESULT_NEED_RESET;
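
The on_wq flag threaded through __cxgb_close(), cxgb_down() and t3_adapter_error() avoids a self-deadlock: fatal_error_task() runs on cxgb3_wq, and a work item that calls flush_workqueue() on its own queue waits for itself to complete. A minimal sketch of the guarded flush; names are illustrative:

#include <linux/workqueue.h>

/* A work item must never flush the workqueue it is running on: the
 * flush waits for all queued work, including the caller, to finish.
 * Callers already executing on the queue pass on_wq = true. */
static void sketch_down(struct workqueue_struct *wq, bool on_wq)
{
	if (!on_wq)
		flush_workqueue(wq);
}
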
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index c6485b39eb0e..21db7491f613 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -64,7 +64,7 @@ static inline int offload_activated(struct t3cdev *tdev)
64{ 64{
65 const struct adapter *adapter = tdev2adap(tdev); 65 const struct adapter *adapter = tdev2adap(tdev);
66 66
67 return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)); 67 return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
68} 68}
69 69
70/** 70/**
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index cb42353c9fdd..6990f6c65221 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1997,6 +1997,10 @@
1997 1997
1998#define A_PL_RST 0x6f0 1998#define A_PL_RST 0x6f0
1999 1999
2000#define S_FATALPERREN 4
2001#define V_FATALPERREN(x) ((x) << S_FATALPERREN)
2002#define F_FATALPERREN V_FATALPERREN(1U)
2003
2000#define S_CRSTWRM 1 2004#define S_CRSTWRM 1
2001#define V_CRSTWRM(x) ((x) << S_CRSTWRM) 2005#define V_CRSTWRM(x) ((x) << S_CRSTWRM)
2002#define F_CRSTWRM V_CRSTWRM(1U) 2006#define F_CRSTWRM V_CRSTWRM(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 8ff96c6f6de5..c5a142bea5e9 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2022,7 +2022,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2022 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 2022 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2023 skb->ip_summed = CHECKSUM_UNNECESSARY; 2023 skb->ip_summed = CHECKSUM_UNNECESSARY;
2024 } else 2024 } else
2025 skb->ip_summed = CHECKSUM_NONE; 2025 skb_checksum_none_assert(skb);
2026 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2026 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2027 2027
2028 if (unlikely(p->vlan_valid)) { 2028 if (unlikely(p->vlan_valid)) {
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 427c451be1a7..421d5589cecd 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1408,6 +1408,7 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1408 fatal++; 1408 fatal++;
1409 CH_ALERT(adapter, "%s (0x%x)\n", 1409 CH_ALERT(adapter, "%s (0x%x)\n",
1410 acts->msg, status & acts->mask); 1410 acts->msg, status & acts->mask);
1411 status &= ~acts->mask;
1411 } else if (acts->msg) 1412 } else if (acts->msg)
1412 CH_WARN(adapter, "%s (0x%x)\n", 1413 CH_WARN(adapter, "%s (0x%x)\n",
1413 acts->msg, status & acts->mask); 1414 acts->msg, status & acts->mask);
@@ -1843,11 +1844,10 @@ static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1843 t3_os_link_fault_handler(adap, idx); 1844 t3_os_link_fault_handler(adap, idx);
1844 } 1845 }
1845 1846
1846 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1847
1848 if (cause & XGM_INTR_FATAL) 1847 if (cause & XGM_INTR_FATAL)
1849 t3_fatal_err(adap); 1848 t3_fatal_err(adap);
1850 1849
1850 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1851 return cause != 0; 1851 return cause != 0;
1852} 1852}
1853 1853
@@ -3569,6 +3569,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
3569 t3_write_reg(adapter, A_PM1_TX_MODE, 0); 3569 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3570 chan_init_hw(adapter, adapter->params.chan_map); 3570 chan_init_hw(adapter, adapter->params.chan_map);
3571 t3_sge_init(adapter, &adapter->params.sge); 3571 t3_sge_init(adapter, &adapter->params.sge);
3572 t3_set_reg_field(adapter, A_PL_RST, 0, F_FATALPERREN);
3572 3573
3573 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter)); 3574 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3574 3575
@@ -3682,7 +3683,7 @@ static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3682 mc7->name = name; 3683 mc7->name = name;
3683 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR; 3684 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3684 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); 3685 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3685 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg); 3686 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3686 mc7->width = G_WIDTH(cfg); 3687 mc7->width = G_WIDTH(cfg);
3687} 3688}
3688 3689
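
Two ordering fixes above share a theme. t3_handle_intr_status() now strips each fatal source from status as it is reported, so overlapping table entries cannot alert twice for the same bits, and mac_intr_handler() acknowledges XGM_INT_CAUSE only after the fatal path has run. A minimal sketch of the handle-then-ack shape; the register accessors and mask are illustrative:

#include <linux/io.h>

/* Act on the cause before clearing it: acking first would let a still
 * fatal condition re-assert while it is being handled. */
static int sketch_mac_intr(void __iomem *cause_reg, u32 fatal_mask,
			   void (*fatal_fn)(void))
{
	u32 cause = readl(cause_reg);

	if (cause & fatal_mask)
		fatal_fn();

	writel(cause, cause_reg);	/* ack only after handling */
	return cause != 0;
}
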
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 6e562c0dad7d..3ece9f5069fa 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -463,6 +463,8 @@ struct sge {
463 u8 counter_val[SGE_NCOUNTERS]; 463 u8 counter_val[SGE_NCOUNTERS];
464 unsigned int starve_thres; 464 unsigned int starve_thres;
465 u8 idma_state[2]; 465 u8 idma_state[2];
466 unsigned int egr_start;
467 unsigned int ingr_start;
466 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ 468 void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
467 struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ 469 struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
468 DECLARE_BITMAP(starving_fl, MAX_EGRQ); 470 DECLARE_BITMAP(starving_fl, MAX_EGRQ);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index c327527fbbc8..75b9401fd484 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -175,16 +175,26 @@ enum {
175 175
176static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { 176static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
177 CH_DEVICE(0xa000, 0), /* PE10K */ 177 CH_DEVICE(0xa000, 0), /* PE10K */
178 CH_DEVICE(0x4001, 0), 178 CH_DEVICE(0x4001, -1),
179 CH_DEVICE(0x4002, 0), 179 CH_DEVICE(0x4002, -1),
180 CH_DEVICE(0x4003, 0), 180 CH_DEVICE(0x4003, -1),
181 CH_DEVICE(0x4004, 0), 181 CH_DEVICE(0x4004, -1),
182 CH_DEVICE(0x4005, 0), 182 CH_DEVICE(0x4005, -1),
183 CH_DEVICE(0x4006, 0), 183 CH_DEVICE(0x4006, -1),
184 CH_DEVICE(0x4007, 0), 184 CH_DEVICE(0x4007, -1),
185 CH_DEVICE(0x4008, 0), 185 CH_DEVICE(0x4008, -1),
186 CH_DEVICE(0x4009, 0), 186 CH_DEVICE(0x4009, -1),
187 CH_DEVICE(0x400a, 0), 187 CH_DEVICE(0x400a, -1),
188 CH_DEVICE(0x4401, 4),
189 CH_DEVICE(0x4402, 4),
190 CH_DEVICE(0x4403, 4),
191 CH_DEVICE(0x4404, 4),
192 CH_DEVICE(0x4405, 4),
193 CH_DEVICE(0x4406, 4),
194 CH_DEVICE(0x4407, 4),
195 CH_DEVICE(0x4408, 4),
196 CH_DEVICE(0x4409, 4),
197 CH_DEVICE(0x440a, 4),
188 { 0, } 198 { 0, }
189}; 199};
190 200
@@ -423,10 +433,11 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
423 if (likely(opcode == CPL_SGE_EGR_UPDATE)) { 433 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
424 const struct cpl_sge_egr_update *p = (void *)rsp; 434 const struct cpl_sge_egr_update *p = (void *)rsp;
425 unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); 435 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
426 struct sge_txq *txq = q->adap->sge.egr_map[qid]; 436 struct sge_txq *txq;
427 437
438 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
428 txq->restarts++; 439 txq->restarts++;
429 if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) { 440 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
430 struct sge_eth_txq *eq; 441 struct sge_eth_txq *eq;
431 442
432 eq = container_of(txq, struct sge_eth_txq, q); 443 eq = container_of(txq, struct sge_eth_txq, q);
@@ -658,6 +669,15 @@ static int setup_rss(struct adapter *adap)
658} 669}
659 670
660/* 671/*
672 * Return the channel of the ingress queue with the given qid.
673 */
674static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
675{
676 qid -= p->ingr_start;
677 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
678}
679
680/*
661 * Wait until all NAPI handlers are descheduled. 681 * Wait until all NAPI handlers are descheduled.
662 */ 682 */
663static void quiesce_rx(struct adapter *adap) 683static void quiesce_rx(struct adapter *adap)
@@ -1671,27 +1691,41 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1671 return 0; 1691 return 0;
1672} 1692}
1673 1693
1674/* 1694/**
1675 * Translate a physical EEPROM address to virtual. The first 1K is accessed 1695 * eeprom_ptov - translate a physical EEPROM address to virtual
1676 * through virtual addresses starting at 31K, the rest is accessed through 1696 * @phys_addr: the physical EEPROM address
1677 * virtual addresses starting at 0. This mapping is correct only for PF0. 1697 * @fn: the PCI function number
1698 * @sz: size of function-specific area
1699 *
1700 * Translate a physical EEPROM address to virtual. The first 1K is
1701 * accessed through virtual addresses starting at 31K, the rest is
1702 * accessed through virtual addresses starting at 0.
1703 *
1704 * The mapping is as follows:
1705 * [0..1K) -> [31K..32K)
1706 * [1K..1K+A) -> [31K-A..31K)
1707 * [1K+A..ES) -> [0..ES-A-1K)
1708 *
1709 * where A = @fn * @sz, and ES = EEPROM size.
1678 */ 1710 */
1679static int eeprom_ptov(unsigned int phys_addr) 1711static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1680{ 1712{
1713 fn *= sz;
1681 if (phys_addr < 1024) 1714 if (phys_addr < 1024)
1682 return phys_addr + (31 << 10); 1715 return phys_addr + (31 << 10);
1716 if (phys_addr < 1024 + fn)
1717 return 31744 - fn + phys_addr - 1024;
1683 if (phys_addr < EEPROMSIZE) 1718 if (phys_addr < EEPROMSIZE)
1684 return phys_addr - 1024; 1719 return phys_addr - 1024 - fn;
1685 return -EINVAL; 1720 return -EINVAL;
1686} 1721}
1687 1722
1688/* 1723/*
1689 * The next two routines implement eeprom read/write from physical addresses. 1724 * The next two routines implement eeprom read/write from physical addresses.
1690 * The physical->virtual translation is correct only for PF0.
1691 */ 1725 */
1692static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) 1726static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1693{ 1727{
1694 int vaddr = eeprom_ptov(phys_addr); 1728 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1695 1729
1696 if (vaddr >= 0) 1730 if (vaddr >= 0)
1697 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); 1731 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -1700,7 +1734,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1700 1734
1701static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) 1735static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1702{ 1736{
1703 int vaddr = eeprom_ptov(phys_addr); 1737 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1704 1738
1705 if (vaddr >= 0) 1739 if (vaddr >= 0)
1706 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); 1740 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -1743,6 +1777,14 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1743 aligned_offset = eeprom->offset & ~3; 1777 aligned_offset = eeprom->offset & ~3;
1744 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; 1778 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1745 1779
1780 if (adapter->fn > 0) {
1781 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
1782
1783 if (aligned_offset < start ||
1784 aligned_offset + aligned_len > start + EEPROMPFSIZE)
1785 return -EPERM;
1786 }
1787
1746 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { 1788 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1747 /* 1789 /*
1748 * RMW possibly needed for first or last words. 1790 * RMW possibly needed for first or last words.
@@ -2304,7 +2346,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2304 req->peer_port = htons(0); 2346 req->peer_port = htons(0);
2305 req->local_ip = sip; 2347 req->local_ip = sip;
2306 req->peer_ip = htonl(0); 2348 req->peer_ip = htonl(0);
2307 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; 2349 chan = rxq_to_chan(&adap->sge, queue);
2308 req->opt0 = cpu_to_be64(TX_CHAN(chan)); 2350 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2309 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 2351 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2310 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 2352 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
@@ -2346,7 +2388,7 @@ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
2346 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); 2388 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
2347 req->peer_ip_hi = cpu_to_be64(0); 2389 req->peer_ip_hi = cpu_to_be64(0);
2348 req->peer_ip_lo = cpu_to_be64(0); 2390 req->peer_ip_lo = cpu_to_be64(0);
2349 chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; 2391 chan = rxq_to_chan(&adap->sge, queue);
2350 req->opt0 = cpu_to_be64(TX_CHAN(chan)); 2392 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2351 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 2393 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2352 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 2394 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
@@ -3061,12 +3103,16 @@ static int adap_init0(struct adapter *adap)
3061 params[2] = FW_PARAM_PFVF(L2T_END); 3103 params[2] = FW_PARAM_PFVF(L2T_END);
3062 params[3] = FW_PARAM_PFVF(FILTER_START); 3104 params[3] = FW_PARAM_PFVF(FILTER_START);
3063 params[4] = FW_PARAM_PFVF(FILTER_END); 3105 params[4] = FW_PARAM_PFVF(FILTER_END);
3064 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 5, params, val); 3106 params[5] = FW_PARAM_PFVF(IQFLINT_START);
3107 params[6] = FW_PARAM_PFVF(EQ_START);
3108 ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
3065 if (ret < 0) 3109 if (ret < 0)
3066 goto bye; 3110 goto bye;
3067 port_vec = val[0]; 3111 port_vec = val[0];
3068 adap->tids.ftid_base = val[3]; 3112 adap->tids.ftid_base = val[3];
3069 adap->tids.nftids = val[4] - val[3] + 1; 3113 adap->tids.nftids = val[4] - val[3] + 1;
3114 adap->sge.ingr_start = val[5];
3115 adap->sge.egr_start = val[6];
3070 3116
3071 if (c.ofldcaps) { 3117 if (c.ofldcaps) {
3072 /* query offload-related parameters */ 3118 /* query offload-related parameters */
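
The eeprom_ptov() mapping rewards a worked example. Take fn = 1 and sz = EEPROMPFSIZE = 1024, so A = 1024: physical 512 falls in [0..1K) and maps to 512 + 31744 = 32256; physical 1500 falls in [1K..1K+A) and maps to 31744 - 1024 + 1500 - 1024 = 31196; physical 3000 falls in [1K+A..ES) and maps to 3000 - 2048 = 952. A standalone userspace check of those cases, mirroring the kernel function (with -1 standing in for -EINVAL):

#include <assert.h>

#define EEPROMSIZE 17408	/* serial EEPROM physical size */

static int eeprom_ptov(unsigned int phys_addr, unsigned int fn,
		       unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);		/* 31744 */
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -1;
}

int main(void)
{
	assert(eeprom_ptov(512, 1, 1024) == 32256);
	assert(eeprom_ptov(1500, 1, 1024) == 31196);
	assert(eeprom_ptov(3000, 1, 1024) == 952);
	return 0;
}
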
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index bf38cfc57565..9967f3debce7 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -557,7 +557,8 @@ out: cred = q->avail - cred;
557 557
558 if (unlikely(fl_starving(q))) { 558 if (unlikely(fl_starving(q))) {
559 smp_wmb(); 559 smp_wmb();
560 set_bit(q->cntxt_id, adap->sge.starving_fl); 560 set_bit(q->cntxt_id - adap->sge.egr_start,
561 adap->sge.starving_fl);
561 } 562 }
562 563
563 return cred; 564 return cred;
@@ -974,7 +975,7 @@ out_free: dev_kfree_skb(skb);
974 } 975 }
975 976
976 cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | 977 cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
977 TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0)); 978 TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
978 cpl->pack = htons(0); 979 cpl->pack = htons(0);
979 cpl->len = htons(skb->len); 980 cpl->len = htons(skb->len);
980 cpl->ctrl1 = cpu_to_be64(cntrl); 981 cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1213,7 +1214,8 @@ static void txq_stop_maperr(struct sge_ofld_txq *q)
1213{ 1214{
1214 q->mapping_err++; 1215 q->mapping_err++;
1215 q->q.stops++; 1216 q->q.stops++;
1216 set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr); 1217 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1218 q->adap->sge.txq_maperr);
1217} 1219}
1218 1220
1219/** 1221/**
@@ -1603,7 +1605,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1603 rxq->stats.rx_cso++; 1605 rxq->stats.rx_cso++;
1604 } 1606 }
1605 } else 1607 } else
1606 skb->ip_summed = CHECKSUM_NONE; 1608 skb_checksum_none_assert(skb);
1607 1609
1608 if (unlikely(pkt->vlan_ex)) { 1610 if (unlikely(pkt->vlan_ex)) {
1609 struct vlan_group *grp = pi->vlan_grp; 1611 struct vlan_group *grp = pi->vlan_grp;
@@ -1835,6 +1837,7 @@ static unsigned int process_intrq(struct adapter *adap)
1835 if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) { 1837 if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
1836 unsigned int qid = ntohl(rc->pldbuflen_qid); 1838 unsigned int qid = ntohl(rc->pldbuflen_qid);
1837 1839
1840 qid -= adap->sge.ingr_start;
1838 napi_schedule(&adap->sge.ingr_map[qid]->napi); 1841 napi_schedule(&adap->sge.ingr_map[qid]->napi);
1839 } 1842 }
1840 1843
@@ -2050,14 +2053,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2050 /* set offset to -1 to distinguish ingress queues without FL */ 2053 /* set offset to -1 to distinguish ingress queues without FL */
2051 iq->offset = fl ? 0 : -1; 2054 iq->offset = fl ? 0 : -1;
2052 2055
2053 adap->sge.ingr_map[iq->cntxt_id] = iq; 2056 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2054 2057
2055 if (fl) { 2058 if (fl) {
2056 fl->cntxt_id = ntohs(c.fl0id); 2059 fl->cntxt_id = ntohs(c.fl0id);
2057 fl->avail = fl->pend_cred = 0; 2060 fl->avail = fl->pend_cred = 0;
2058 fl->pidx = fl->cidx = 0; 2061 fl->pidx = fl->cidx = 0;
2059 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; 2062 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2060 adap->sge.egr_map[fl->cntxt_id] = fl; 2063 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2061 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); 2064 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2062 } 2065 }
2063 return 0; 2066 return 0;
@@ -2087,7 +2090,7 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2087 q->stops = q->restarts = 0; 2090 q->stops = q->restarts = 0;
2088 q->stat = (void *)&q->desc[q->size]; 2091 q->stat = (void *)&q->desc[q->size];
2089 q->cntxt_id = id; 2092 q->cntxt_id = id;
2090 adap->sge.egr_map[id] = q; 2093 adap->sge.egr_map[id - adap->sge.egr_start] = q;
2091} 2094}
2092 2095
2093int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, 2096int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
@@ -2259,7 +2262,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2259{ 2262{
2260 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; 2263 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2261 2264
2262 adap->sge.ingr_map[rq->cntxt_id] = NULL; 2265 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2263 t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP, 2266 t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2264 rq->cntxt_id, fl_id, 0xffff); 2267 rq->cntxt_id, fl_id, 0xffff);
2265 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, 2268 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
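
Every sge.c hunk applies one transformation: firmware-assigned queue IDs are no longer zero-based, so each lookup or bitmap index is rebased by the egr_start or ingr_start value queried from firmware in adap_init0(). A minimal sketch of the rebased lookup; the structure and sizes are illustrative:

#define SKETCH_MAX_EGRQ 4096

struct sketch_sge {
	unsigned int egr_start;		  /* first qid firmware gave us */
	void *egr_map[SKETCH_MAX_EGRQ];	  /* indexed by qid - egr_start */
};

/* Rebase the absolute qid reported by hardware into the local table. */
static void *sketch_egr_lookup(struct sketch_sge *s, unsigned int qid)
{
	return s->egr_map[qid - s->egr_start];
}
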
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
index 10a055565776..c26b455f37de 100644
--- a/drivers/net/cxgb4/t4_hw.h
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -42,6 +42,7 @@ enum {
42 MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */ 42 MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
43 EEPROMSIZE = 17408, /* Serial EEPROM physical size */ 43 EEPROMSIZE = 17408, /* Serial EEPROM physical size */
44 EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ 44 EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
45 EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
45 RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */ 46 RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
46 TCB_SIZE = 128, /* TCB size */ 47 TCB_SIZE = 128, /* TCB size */
47 NMTUS = 16, /* size of MTU table */ 48 NMTUS = 16, /* size of MTU table */
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 0969f2fbc1b0..940584a8a640 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -487,6 +487,11 @@ enum fw_params_param_pfvf {
487 FW_PARAMS_PARAM_PFVF_CPMASK = 0x25, 487 FW_PARAMS_PARAM_PFVF_CPMASK = 0x25,
488 FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26, 488 FW_PARAMS_PARAM_PFVF_OCQ_START = 0x26,
489 FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27, 489 FW_PARAMS_PARAM_PFVF_OCQ_END = 0x27,
490 FW_PARAMS_PARAM_PFVF_CONM_MAP = 0x28,
491 FW_PARAMS_PARAM_PFVF_IQFLINT_START = 0x29,
492 FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
493 FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B,
494 FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C,
490}; 495};
491 496
492/* 497/*
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index eb5a1c9cb2d3..f10864ddafbe 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -1520,7 +1520,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1520 __skb_pull(skb, PKTSHIFT); 1520 __skb_pull(skb, PKTSHIFT);
1521 skb->protocol = eth_type_trans(skb, rspq->netdev); 1521 skb->protocol = eth_type_trans(skb, rspq->netdev);
1522 skb_record_rx_queue(skb, rspq->idx); 1522 skb_record_rx_queue(skb, rspq->idx);
1523 skb->dev->last_rx = jiffies; /* XXX removed 2.6.29 */
1524 pi = netdev_priv(skb->dev); 1523 pi = netdev_priv(skb->dev);
1525 rxq->stats.pkts++; 1524 rxq->stats.pkts++;
1526 1525
@@ -1535,7 +1534,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1535 } 1534 }
1536 rxq->stats.rx_cso++; 1535 rxq->stats.rx_cso++;
1537 } else 1536 } else
1538 skb->ip_summed = CHECKSUM_NONE; 1537 skb_checksum_none_assert(skb);
1539 1538
1540 if (unlikely(pkt->vlan_ex)) { 1539 if (unlikely(pkt->vlan_ex)) {
1541 struct vlan_group *grp = pi->vlan_grp; 1540 struct vlan_group *grp = pi->vlan_grp;
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index f3650fd096f4..1c51a7576119 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -676,7 +676,7 @@ static int de620_rx_intr(struct net_device *dev)
676 de620_set_register(dev, W_NPRF, next_rx_page); 676 de620_set_register(dev, W_NPRF, next_rx_page);
677 pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page); 677 pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page);
678 678
679 return (next_rx_page != curr_page); /* That was slightly tricky... */ 679 return next_rx_page != curr_page; /* That was slightly tricky... */
680} 680}
681 681
682/********************************************* 682/*********************************************
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index d7de376d7178..219eb5ad5c12 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -1255,7 +1255,7 @@ static int __devinit dec_lance_probe(struct device *bdev, const int type)
1255 */ 1255 */
1256 init_timer(&lp->multicast_timer); 1256 init_timer(&lp->multicast_timer);
1257 lp->multicast_timer.data = (unsigned long) dev; 1257 lp->multicast_timer.data = (unsigned long) dev;
1258 lp->multicast_timer.function = &lance_set_multicast_retry; 1258 lp->multicast_timer.function = lance_set_multicast_retry;
1259 1259
1260 ret = register_netdev(dev); 1260 ret = register_netdev(dev);
1261 if (ret) { 1261 if (ret) {
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index e5667c55844e..417e14385623 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -1024,7 +1024,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
1024 &data) != DFX_K_SUCCESS) { 1024 &data) != DFX_K_SUCCESS) {
1025 printk("%s: Could not read adapter factory MAC address!\n", 1025 printk("%s: Could not read adapter factory MAC address!\n",
 			print_name);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 		}
 	le32 = cpu_to_le32(data);
 	memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
@@ -1033,7 +1033,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
 			&data) != DFX_K_SUCCESS) {
 		printk("%s: Could not read adapter factory MAC address!\n",
 			print_name);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 		}
 	le32 = cpu_to_le32(data);
 	memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
@@ -1075,7 +1075,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
 	if (top_v == NULL) {
 		printk("%s: Could not allocate memory for host buffers "
 			"and structures!\n", print_name);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 	}
 	memset(top_v, 0, alloc_size);	/* zero out memory before continuing */
 	top_p = bp->kmalloced_dma;	/* get physical address of buffer */
@@ -1145,7 +1145,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
 	DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
 		print_name, (long)bp->cons_block_virt, bp->cons_block_phys);

-	return(DFX_K_SUCCESS);
+	return DFX_K_SUCCESS;
 }


@@ -1195,7 +1195,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
 	if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
 		{
 		printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 		}

 	/*
@@ -1229,7 +1229,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
 				NULL) != DFX_K_SUCCESS)
 		{
 		printk("%s: Could not set adapter burst size!\n", bp->dev->name);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 		}

 	/*
@@ -1246,7 +1246,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
 				NULL) != DFX_K_SUCCESS)
 		{
 		printk("%s: Could not set consumer block address!\n", bp->dev->name);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 		}

 	/*
@@ -1278,7 +1278,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
 		{
 		printk("%s: DMA command request failed!\n", bp->dev->name);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 		}

 	/* Set the initial values for eFDXEnable and MACTReq MIB objects */
@@ -1294,7 +1294,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
 		{
 		printk("%s: DMA command request failed!\n", bp->dev->name);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 		}

 	/* Initialize adapter CAM */
@@ -1302,7 +1302,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
 	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
 		{
 		printk("%s: Adapter CAM update failed!\n", bp->dev->name);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 		}

 	/* Initialize adapter filters */
@@ -1310,7 +1310,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
 	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
 		{
 		printk("%s: Adapter filters update failed!\n", bp->dev->name);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 		}

 	/*
@@ -1328,7 +1328,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
 		printk("%s: Receive buffer allocation failed\n", bp->dev->name);
 		if (get_buffers)
 			dfx_rcv_flush(bp);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 	}

 	/* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
@@ -1339,13 +1339,13 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
 		printk("%s: Start command failed\n", bp->dev->name);
 		if (get_buffers)
 			dfx_rcv_flush(bp);
-		return(DFX_K_FAILURE);
+		return DFX_K_FAILURE;
 	}

 	/* Initialization succeeded, reenable PDQ interrupts */

 	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
-	return(DFX_K_SUCCESS);
+	return DFX_K_SUCCESS;
 	}


@@ -1434,7 +1434,7 @@ static int dfx_open(struct net_device *dev)

 	/* Set device structure info */
 	netif_start_queue(dev);
-	return(0);
+	return 0;
 }


@@ -1526,7 +1526,7 @@ static int dfx_close(struct net_device *dev)

 	free_irq(dev->irq, dev);

-	return(0);
+	return 0;
 }


@@ -2027,7 +2027,7 @@ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)

 	bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
-		return((struct net_device_stats *) &bp->stats);
+		return (struct net_device_stats *)&bp->stats;

 	/* Fill the bp->stats structure with the SMT MIB object values */

@@ -2128,7 +2128,7 @@ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)

 	bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
-		return((struct net_device_stats *) &bp->stats);
+		return (struct net_device_stats *)&bp->stats;

 	/* Fill the bp->stats structure with the FDDI counter values */

@@ -2144,7 +2144,7 @@ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
 	bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
 	bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;

-	return((struct net_device_stats *) &bp->stats);
+	return (struct net_device_stats *)&bp->stats;
 	}


@@ -2354,7 +2354,7 @@ static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
 	{
 		DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
 	}
-	return(0);		/* always return zero */
+	return 0;		/* always return zero */
 	}


@@ -2438,8 +2438,8 @@ static int dfx_ctl_update_cam(DFX_board_t *bp)
 	/* Issue command to update adapter CAM, then return */

 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
-		return(DFX_K_FAILURE);
-	return(DFX_K_SUCCESS);
+		return DFX_K_FAILURE;
+	return DFX_K_SUCCESS;
 	}


@@ -2504,8 +2504,8 @@ static int dfx_ctl_update_filters(DFX_board_t *bp)
 	/* Issue command to update adapter filters, then return */

 	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
-		return(DFX_K_FAILURE);
-	return(DFX_K_SUCCESS);
+		return DFX_K_FAILURE;
+	return DFX_K_SUCCESS;
 	}


@@ -2561,7 +2561,7 @@ static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
 		(status == PI_STATE_K_HALTED) ||
 		(status == PI_STATE_K_DMA_UNAVAIL) ||
 		(status == PI_STATE_K_UPGRADE))
-		return(DFX_K_OUTSTATE);
+		return DFX_K_OUTSTATE;

 	/* Put response buffer on the command response queue */

@@ -2599,7 +2599,7 @@ static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
 		udelay(100);		/* wait for 100 microseconds */
 	}
 	if (timeout_cnt == 0)
-		return(DFX_K_HW_TIMEOUT);
+		return DFX_K_HW_TIMEOUT;

 	/* Bump (and wrap) the completion index and write out to register */

@@ -2619,14 +2619,14 @@ static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
 		udelay(100);		/* wait for 100 microseconds */
 	}
 	if (timeout_cnt == 0)
-		return(DFX_K_HW_TIMEOUT);
+		return DFX_K_HW_TIMEOUT;

 	/* Bump (and wrap) the completion index and write out to register */

 	bp->cmd_rsp_reg.index.comp += 1;
 	bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
 	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
-	return(DFX_K_SUCCESS);
+	return DFX_K_SUCCESS;
 	}


@@ -2700,7 +2700,7 @@ static int dfx_hw_port_ctrl_req(
 		udelay(100);		/* wait for 100 microseconds */
 	}
 	if (timeout_cnt == 0)
-		return(DFX_K_HW_TIMEOUT);
+		return DFX_K_HW_TIMEOUT;

 	/*
 	 * If the address of host_data is non-zero, assume caller has supplied a
@@ -2710,7 +2710,7 @@ static int dfx_hw_port_ctrl_req(

 	if (host_data != NULL)
 		dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
-	return(DFX_K_SUCCESS);
+	return DFX_K_SUCCESS;
 	}


@@ -2800,7 +2800,7 @@ static int dfx_hw_adap_state_rd(DFX_board_t *bp)
 	PI_UINT32 port_status;		/* Port Status register value */

 	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
-	return((port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE);
+	return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
 	}


@@ -2852,8 +2852,8 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
 		udelay(100);		/* wait for 100 microseconds */
 	}
 	if (timeout_cnt == 0)
-		return(DFX_K_HW_TIMEOUT);
-	return(DFX_K_SUCCESS);
+		return DFX_K_HW_TIMEOUT;
+	return DFX_K_SUCCESS;
 	}

/*
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index a2f238d20caa..e1a8216ff692 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -465,7 +465,7 @@ rio_open (struct net_device *dev)
 	init_timer (&np->timer);
 	np->timer.expires = jiffies + 1*HZ;
 	np->timer.data = (unsigned long) dev;
-	np->timer.function = &rio_timer;
+	np->timer.function = rio_timer;
 	add_timer (&np->timer);

 	/* Start Tx/Rx */
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 4fd6b2b4554b..9f6aeefa06bf 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1056,7 +1056,7 @@ dm9000_rx(struct net_device *dev)
 			if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 			else
-				skb->ip_summed = CHECKSUM_NONE;
+				skb_checksum_none_assert(skb);
 		}
 		netif_rx(skb);
 		dev->stats.rx_packets++;
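The dm9000 hunk above (and the e1000/e1000e ones below) replace open-coded stores of CHECKSUM_NONE with skb_checksum_none_assert(). A rough sketch of that helper as introduced in include/linux/skbuff.h around this time; on non-DEBUG builds it compiles to nothing, relying on the fact that a freshly allocated skb already has ip_summed == CHECKSUM_NONE (which is 0):

static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

So the conversion documents an invariant instead of redundantly re-writing it on every received frame.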
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 8e2eab4e7c75..b0aa9e68990a 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2215,10 +2215,10 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
 static int e100_asf(struct nic *nic)
 {
 	/* ASF can be enabled from eeprom */
-	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
+	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
 	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
-	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
+	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
 }

 static int e100_up(struct nic *nic)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 99288b95aead..a881dd0093bd 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -310,6 +310,9 @@ struct e1000_adapter {
 	int need_ioport;

 	bool discarding;
+
+	struct work_struct fifo_stall_task;
+	struct work_struct phy_info_task;
 };

 enum e1000_state_t {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5cc39ed289c6..cb3f84b81793 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -123,8 +123,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 				struct e1000_rx_ring *rx_ring);
 static void e1000_set_rx_mode(struct net_device *netdev);
 static void e1000_update_phy_info(unsigned long data);
+static void e1000_update_phy_info_task(struct work_struct *work);
 static void e1000_watchdog(unsigned long data);
 static void e1000_82547_tx_fifo_stall(unsigned long data);
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				struct net_device *netdev);
 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
@@ -519,8 +521,21 @@ void e1000_down(struct e1000_adapter *adapter)
 	e1000_clean_all_rx_rings(adapter);
 }

+void e1000_reinit_safe(struct e1000_adapter *adapter)
+{
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+	rtnl_lock();
+	e1000_down(adapter);
+	e1000_up(adapter);
+	rtnl_unlock();
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+}
+
 void e1000_reinit_locked(struct e1000_adapter *adapter)
 {
+	/* if rtnl_lock is not held the call path is bogus */
+	ASSERT_RTNL();
 	WARN_ON(in_interrupt());
 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 		msleep(1);
@@ -790,6 +805,70 @@ static const struct net_device_ops e1000_netdev_ops = {
 };

 /**
+ * e1000_init_hw_struct - initialize members of hw struct
+ * @adapter: board private struct
+ * @hw: structure used by e1000_hw.c
+ *
+ * Factors out initialization of the e1000_hw struct to its own function
+ * that can be called very early at init (just after struct allocation).
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ * Returns negative error codes if MAC type setup fails.
+ */
+static int e1000_init_hw_struct(struct e1000_adapter *adapter,
+				struct e1000_hw *hw)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* PCI config space info */
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_id = pdev->subsystem_device;
+	hw->revision_id = pdev->revision;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+	hw->max_frame_size = adapter->netdev->mtu +
+			ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+	/* identify the MAC */
+	if (e1000_set_mac_type(hw)) {
+		e_err(probe, "Unknown MAC Type\n");
+		return -EIO;
+	}
+
+	switch (hw->mac_type) {
+	default:
+		break;
+	case e1000_82541:
+	case e1000_82547:
+	case e1000_82541_rev_2:
+	case e1000_82547_rev_2:
+		hw->phy_init_script = 1;
+		break;
+	}
+
+	e1000_set_media_type(hw);
+	e1000_get_bus_info(hw);
+
+	hw->wait_autoneg_complete = false;
+	hw->tbi_compatibility_en = true;
+	hw->adaptive_ifs = true;
+
+	/* Copper options */
+
+	if (hw->media_type == e1000_media_type_copper) {
+		hw->mdix = AUTO_ALL_MODES;
+		hw->disable_polarity_correction = false;
+		hw->master_slave = E1000_MASTER_SLAVE;
+	}
+
+	return 0;
+}
+
+/**
  * e1000_probe - Device Initialization Routine
  * @pdev: PCI device information struct
  * @ent: entry in e1000_pci_tbl
@@ -826,22 +905,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (err)
 		return err;

-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-		pci_using_dac = 1;
-	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						DMA_BIT_MASK(32));
-			if (err) {
-				pr_err("No usable DMA config, aborting\n");
-				goto err_dma;
-			}
-		}
-		pci_using_dac = 0;
-	}
-
 	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
 	if (err)
 		goto err_pci_reg;
@@ -885,6 +948,32 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 		}
 	}

+	/* make ready for any if (hw->...) below */
+	err = e1000_init_hw_struct(adapter, hw);
+	if (err)
+		goto err_sw_init;
+
+	/*
+	 * there is a workaround being applied below that limits
+	 * 64-bit DMA addresses to 64-bit hardware.  There are some
+	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
+	 */
+	pci_using_dac = 0;
+	if ((hw->bus_type == e1000_bus_type_pcix) &&
+	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		/*
+		 * according to DMA-API-HOWTO, coherent calls will always
+		 * succeed if the set call did
+		 */
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+		pci_using_dac = 1;
+	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	} else {
+		pr_err("No usable DMA config, aborting\n");
+		goto err_dma;
+	}
+
 	netdev->netdev_ops = &e1000_netdev_ops;
 	e1000_set_ethtool_ops(netdev);
 	netdev->watchdog_timeo = 5 * HZ;
@@ -914,8 +1003,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	    (hw->mac_type != e1000_82547))
 		netdev->features |= NETIF_F_TSO;

-	if (pci_using_dac)
+	if (pci_using_dac) {
 		netdev->features |= NETIF_F_HIGHDMA;
+		netdev->vlan_features |= NETIF_F_HIGHDMA;
+	}

 	netdev->vlan_features |= NETIF_F_TSO;
 	netdev->vlan_features |= NETIF_F_HW_CSUM;
@@ -959,21 +1050,21 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (!is_valid_ether_addr(netdev->perm_addr))
 		e_err(probe, "Invalid MAC Address\n");

-	e1000_get_bus_info(hw);
-
 	init_timer(&adapter->tx_fifo_stall_timer);
-	adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
+	adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall;
 	adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;

 	init_timer(&adapter->watchdog_timer);
-	adapter->watchdog_timer.function = &e1000_watchdog;
+	adapter->watchdog_timer.function = e1000_watchdog;
 	adapter->watchdog_timer.data = (unsigned long) adapter;

 	init_timer(&adapter->phy_info_timer);
-	adapter->phy_info_timer.function = &e1000_update_phy_info;
+	adapter->phy_info_timer.function = e1000_update_phy_info;
 	adapter->phy_info_timer.data = (unsigned long)adapter;

+	INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task);
 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
+	INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);

 	e1000_check_options(adapter);

@@ -1072,6 +1163,7 @@ err_eeprom:
 		iounmap(hw->flash_address);
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
+err_dma:
 err_sw_init:
 	iounmap(hw->hw_addr);
 err_ioremap:
@@ -1079,7 +1171,6 @@ err_ioremap:
 err_alloc_etherdev:
 	pci_release_selected_regions(pdev, bars);
 err_pci_reg:
-err_dma:
 	pci_disable_device(pdev);
 	return err;
 }
@@ -1131,62 +1222,12 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
  * @adapter: board private structure to initialize
  *
  * e1000_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
+ * e1000_init_hw_struct MUST be called before this function
 **/

 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
-
-	/* PCI config space info */
-
-	hw->vendor_id = pdev->vendor;
-	hw->device_id = pdev->device;
-	hw->subsystem_vendor_id = pdev->subsystem_vendor;
-	hw->subsystem_id = pdev->subsystem_device;
-	hw->revision_id = pdev->revision;
-
-	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
-
 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-	hw->max_frame_size = netdev->mtu +
-			ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
-	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
-
-	/* identify the MAC */
-
-	if (e1000_set_mac_type(hw)) {
-		e_err(probe, "Unknown MAC Type\n");
-		return -EIO;
-	}
-
-	switch (hw->mac_type) {
-	default:
-		break;
-	case e1000_82541:
-	case e1000_82547:
-	case e1000_82541_rev_2:
-	case e1000_82547_rev_2:
-		hw->phy_init_script = 1;
-		break;
-	}
-
-	e1000_set_media_type(hw);
-
-	hw->wait_autoneg_complete = false;
-	hw->tbi_compatibility_en = true;
-	hw->adaptive_ifs = true;
-
-	/* Copper options */
-
-	if (hw->media_type == e1000_media_type_copper) {
-		hw->mdix = AUTO_ALL_MODES;
-		hw->disable_polarity_correction = false;
-		hw->master_slave = E1000_MASTER_SLAVE;
-	}

 	adapter->num_tx_queues = 1;
 	adapter->num_rx_queues = 1;
@@ -2210,22 +2251,45 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 static void e1000_update_phy_info(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+	schedule_work(&adapter->phy_info_task);
+}
+
+static void e1000_update_phy_info_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						struct e1000_adapter,
+						phy_info_task);
 	struct e1000_hw *hw = &adapter->hw;
+
+	rtnl_lock();
 	e1000_phy_get_info(hw, &adapter->phy_info);
+	rtnl_unlock();
 }

 /**
  * e1000_82547_tx_fifo_stall - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
  **/
-
 static void e1000_82547_tx_fifo_stall(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+	schedule_work(&adapter->fifo_stall_task);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall_task - task to complete work
+ * @work: work struct contained inside adapter struct
+ **/
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						struct e1000_adapter,
+						fifo_stall_task);
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 	u32 tctl;

+	rtnl_lock();
 	if (atomic_read(&adapter->tx_fifo_stall)) {
 		if ((er32(TDT) == er32(TDH)) &&
 		    (er32(TDFT) == er32(TDFH)) &&
@@ -2246,6 +2310,7 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
 			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
 		}
 	}
+	rtnl_unlock();
 }

 bool e1000_has_link(struct e1000_adapter *adapter)
@@ -3113,7 +3178,7 @@ static void e1000_reset_task(struct work_struct *work)
 	struct e1000_adapter *adapter =
 		container_of(work, struct e1000_adapter, reset_task);

-	e1000_reinit_locked(adapter);
+	e1000_reinit_safe(adapter);
 }

 /**
@@ -3535,7 +3600,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 	adapter->total_tx_packets += total_tx_packets;
 	netdev->stats.tx_bytes += total_tx_bytes;
 	netdev->stats.tx_packets += total_tx_packets;
-	return (count < tx_ring->count);
+	return count < tx_ring->count;
 }

 /**
@@ -3552,7 +3617,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 	struct e1000_hw *hw = &adapter->hw;
 	u16 status = (u16)status_err;
 	u8 errors = (u8)(status_err >> 24);
-	skb->ip_summed = CHECKSUM_NONE;
+
+	skb_checksum_none_assert(skb);

 	/* 82543 or newer only */
 	if (unlikely(hw->mac_type < e1000_82543)) return;
@@ -3598,13 +3664,14 @@ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
 			__le16 vlan, struct sk_buff *skb)
 {
-	if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) {
-		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
-					le16_to_cpu(vlan) &
-					E1000_RXD_SPC_VLAN_MASK);
-	} else {
-		netif_receive_skb(skb);
-	}
+	skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+	if ((unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))))
+		vlan_gro_receive(&adapter->napi, adapter->vlgrp,
+				le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK,
+				skb);
+	else
+		napi_gro_receive(&adapter->napi, skb);
 }

 /**
@@ -3762,8 +3829,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 			goto next_desc;
 		}

-		skb->protocol = eth_type_trans(skb, netdev);
-
 		e1000_receive_skb(adapter, status, rx_desc->special, skb);

 next_desc:
@@ -3926,8 +3991,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 				((u32)(rx_desc->errors) << 24),
 				le16_to_cpu(rx_desc->csum), skb);

-		skb->protocol = eth_type_trans(skb, netdev);
-
 		e1000_receive_skb(adapter, status, rx_desc->special, skb);

 next_desc:
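The e1000 hunks above are an instance of a common kernel conversion: a timer callback runs in atomic context and must not sleep, so any work that needs rtnl_lock() (a sleeping mutex) is deferred to a workqueue, and the timer merely schedules it. A minimal sketch of the pattern; the names my_adapter, my_task, my_timer_fn and my_task_fn are illustrative, not from the driver:

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>

struct my_adapter {
	struct work_struct my_task;
	/* ... device state ... */
};

/* Timer callback: atomic context, must not sleep. */
static void my_timer_fn(unsigned long data)
{
	struct my_adapter *adapter = (struct my_adapter *)data;

	schedule_work(&adapter->my_task);
}

/* Work handler: process context, sleeping locks are fine here. */
static void my_task_fn(struct work_struct *work)
{
	struct my_adapter *adapter =
		container_of(work, struct my_adapter, my_task);

	rtnl_lock();
	/* ... the real work, e.g. e1000_phy_get_info() above ... */
	rtnl_unlock();
}

The same reasoning explains e1000_reinit_safe(): the reset task now runs from a workqueue, so it can take rtnl_lock() itself, while e1000_reinit_locked() asserts that its callers already hold it.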
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 6355a1b779d3..b7f15b3f0e03 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -368,7 +368,7 @@ out:
 static u32 e1000_get_rx_csum(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	return (adapter->flags & FLAG_RX_CSUM_ENABLED);
+	return adapter->flags & FLAG_RX_CSUM_ENABLED;
 }

 static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
@@ -389,7 +389,7 @@ static int e1000_set_rx_csum(struct net_device *netdev, u32 data)

 static u32 e1000_get_tx_csum(struct net_device *netdev)
 {
-	return ((netdev->features & NETIF_F_HW_CSUM) != 0);
+	return (netdev->features & NETIF_F_HW_CSUM) != 0;
 }

 static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index e561d15c3eb1..c69563c3ce96 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -475,7 +475,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 {
 	u16 status = (u16)status_err;
 	u8 errors = (u8)(status_err >> 24);
-	skb->ip_summed = CHECKSUM_NONE;
+
+	skb_checksum_none_assert(skb);

 	/* Ignore Checksum bit is set */
 	if (status & E1000_RXD_STAT_IXSM)
@@ -1052,7 +1053,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 	adapter->total_tx_packets += total_tx_packets;
 	netdev->stats.tx_bytes += total_tx_bytes;
 	netdev->stats.tx_packets += total_tx_packets;
-	return (count < tx_ring->count);
+	return count < tx_ring->count;
 }

 /**
@@ -3411,22 +3412,16 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)

 	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
 		adapter->int_mode = E1000E_INT_MODE_LEGACY;
-		err = -EIO;
-		e_info("MSI interrupt test failed!\n");
-	}
+		e_info("MSI interrupt test failed, using legacy interrupt.\n");
+	} else
+		e_dbg("MSI interrupt test succeeded!\n");

 	free_irq(adapter->pdev->irq, netdev);
 	pci_disable_msi(adapter->pdev);

-	if (err == -EIO)
-		goto msi_test_failed;
-
-	/* okay so the test worked, restore settings */
-	e_dbg("MSI interrupt test succeeded!\n");
 msi_test_failed:
 	e1000e_set_interrupt_capability(adapter);
-	e1000_request_irq(adapter);
-	return err;
+	return e1000_request_irq(adapter);
 }

 /**
@@ -3458,21 +3453,6 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
 		pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
 	}

-	/* success ! */
-	if (!err)
-		return 0;
-
-	/* EIO means MSI test failed */
-	if (err != -EIO)
-		return err;
-
-	/* back to INTx mode */
-	e_warn("MSI interrupt test failed, using legacy interrupt.\n");
-
-	e1000_free_irq(adapter);
-
-	err = e1000_request_irq(adapter);
-
 	return err;
 }

@@ -5712,8 +5692,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	netdev->vlan_features |= NETIF_F_HW_CSUM;
 	netdev->vlan_features |= NETIF_F_SG;

-	if (pci_using_dac)
+	if (pci_using_dac) {
 		netdev->features |= NETIF_F_HIGHDMA;
+		netdev->vlan_features |= NETIF_F_HIGHDMA;
+	}

 	if (e1000e_enable_mng_pass_thru(&adapter->hw))
 		adapter->flags |= FLAG_MNG_PT_ENABLED;
@@ -5754,11 +5736,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	}

 	init_timer(&adapter->watchdog_timer);
-	adapter->watchdog_timer.function = &e1000_watchdog;
+	adapter->watchdog_timer.function = e1000_watchdog;
 	adapter->watchdog_timer.data = (unsigned long) adapter;

 	init_timer(&adapter->phy_info_timer);
-	adapter->phy_info_timer.function = &e1000_update_phy_info;
+	adapter->phy_info_timer.function = e1000_update_phy_info;
 	adapter->phy_info_timer.data = (unsigned long) adapter;

 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 8d97f168f018..7c826319ee5a 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1457,11 +1457,11 @@ hardware_send_packet(struct net_device *dev, void *buf, short length)
 	if (net_debug > 5)
 		printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name);

 	/* determine how much of the transmit buffer space is available */
 	if (lp->tx_end > lp->tx_start)
 		tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start);
 	else if (lp->tx_end < lp->tx_start)
 		tx_available = lp->tx_start - lp->tx_end;
 	else tx_available = lp->xmt_ram;

 	if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index a333b42111b8..190fb691d20b 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -180,7 +180,7 @@ static void ehea_update_firmware_handles(void)
 		num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

 	if (num_fw_handles) {
-		arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
+		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
 		if (!arr)
 			goto out;  /* Keep the existing array */
 	} else
@@ -265,7 +265,7 @@ static void ehea_update_bcmc_registrations(void)
 	}

 	if (num_registrations) {
-		arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
+		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
 		if (!arr)
 			goto out;  /* Keep the existing array */
 	} else
@@ -818,8 +818,6 @@ static void check_sqs(struct ehea_port *port)
 			}
 		}
 	}
-
-	return;
 }


@@ -3721,7 +3719,7 @@ int __init ehea_module_init(void)
 	if (ret)
 		ehea_info("failed registering memory remove notifier");

-	ret = crash_shutdown_register(&ehea_crash_handler);
+	ret = crash_shutdown_register(ehea_crash_handler);
 	if (ret)
 		ehea_info("failed registering crash handler");

@@ -3746,7 +3744,7 @@ out3:
 out2:
 	unregister_memory_notifier(&ehea_mem_nb);
 	unregister_reboot_notifier(&ehea_reboot_nb);
-	crash_shutdown_unregister(&ehea_crash_handler);
+	crash_shutdown_unregister(ehea_crash_handler);
 out:
 	return ret;
 }
@@ -3759,7 +3757,7 @@ static void __exit ehea_module_exit(void)
 	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
 	ibmebus_unregister_driver(&ehea_driver);
 	unregister_reboot_notifier(&ehea_reboot_nb);
-	ret = crash_shutdown_unregister(&ehea_crash_handler);
+	ret = crash_shutdown_unregister(ehea_crash_handler);
 	if (ret)
 		ehea_info("failed unregistering crash handler");
 	unregister_memory_notifier(&ehea_mem_nb);
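The kzalloc(n * sizeof(*arr)) to kcalloc(n, sizeof(*arr)) conversions above are not cosmetic: with a large element count the open-coded multiplication can silently wrap, producing an undersized allocation. Roughly what kcalloc() adds over kzalloc() (a sketch, not the exact mm/ implementation):

#include <linux/slab.h>
#include <linux/kernel.h>

static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > ULONG_MAX / size)
		return NULL;		/* n * size would overflow */
	return kzalloc(n * size, flags);
}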
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index f239aa8c6f4c..75869ed7226f 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@

 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"1.4.1.1"
+#define DRV_VERSION		"1.4.1.2"
 #define DRV_COPYRIGHT		"Copyright 2008-2010 Cisco Systems, Inc"

 #define ENIC_BARS_MAX		6
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 9aab85366d21..711077a2e345 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -911,7 +911,9 @@ static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)

 static int enic_set_mac_address(struct net_device *netdev, void *p)
 {
-	return -EOPNOTSUPP;
+	struct sockaddr *saddr = p;
+
+	return enic_set_mac_addr(netdev, (char *)saddr->sa_data);
 }

 static int enic_dev_packet_filter(struct enic *enic, int directed,
@@ -2152,17 +2154,6 @@ void enic_dev_deinit(struct enic *enic)
 	enic_clear_intr_mode(enic);
 }

-static int enic_dev_stats_clear(struct enic *enic)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_stats_clear(enic->vdev);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
 int enic_dev_init(struct enic *enic)
 {
 	struct device *dev = enic_get_dev(enic);
@@ -2205,10 +2196,6 @@ int enic_dev_init(struct enic *enic)

 	enic_init_vnic_resources(enic);

-	/* Clear LIF stats
-	 */
-	enic_dev_stats_clear(enic);
-
 	err = enic_set_rq_alloc_buf(enic);
 	if (err) {
 		dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 6a5b578a69e1..08d5d42da260 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -74,6 +74,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
 	struct vnic_dev_bar *bar, unsigned int num_bars)
 {
 	struct vnic_resource_header __iomem *rh;
+	struct mgmt_barmap_hdr __iomem *mrh;
 	struct vnic_resource __iomem *r;
 	u8 type;

@@ -85,22 +86,32 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
 		return -EINVAL;
 	}

 	rh = bar->vaddr;
+	mrh = bar->vaddr;
 	if (!rh) {
 		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
 		return -EINVAL;
 	}

-	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
-	    ioread32(&rh->version) != VNIC_RES_VERSION) {
-		pr_err("vNIC BAR0 res magic/version error "
-			"exp (%lx/%lx) curr (%x/%x)\n",
+	/* Check for mgmt vnic in addition to normal vnic */
+	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
+	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
+		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
+		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
+			pr_err("vNIC BAR0 res magic/version error "
+				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
 			VNIC_RES_MAGIC, VNIC_RES_VERSION,
+			MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
 			ioread32(&rh->magic), ioread32(&rh->version));
 		return -EINVAL;
+		}
 	}

-	r = (struct vnic_resource __iomem *)(rh + 1);
+	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
+		r = (struct vnic_resource __iomem *)(mrh + 1);
+	else
+		r = (struct vnic_resource __iomem *)(rh + 1);
+

 	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index 20661755df6b..9abb3d51dea1 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -238,6 +238,18 @@ enum vnic_devcmd_cmd {
 	 * out: (u32)a0=status of proxied cmd
 	 *      a1-a15=out args of proxied cmd */
 	CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+	/*
+	 * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+	 * or SR-IOV virtual vnic */
+	CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+	/*
+	 * in:  (u64)a0=paddr of buffer to put latest VIC VIF-CONFIG-INFO TLV in
+	 *      (u32)a1=length of buffer in a0
+	 * out: (u64)a0=paddr of buffer with latest VIC VIF-CONFIG-INFO TLV
+	 *      (u32)a1=actual length of latest VIC VIF-CONFIG-INFO TLV */
+	CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
 };

 /* flags for CMD_OPEN */
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 3b3291248956..e8740e3704e4 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -30,7 +30,7 @@ struct vnic_enet_config {
 	u32 wq_desc_count;
 	u32 rq_desc_count;
 	u16 mtu;
-	u16 intr_timer;
+	u16 intr_timer_deprecated;
 	u8 intr_timer_type;
 	u8 intr_mode;
 	char devname[16];
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
index 810287beff14..e0a73f1ca6f4 100644
--- a/drivers/net/enic/vnic_resource.h
+++ b/drivers/net/enic/vnic_resource.h
@@ -22,6 +22,11 @@

 #define VNIC_RES_MAGIC		0x766E6963L	/* 'vnic' */
 #define VNIC_RES_VERSION	0x00000000L
+#define MGMTVNIC_MAGIC		0x544d474dL	/* 'MGMT' */
+#define MGMTVNIC_VERSION	0x00000000L
+
+/* The MAC address assigned to the CFG vNIC is fixed. */
+#define MGMTVNIC_MAC		{ 0x02, 0x00, 0x54, 0x4d, 0x47, 0x4d }

 /* vNIC resource types */
 enum vnic_res_type {
@@ -52,6 +57,14 @@ struct vnic_resource_header {
 	u32 version;
 };

+struct mgmt_barmap_hdr {
+	u32 magic;		/* magic number */
+	u32 version;		/* header format version */
+	u16 lif;		/* loopback lif for mgmt frames */
+	u16 pci_slot;		/* installed pci slot */
+	char serial[16];	/* card serial number */
+};
+
 struct vnic_resource {
 	u8 type;
 	u8 bar;
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index dbb2aca258b9..b236d7cbc137 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -77,8 +77,10 @@ void vnic_rq_free(struct vnic_rq *rq)
 	vnic_dev_free_desc_ring(vdev, &rq->ring);

 	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
-		kfree(rq->bufs[i]);
-		rq->bufs[i] = NULL;
+		if (rq->bufs[i]) {
+			kfree(rq->bufs[i]);
+			rq->bufs[i] = NULL;
+		}
 	}

 	rq->ctrl = NULL;
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 2dc48f91abf7..4b6f0212c8a2 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -143,7 +143,7 @@ static inline void vnic_rq_post(struct vnic_rq *rq,

 static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
 {
-	return ((rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0);
+	return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
 }

 static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index 197c9d24af82..4725b79de0ef 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -54,8 +54,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
 	if (!vp || !value)
 		return -EINVAL;

-	if (ntohl(vp->length) + sizeof(*tlv) + length >
-	    VIC_PROVINFO_MAX_TLV_DATA)
+	if (ntohl(vp->length) + offsetof(struct vic_provinfo_tlv, value) +
+	    length > VIC_PROVINFO_MAX_TLV_DATA)
 		return -ENOMEM;

 	tlv = (struct vic_provinfo_tlv *)((u8 *)vp->tlv +
@@ -66,7 +66,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
 	memcpy(tlv->value, value, length);

 	vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
-	vp->length = htonl(ntohl(vp->length) + sizeof(*tlv) + length);
+	vp->length = htonl(ntohl(vp->length) +
+		offsetof(struct vic_provinfo_tlv, value) + length);

 	return 0;
 }
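The sizeof-to-offsetof change matters for structures with a trailing variable-length member: sizeof() includes any tail padding and can therefore overstate the header size, while offsetof() of the flexible member gives exactly where the payload begins. A small standalone illustration; the struct layout here is made up for the demonstration and is not the enic TLV:

#include <stdio.h>
#include <stddef.h>

struct tlv {
	unsigned int   type;	/* 4 bytes */
	unsigned short length;	/* 2 bytes */
	unsigned char  value[];	/* payload starts here */
};

int main(void)
{
	/* On typical ABIs: sizeof is padded up to 8, payload starts at 6. */
	printf("sizeof(struct tlv)          = %zu\n", sizeof(struct tlv));
	printf("offsetof(struct tlv, value) = %zu\n",
	       offsetof(struct tlv, value));
	return 0;
}

Using sizeof in the length accounting would count those padding bytes for every TLV appended, slowly corrupting the running total.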
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index 122e33bcc578..4b2a6c6a569b 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -77,8 +77,10 @@ void vnic_wq_free(struct vnic_wq *wq)
 	vnic_dev_free_desc_ring(vdev, &wq->ring);

 	for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
-		kfree(wq->bufs[i]);
-		wq->bufs[i] = NULL;
+		if (wq->bufs[i]) {
+			kfree(wq->bufs[i]);
+			wq->bufs[i] = NULL;
+		}
 	}

 	wq->ctrl = NULL;
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 57c8ac0ef3f1..32543a300b81 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -758,7 +758,7 @@ static int epic_open(struct net_device *dev)
 	init_timer(&ep->timer);
 	ep->timer.expires = jiffies + 3*HZ;
 	ep->timer.data = (unsigned long)dev;
-	ep->timer.function = &epic_timer;	/* timer handler */
+	ep->timer.function = epic_timer;	/* timer handler */
 	add_timer(&ep->timer);

 	return 0;
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 10e39f2b31c3..fb717be511f6 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -637,7 +637,9 @@ static void eth16i_initialize(struct net_device *dev, int boot)

 	/* Set interface port type */
 	if(boot) {
-		char *porttype[] = {"BNC", "DIX", "TP", "AUTO", "FROM_EPROM" };
+		static const char * const porttype[] = {
+			"BNC", "DIX", "TP", "AUTO", "FROM_EPROM"
+		};

 		switch(dev->if_port)
 		{
@@ -794,7 +796,7 @@ static int eth16i_receive_probe_packet(int ioaddr)

 			if(eth16i_debug > 1)
 				printk(KERN_DEBUG "RECEIVE_PACKET\n");
-			return(0); /* Found receive packet */
+			return 0; /* Found receive packet */
 		}
 	}

@@ -803,7 +805,7 @@ static int eth16i_receive_probe_packet(int ioaddr)
 		printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG));
 	}

-	return(0); /* Return success */
+	return 0; /* Return success */
 }

 #if 0
@@ -839,7 +841,7 @@ static int __init eth16i_get_irq(int ioaddr)

 	if( ioaddr < 0x1000) {
 		cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
-		return( eth16i_irqmap[ ((cbyte & 0xC0) >> 6) ] );
+		return eth16i_irqmap[((cbyte & 0xC0) >> 6)];
 	} else {  /* Oh..the card is EISA so method getting IRQ different */
 		unsigned short index = 0;
 		cbyte = inb(ioaddr + EISA_IRQ_REG);
@@ -847,7 +849,7 @@ static int __init eth16i_get_irq(int ioaddr)
 			cbyte = cbyte >> 1;
 			index++;
 		}
-		return( eth32i_irqmap[ index ] );
+		return eth32i_irqmap[index];
 	}
 }

@@ -907,7 +909,7 @@ static int eth16i_read_eeprom(int ioaddr, int offset)
 	data = eth16i_read_eeprom_word(ioaddr);
 	outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);

-	return(data);
+	return data;
 }

 static int eth16i_read_eeprom_word(int ioaddr)
@@ -926,7 +928,7 @@ static int eth16i_read_eeprom_word(int ioaddr)
 		eeprom_slow_io();
 	}

-	return(data);
+	return data;
 }

 static void eth16i_eeprom_cmd(int ioaddr, unsigned char command)
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 6d653c459c1f..c5a2fe099a8d 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -806,11 +806,6 @@ static void ethoc_tx_timeout(struct net_device *dev)
 	ethoc_interrupt(dev->irq, dev);
 }

-static struct net_device_stats *ethoc_stats(struct net_device *dev)
-{
-	return &dev->stats;
-}
-
 static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ethoc *priv = netdev_priv(dev);
@@ -863,7 +858,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
 	.ndo_set_multicast_list = ethoc_set_multicast_list,
 	.ndo_change_mtu = ethoc_change_mtu,
 	.ndo_tx_timeout = ethoc_tx_timeout,
-	.ndo_get_stats = ethoc_stats,
 	.ndo_start_xmit = ethoc_start_xmit,
 };

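Dropping ethoc's ndo_get_stats hook is safe because the netdev core falls back to the core-maintained dev->stats when a driver supplies no stats callback, so a handler that only returns &dev->stats is dead weight. A simplified sketch of the fallback; the real dev_get_stats() of this era also handles ndo_get_stats64 and is not literally this function:

#include <linux/netdevice.h>

static const struct net_device_stats *
get_stats_sketch(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops && ops->ndo_get_stats)
		return ops->ndo_get_stats(dev);
	return &dev->stats;	/* counters the core maintains anyway */
}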
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index d7e8f6b8f4cf..dd54abe2f710 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -915,14 +915,14 @@ static int netdev_open(struct net_device *dev)
 	init_timer(&np->timer);
 	np->timer.expires = RUN_AT(3 * HZ);
 	np->timer.data = (unsigned long) dev;
-	np->timer.function = &netdev_timer;
+	np->timer.function = netdev_timer;

 	/* timer handler */
 	add_timer(&np->timer);

 	init_timer(&np->reset_timer);
 	np->reset_timer.data = (unsigned long) dev;
-	np->reset_timer.function = &reset_timer;
+	np->reset_timer.function = reset_timer;
 	np->reset_timer_armed = 0;

 	return 0;
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index e3e10b4add9c..e9f5d030bc26 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -771,11 +771,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)


 /* ethtool interface */
-static void mpc52xx_fec_get_drvinfo(struct net_device *dev,
-		struct ethtool_drvinfo *info)
-{
-	strcpy(info->driver, DRIVER_NAME);
-}

 static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
@@ -810,7 +805,6 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
 }

 static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
-	.get_drvinfo = mpc52xx_fec_get_drvinfo,
 	.get_settings = mpc52xx_fec_get_settings,
 	.set_settings = mpc52xx_fec_set_settings,
 	.get_link = ethtool_op_get_link,
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 4da05b1b445c..ddac63cefbaa 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -4620,7 +4620,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4620static u32 nv_get_rx_csum(struct net_device *dev) 4620static u32 nv_get_rx_csum(struct net_device *dev)
4621{ 4621{
4622 struct fe_priv *np = netdev_priv(dev); 4622 struct fe_priv *np = netdev_priv(dev);
4623 return (np->rx_csum) != 0; 4623 return np->rx_csum != 0;
4624} 4624}
4625 4625
4626static int nv_set_rx_csum(struct net_device *dev, u32 data) 4626static int nv_set_rx_csum(struct net_device *dev, u32 data)
@@ -5440,13 +5440,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5440 5440
5441 init_timer(&np->oom_kick); 5441 init_timer(&np->oom_kick);
5442 np->oom_kick.data = (unsigned long) dev; 5442 np->oom_kick.data = (unsigned long) dev;
5443 np->oom_kick.function = &nv_do_rx_refill; /* timer handler */ 5443 np->oom_kick.function = nv_do_rx_refill; /* timer handler */
5444 init_timer(&np->nic_poll); 5444 init_timer(&np->nic_poll);
5445 np->nic_poll.data = (unsigned long) dev; 5445 np->nic_poll.data = (unsigned long) dev;
5446 np->nic_poll.function = &nv_do_nic_poll; /* timer handler */ 5446 np->nic_poll.function = nv_do_nic_poll; /* timer handler */
5447 init_timer(&np->stats_poll); 5447 init_timer(&np->stats_poll);
5448 np->stats_poll.data = (unsigned long) dev; 5448 np->stats_poll.data = (unsigned long) dev;
5449 np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ 5449 np->stats_poll.function = nv_do_stats_poll; /* timer handler */
5450 5450
5451 err = pci_enable_device(pci_dev); 5451 err = pci_enable_device(pci_dev);
5452 if (err) 5452 if (err)
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index d6e3111959ab..d684f187de57 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1036,7 +1036,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev,
1036 ndev = alloc_etherdev(privsize); 1036 ndev = alloc_etherdev(privsize);
1037 if (!ndev) { 1037 if (!ndev) {
1038 ret = -ENOMEM; 1038 ret = -ENOMEM;
1039 goto out_free_fpi; 1039 goto out_put;
1040 } 1040 }
1041 1041
1042 SET_NETDEV_DEV(ndev, &ofdev->dev); 1042 SET_NETDEV_DEV(ndev, &ofdev->dev);
@@ -1099,6 +1099,7 @@ out_cleanup_data:
1099out_free_dev: 1099out_free_dev:
1100 free_netdev(ndev); 1100 free_netdev(ndev);
1101 dev_set_drvdata(&ofdev->dev, NULL); 1101 dev_set_drvdata(&ofdev->dev, NULL);
1102out_put:
1102 of_node_put(fpi->phy_node); 1103 of_node_put(fpi->phy_node);
1103out_free_fpi: 1104out_free_fpi:
1104 kfree(fpi); 1105 kfree(fpi);
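The fs_enet change is an error-path ordering fix: fpi->phy_node is acquired before alloc_etherdev(), so a failed allocation must still drop that reference; the new out_put: label keeps the unwind labels in strict reverse order of acquisition. The shape of the pattern, with hypothetical acquire/release helpers:

    extern int acquire_a(void), acquire_b(void);    /* hypothetical */
    extern void release_a(void);

    static int demo_probe(void)
    {
            int ret;

            if (acquire_a())                /* e.g. finding the phy node */
                    return -ENODEV;
            if (acquire_b()) {              /* e.g. alloc_etherdev() */
                    ret = -ENOMEM;
                    goto out_put_a;         /* must undo A, not skip past it */
            }
            return 0;

    out_put_a:
            release_a();                    /* e.g. of_node_put() */
            return ret;
    }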
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d4bf91aac25f..8d3a2ccbc953 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -125,7 +125,7 @@ int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
125 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); 125 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
126 126
127 /* Write to the local MII regs */ 127 /* Write to the local MII regs */
128 return(fsl_pq_local_mdio_write(regs, mii_id, regnum, value)); 128 return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
129} 129}
130 130
131/* 131/*
@@ -137,7 +137,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
137 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); 137 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
138 138
139 /* Read the local MII regs */ 139 /* Read the local MII regs */
140 return(fsl_pq_local_mdio_read(regs, mii_id, regnum)); 140 return fsl_pq_local_mdio_read(regs, mii_id, regnum);
141} 141}
142 142
143/* Reset the MIIM registers, and wait for the bus to free */ 143/* Reset the MIIM registers, and wait for the bus to free */
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4f7c3f3ca234..f30adbf86bb2 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1859,7 +1859,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1859 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1859 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1860 dev->name, grp->interruptError); 1860 dev->name, grp->interruptError);
1861 1861
1862 goto err_irq_fail; 1862 goto err_irq_fail;
1863 } 1863 }
1864 1864
1865 if ((err = request_irq(grp->interruptTransmit, gfar_transmit, 1865 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
@@ -2048,7 +2048,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2048 u32 bufaddr; 2048 u32 bufaddr;
2049 unsigned long flags; 2049 unsigned long flags;
2050 unsigned int nr_frags, nr_txbds, length; 2050 unsigned int nr_frags, nr_txbds, length;
2051 union skb_shared_tx *shtx;
2052 2051
2053 /* 2052 /*
2054 * TOE=1 frames larger than 2500 bytes may see excess delays 2053 * TOE=1 frames larger than 2500 bytes may see excess delays
@@ -2069,10 +2068,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2069 txq = netdev_get_tx_queue(dev, rq); 2068 txq = netdev_get_tx_queue(dev, rq);
2070 base = tx_queue->tx_bd_base; 2069 base = tx_queue->tx_bd_base;
2071 regs = tx_queue->grp->regs; 2070 regs = tx_queue->grp->regs;
2072 shtx = skb_tx(skb);
2073 2071
2074 /* check if time stamp should be generated */ 2072 /* check if time stamp should be generated */
2075 if (unlikely(shtx->hardware && priv->hwts_tx_en)) 2073 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
2074 priv->hwts_tx_en))
2076 do_tstamp = 1; 2075 do_tstamp = 1;
2077 2076
2078 /* make space for additional header when fcb is needed */ 2077 /* make space for additional header when fcb is needed */
@@ -2174,7 +2173,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2174 2173
2175 /* Setup tx hardware time stamping if requested */ 2174 /* Setup tx hardware time stamping if requested */
2176 if (unlikely(do_tstamp)) { 2175 if (unlikely(do_tstamp)) {
2177 shtx->in_progress = 1; 2176 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2178 if (fcb == NULL) 2177 if (fcb == NULL)
2179 fcb = gfar_add_fcb(skb); 2178 fcb = gfar_add_fcb(skb);
2180 fcb->ptp = 1; 2179 fcb->ptp = 1;
@@ -2446,7 +2445,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2446 int howmany = 0; 2445 int howmany = 0;
2447 u32 lstatus; 2446 u32 lstatus;
2448 size_t buflen; 2447 size_t buflen;
2449 union skb_shared_tx *shtx;
2450 2448
2451 rx_queue = priv->rx_queue[tx_queue->qindex]; 2449 rx_queue = priv->rx_queue[tx_queue->qindex];
2452 bdp = tx_queue->dirty_tx; 2450 bdp = tx_queue->dirty_tx;
@@ -2461,8 +2459,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2461 * When time stamping, one additional TxBD must be freed. 2459 * When time stamping, one additional TxBD must be freed.
2462 * Also, we need to dma_unmap_single() the TxPAL. 2460 * Also, we need to dma_unmap_single() the TxPAL.
2463 */ 2461 */
2464 shtx = skb_tx(skb); 2462 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2465 if (unlikely(shtx->in_progress))
2466 nr_txbds = frags + 2; 2463 nr_txbds = frags + 2;
2467 else 2464 else
2468 nr_txbds = frags + 1; 2465 nr_txbds = frags + 1;
@@ -2476,7 +2473,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2476 (lstatus & BD_LENGTH_MASK)) 2473 (lstatus & BD_LENGTH_MASK))
2477 break; 2474 break;
2478 2475
2479 if (unlikely(shtx->in_progress)) { 2476 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2480 next = next_txbd(bdp, base, tx_ring_size); 2477 next = next_txbd(bdp, base, tx_ring_size);
2481 buflen = next->length + GMAC_FCB_LEN; 2478 buflen = next->length + GMAC_FCB_LEN;
2482 } else 2479 } else
@@ -2485,7 +2482,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2485 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2482 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2486 buflen, DMA_TO_DEVICE); 2483 buflen, DMA_TO_DEVICE);
2487 2484
2488 if (unlikely(shtx->in_progress)) { 2485 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
2489 struct skb_shared_hwtstamps shhwtstamps; 2486 struct skb_shared_hwtstamps shhwtstamps;
2490 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7); 2487 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2491 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 2488 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -2657,7 +2654,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2657 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU)) 2654 if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
2658 skb->ip_summed = CHECKSUM_UNNECESSARY; 2655 skb->ip_summed = CHECKSUM_UNNECESSARY;
2659 else 2656 else
2660 skb->ip_summed = CHECKSUM_NONE; 2657 skb_checksum_none_assert(skb);
2661} 2658}
2662 2659
2663 2660
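The gianfar conversion replaces the old union skb_shared_tx bitfields (skb_tx(skb)->hardware, ->in_progress) with the flag bits that superseded them in skb_shinfo(skb)->tx_flags. A short usage sketch of the two flags touched here (the demo_ function names are mine):

    #include <linux/skbuff.h>

    /* Should this skb get a hardware TX timestamp? */
    static bool demo_want_hw_tstamp(const struct sk_buff *skb, bool hwts_tx_en)
    {
            return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && hwts_tx_en;
    }

    /* Mark the stamp as pending once the descriptor is set up. */
    static void demo_mark_tstamp_pending(struct sk_buff *skb)
    {
            skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
    }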
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 9bda023c0235..ae8e5d3c6c1f 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -254,7 +254,7 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
254 254
255 /* Make sure we return a number greater than 0 255 /* Make sure we return a number greater than 0
256 * if usecs > 0 */ 256 * if usecs > 0 */
257 return ((usecs * 1000 + count - 1) / count); 257 return (usecs * 1000 + count - 1) / count;
258} 258}
259 259
260/* Convert ethernet clock ticks to microseconds */ 260/* Convert ethernet clock ticks to microseconds */
@@ -278,7 +278,7 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
278 278
279 /* Make sure we return a number greater than 0 */ 279 /* Make sure we return a number greater than 0 */
280 /* if ticks is > 0 */ 280 /* if ticks is > 0 */
281 return ((ticks * count) / 1000); 281 return (ticks * count) / 1000;
282} 282}
283 283
284/* Get the coalescing parameters, and put them in the cvals 284/* Get the coalescing parameters, and put them in the cvals
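Beyond the parenthesis cleanup, the + count - 1 term that the comments refer to is the standard ceiling-division idiom: it guarantees a nonzero usecs value never rounds down to zero ticks. The same thing can be spelled with the kernel's DIV_ROUND_UP() macro; a sketch:

    #include <linux/kernel.h>       /* DIV_ROUND_UP() */

    /* Returns at least 1 whenever usecs > 0. */
    static unsigned int demo_usecs2ticks(unsigned int usecs, unsigned int count)
    {
            /* identical to (usecs * 1000 + count - 1) / count */
            return DIV_ROUND_UP(usecs * 1000, count);
    }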
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index f15c64f1cd38..27d6960ce09e 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -893,7 +893,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
893 if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status)) 893 if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
894 skb->ip_summed = CHECKSUM_UNNECESSARY; 894 skb->ip_summed = CHECKSUM_UNNECESSARY;
895 else 895 else
896 skb->ip_summed = CHECKSUM_NONE; 896 skb_checksum_none_assert(skb);
897 897
898 skb->protocol = eth_type_trans(skb, dev); 898 skb->protocol = eth_type_trans(skb, dev);
899 dev->stats.rx_packets++; 899 dev->stats.rx_packets++;
@@ -1547,10 +1547,10 @@ static int __devinit greth_of_probe(struct platform_device *ofdev, const struct
1547 dev->netdev_ops = &greth_netdev_ops; 1547 dev->netdev_ops = &greth_netdev_ops;
1548 dev->ethtool_ops = &greth_ethtool_ops; 1548 dev->ethtool_ops = &greth_ethtool_ops;
1549 1549
1550 if (register_netdev(dev)) { 1550 err = register_netdev(dev);
1551 if (err) {
1551 if (netif_msg_probe(greth)) 1552 if (netif_msg_probe(greth))
1552 dev_err(greth->dev, "netdevice registration failed.\n"); 1553 dev_err(greth->dev, "netdevice registration failed.\n");
1553 err = -ENOMEM;
1554 goto error5; 1554 goto error5;
1555 } 1555 }
1556 1556
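The greth probe fix is about honest error propagation: the old code threw away register_netdev()'s return value and substituted a blanket -ENOMEM; the new code stores the real code in err and returns that. A minimal sketch:

    #include <linux/netdevice.h>

    /* Propagate the callee's error instead of inventing one. */
    static int demo_register(struct net_device *dev)
    {
            int err = register_netdev(dev);

            if (err) {
                    dev_err(&dev->dev, "netdevice registration failed\n");
                    return err;     /* not a hard-coded -ENOMEM */
            }
            return 0;
    }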
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 49aac7027fbb..9a6485892b3d 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1004,7 +1004,7 @@ static int hamachi_open(struct net_device *dev)
1004 init_timer(&hmp->timer); 1004 init_timer(&hmp->timer);
1005 hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */ 1005 hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
1006 hmp->timer.data = (unsigned long)dev; 1006 hmp->timer.data = (unsigned long)dev;
1007 hmp->timer.function = &hamachi_timer; /* timer handler */ 1007 hmp->timer.function = hamachi_timer; /* timer handler */
1008 add_timer(&hmp->timer); 1008 add_timer(&hmp->timer);
1009 1009
1010 return 0; 1010 return 0;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 14f01d156db9..ac1d323c5eb5 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -168,7 +168,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
168 168
169static inline int dev_is_ethdev(struct net_device *dev) 169static inline int dev_is_ethdev(struct net_device *dev)
170{ 170{
171 return (dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5)); 171 return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
172} 172}
173 173
174/* ------------------------------------------------------------------------ */ 174/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index b8bdf9d51cd4..5b37579e84b7 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -110,7 +110,7 @@ static int calc_crc_ccitt(const unsigned char *buf, int cnt)
110 for (; cnt > 0; cnt--) 110 for (; cnt > 0; cnt--)
111 crc = (crc >> 8) ^ crc_ccitt_table[(crc ^ *buf++) & 0xff]; 111 crc = (crc >> 8) ^ crc_ccitt_table[(crc ^ *buf++) & 0xff];
112 crc ^= 0xffff; 112 crc ^= 0xffff;
113 return (crc & 0xffff); 113 return crc & 0xffff;
114} 114}
115#endif 115#endif
116 116
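The visible tail of calc_crc_ccitt() is the classic table-driven CRC-CCITT update with the final inversion HDLC uses for its frame check sequence, and the surrounding #endif suggests this local copy is only a conditional fallback; the kernel ships the same routine as crc_ccitt() in lib/crc-ccitt.c. A hedged sketch of computing an HDLC-style FCS with the library version (my naming, and I am assuming the usual 0xffff preset/invert convention shown above):

    #include <linux/crc-ccitt.h>

    static u16 demo_hdlc_fcs(const u8 *buf, size_t len)
    {
            /* preset 0xffff, table-driven update, final bit inversion */
            return crc_ccitt(0xffff, buf, len) ^ 0xffff;
    }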
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 9f64c8637208..33655814448e 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1069,7 +1069,8 @@ static void scc_tx_done(struct scc_channel *scc)
1069 case KISS_DUPLEX_LINK: 1069 case KISS_DUPLEX_LINK:
1070 scc->stat.tx_state = TXS_IDLE2; 1070 scc->stat.tx_state = TXS_IDLE2;
1071 if (scc->kiss.idletime != TIMER_OFF) 1071 if (scc->kiss.idletime != TIMER_OFF)
1072 scc_start_tx_timer(scc, t_idle, scc->kiss.idletime*100); 1072 scc_start_tx_timer(scc, t_idle,
1073 scc->kiss.idletime*100);
1073 break; 1074 break;
1074 case KISS_DUPLEX_OPTIMA: 1075 case KISS_DUPLEX_OPTIMA:
1075 scc_notify(scc, HWEV_ALL_SENT); 1076 scc_notify(scc, HWEV_ALL_SENT);
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 86ececd3c658..d15d2f2ba78e 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -204,10 +204,10 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
204 ei_status.rx_start_page = HP_START_PG + TX_PAGES; 204 ei_status.rx_start_page = HP_START_PG + TX_PAGES;
205 ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG; 205 ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
206 206
207 ei_status.reset_8390 = &hp_reset_8390; 207 ei_status.reset_8390 = hp_reset_8390;
208 ei_status.get_8390_hdr = &hp_get_8390_hdr; 208 ei_status.get_8390_hdr = hp_get_8390_hdr;
209 ei_status.block_input = &hp_block_input; 209 ei_status.block_input = hp_block_input;
210 ei_status.block_output = &hp_block_output; 210 ei_status.block_output = hp_block_output;
211 hp_init_card(dev); 211 hp_init_card(dev);
212 212
213 retval = register_netdev(dev); 213 retval = register_netdev(dev);
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 095b17ecf609..8e2c4601b5f5 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1312,7 +1312,7 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
1312 for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++) 1312 for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++)
1313 printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p); 1313 printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p);
1314#endif 1314#endif
1315 return (1); 1315 return 1;
1316 } 1316 }
1317 /* else: */ 1317 /* else: */
1318 /* alloc_skb failed (no memory) -> still can receive the header 1318 /* alloc_skb failed (no memory) -> still can receive the header
@@ -1325,7 +1325,7 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
1325 1325
1326 ringptr->pdl[0] = 0x00010000; /* PDH: Count=1 Fragment */ 1326 ringptr->pdl[0] = 0x00010000; /* PDH: Count=1 Fragment */
1327 1327
1328 return (0); 1328 return 0;
1329} 1329}
1330 1330
1331/* 1331/*
@@ -2752,7 +2752,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
2752 hp100_outw(HP100_MISC_ERROR, IRQ_STATUS); 2752 hp100_outw(HP100_MISC_ERROR, IRQ_STATUS);
2753 2753
2754 if (val & HP100_LINK_UP_ST) 2754 if (val & HP100_LINK_UP_ST)
2755 return (0); /* login was ok */ 2755 return 0; /* login was ok */
2756 else { 2756 else {
2757 printk("hp100: %s: Training failed.\n", dev->name); 2757 printk("hp100: %s: Training failed.\n", dev->name);
2758 hp100_down_vg_link(dev); 2758 hp100_down_vg_link(dev);
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 07d8e5b634f3..c5ef62ceb840 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -155,10 +155,10 @@ static int __devinit hydra_init(struct zorro_dev *z)
155 155
156 ei_status.rx_start_page = start_page + TX_PAGES; 156 ei_status.rx_start_page = start_page + TX_PAGES;
157 157
158 ei_status.reset_8390 = &hydra_reset_8390; 158 ei_status.reset_8390 = hydra_reset_8390;
159 ei_status.block_input = &hydra_block_input; 159 ei_status.block_input = hydra_block_input;
160 ei_status.block_output = &hydra_block_output; 160 ei_status.block_output = hydra_block_output;
161 ei_status.get_8390_hdr = &hydra_get_8390_hdr; 161 ei_status.get_8390_hdr = hydra_get_8390_hdr;
162 ei_status.reg_offset = hydra_offsets; 162 ei_status.reg_offset = hydra_offsets;
163 163
164 dev->netdev_ops = &hydra_netdev_ops; 164 dev->netdev_ops = &hydra_netdev_ops;
@@ -173,9 +173,8 @@ static int __devinit hydra_init(struct zorro_dev *z)
173 173
174 zorro_set_drvdata(z, dev); 174 zorro_set_drvdata(z, dev);
175 175
176 printk(KERN_INFO "%s: Hydra at 0x%08llx, address " 176 pr_info("%s: Hydra at %pR, address %pM (hydra.c " HYDRA_VERSION ")\n",
177 "%pM (hydra.c " HYDRA_VERSION ")\n", 177 dev->name, &z->resource, dev->dev_addr);
178 dev->name, (unsigned long long)z->resource.start, dev->dev_addr);
179 178
180 return 0; 179 return 0;
181} 180}
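The hydra cleanup leans on printk's pointer-format extensions: %pR renders a struct resource (range plus flags) and %pM a 6-byte MAC address, replacing the manual (unsigned long long) cast and shortening the message. A sketch (names hypothetical):

    #include <linux/ioport.h>
    #include <linux/netdevice.h>

    static void demo_announce(struct net_device *dev, struct resource *res)
    {
            /* %pR prints the resource range, %pM the MAC address */
            pr_info("%s: board at %pR, address %pM\n",
                    dev->name, res, dev->dev_addr);
    }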
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 519e19e23955..385dc3204cb7 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2095,11 +2095,11 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2095 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { 2095 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2096 hdr->version = EMAC4_ETHTOOL_REGS_VER; 2096 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2097 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev)); 2097 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2098 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev)); 2098 return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
2099 } else { 2099 } else {
2100 hdr->version = EMAC_ETHTOOL_REGS_VER; 2100 hdr->version = EMAC_ETHTOOL_REGS_VER;
2101 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev)); 2101 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2102 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev)); 2102 return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
2103 } 2103 }
2104} 2104}
2105 2105
@@ -2293,7 +2293,7 @@ static int __devinit emac_check_deps(struct emac_instance *dev,
2293 if (deps[i].drvdata != NULL) 2293 if (deps[i].drvdata != NULL)
2294 there++; 2294 there++;
2295 } 2295 }
2296 return (there == EMAC_DEP_COUNT); 2296 return there == EMAC_DEP_COUNT;
2297} 2297}
2298 2298
2299static void emac_put_deps(struct emac_instance *dev) 2299static void emac_put_deps(struct emac_instance *dev)
diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h
index 9e37e3d9c51d..4fec0844d59d 100644
--- a/drivers/net/ibm_newemac/core.h
+++ b/drivers/net/ibm_newemac/core.h
@@ -410,7 +410,7 @@ static inline u32 *emac_xaht_base(struct emac_instance *dev)
410 else 410 else
411 offset = offsetof(struct emac_regs, u0.emac4.iaht1); 411 offset = offsetof(struct emac_regs, u0.emac4.iaht1);
412 412
413 return ((u32 *)((ptrdiff_t)p + offset)); 413 return (u32 *)((ptrdiff_t)p + offset);
414} 414}
415 415
416static inline u32 *emac_gaht_base(struct emac_instance *dev) 416static inline u32 *emac_gaht_base(struct emac_instance *dev)
@@ -418,7 +418,7 @@ static inline u32 *emac_gaht_base(struct emac_instance *dev)
418 /* GAHT registers always come after an identical number of 418 /* GAHT registers always come after an identical number of
419 * IAHT registers. 419 * IAHT registers.
420 */ 420 */
421 return (emac_xaht_base(dev) + EMAC_XAHT_REGS(dev)); 421 return emac_xaht_base(dev) + EMAC_XAHT_REGS(dev);
422} 422}
423 423
424static inline u32 *emac_iaht_base(struct emac_instance *dev) 424static inline u32 *emac_iaht_base(struct emac_instance *dev)
@@ -426,7 +426,7 @@ static inline u32 *emac_iaht_base(struct emac_instance *dev)
426 /* IAHT registers always come before an identical number of 426 /* IAHT registers always come before an identical number of
427 * GAHT registers. 427 * GAHT registers.
428 */ 428 */
429 return (emac_xaht_base(dev)); 429 return emac_xaht_base(dev);
430} 430}
431 431
432/* Ethtool get_regs complex data. 432/* Ethtool get_regs complex data.
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 294ccfb427cf..0037a696cd0a 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -602,7 +602,7 @@ static void irqrx_handler(struct net_device *dev)
602 /* set up skb fields */ 602 /* set up skb fields */
603 603
604 skb->protocol = eth_type_trans(skb, dev); 604 skb->protocol = eth_type_trans(skb, dev);
605 skb->ip_summed = CHECKSUM_NONE; 605 skb_checksum_none_assert(skb);
606 606
607 /* bookkeeping */ 607 /* bookkeeping */
608 dev->stats.rx_packets++; 608 dev->stats.rx_packets++;
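gianfar, greth and ibmlana all swap skb->ip_summed = CHECKSUM_NONE for skb_checksum_none_assert(skb). My understanding of the helper: a freshly allocated skb already carries CHECKSUM_NONE, so the explicit store was redundant, and the helper merely asserts that invariant when debugging is enabled. Roughly (a paraphrase, not the verbatim include/linux/skbuff.h definition):

    /* Paraphrased intent of skb_checksum_none_assert(). */
    static inline void checksum_none_assert_sketch(const struct sk_buff *skb)
    {
    #ifdef DEBUG
            BUG_ON(skb->ip_summed != CHECKSUM_NONE);
    #endif
    }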
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 4734c939ad03..b3e157ed6776 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1,122 +1,84 @@
1/**************************************************************************/
2/* */
3/* IBM eServer i/pSeries Virtual Ethernet Device Driver */
4/* Copyright (C) 2003 IBM Corp. */
5/* Originally written by Dave Larson (larson1@us.ibm.com) */
6/* Maintained by Santiago Leon (santil@us.ibm.com) */
7/* */
8/* This program is free software; you can redistribute it and/or modify */
9/* it under the terms of the GNU General Public License as published by */
10/* the Free Software Foundation; either version 2 of the License, or */
11/* (at your option) any later version. */
12/* */
13/* This program is distributed in the hope that it will be useful, */
14/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
15/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
16/* GNU General Public License for more details. */
17/* */
18/* You should have received a copy of the GNU General Public License */
19/* along with this program; if not, write to the Free Software */
20/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */
21/* USA */
22/* */
23/* This module contains the implementation of a virtual ethernet device */
24/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
25/* option of the RS/6000 Platform Architechture to interface with virtual */
26/* ethernet NICs that are presented to the partition by the hypervisor. */
27/* */
28/**************************************************************************/
29/* 1/*
30 TODO: 2 * IBM Power Virtual Ethernet Device Driver
31 - add support for sysfs 3 *
32 - possibly remove procfs support 4 * This program is free software; you can redistribute it and/or modify
33*/ 5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2003, 2010
19 *
20 * Authors: Dave Larson <larson1@us.ibm.com>
21 * Santiago Leon <santil@linux.vnet.ibm.com>
22 * Brian King <brking@linux.vnet.ibm.com>
23 * Robert Jennings <rcj@linux.vnet.ibm.com>
24 * Anton Blanchard <anton@au.ibm.com>
25 */
34 26
35#include <linux/module.h> 27#include <linux/module.h>
36#include <linux/moduleparam.h> 28#include <linux/moduleparam.h>
37#include <linux/types.h> 29#include <linux/types.h>
38#include <linux/errno.h> 30#include <linux/errno.h>
39#include <linux/ioport.h>
40#include <linux/dma-mapping.h> 31#include <linux/dma-mapping.h>
41#include <linux/kernel.h> 32#include <linux/kernel.h>
42#include <linux/netdevice.h> 33#include <linux/netdevice.h>
43#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
44#include <linux/skbuff.h> 35#include <linux/skbuff.h>
45#include <linux/init.h> 36#include <linux/init.h>
46#include <linux/delay.h>
47#include <linux/mm.h> 37#include <linux/mm.h>
48#include <linux/pm.h> 38#include <linux/pm.h>
49#include <linux/ethtool.h> 39#include <linux/ethtool.h>
50#include <linux/proc_fs.h>
51#include <linux/in.h> 40#include <linux/in.h>
52#include <linux/ip.h> 41#include <linux/ip.h>
42#include <linux/ipv6.h>
53#include <linux/slab.h> 43#include <linux/slab.h>
54#include <net/net_namespace.h>
55#include <asm/hvcall.h> 44#include <asm/hvcall.h>
56#include <asm/atomic.h> 45#include <asm/atomic.h>
57#include <asm/vio.h> 46#include <asm/vio.h>
58#include <asm/iommu.h> 47#include <asm/iommu.h>
59#include <asm/uaccess.h>
60#include <asm/firmware.h> 48#include <asm/firmware.h>
61#include <linux/seq_file.h>
62 49
63#include "ibmveth.h" 50#include "ibmveth.h"
64 51
65#undef DEBUG
66
67#define ibmveth_printk(fmt, args...) \
68 printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)
69
70#define ibmveth_error_printk(fmt, args...) \
71 printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
72
73#ifdef DEBUG
74#define ibmveth_debug_printk_no_adapter(fmt, args...) \
75 printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
76#define ibmveth_debug_printk(fmt, args...) \
77 printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
78#define ibmveth_assert(expr) \
79 if(!(expr)) { \
80 printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
81 BUG(); \
82 }
83#else
84#define ibmveth_debug_printk_no_adapter(fmt, args...)
85#define ibmveth_debug_printk(fmt, args...)
86#define ibmveth_assert(expr)
87#endif
88
89static int ibmveth_open(struct net_device *dev);
90static int ibmveth_close(struct net_device *dev);
91static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
92static int ibmveth_poll(struct napi_struct *napi, int budget);
93static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
94static void ibmveth_set_multicast_list(struct net_device *dev);
95static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
96static void ibmveth_proc_register_driver(void);
97static void ibmveth_proc_unregister_driver(void);
98static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
99static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
100static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance); 52static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
101static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter); 53static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
102static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev); 54static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
103static struct kobj_type ktype_veth_pool;
104 55
56static struct kobj_type ktype_veth_pool;
105 57
106#ifdef CONFIG_PROC_FS
107#define IBMVETH_PROC_DIR "ibmveth"
108static struct proc_dir_entry *ibmveth_proc_dir;
109#endif
110 58
111static const char ibmveth_driver_name[] = "ibmveth"; 59static const char ibmveth_driver_name[] = "ibmveth";
112static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver"; 60static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
113#define ibmveth_driver_version "1.03" 61#define ibmveth_driver_version "1.04"
114 62
115MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>"); 63MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
116MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver"); 64MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
117MODULE_LICENSE("GPL"); 65MODULE_LICENSE("GPL");
118MODULE_VERSION(ibmveth_driver_version); 66MODULE_VERSION(ibmveth_driver_version);
119 67
68static unsigned int tx_copybreak __read_mostly = 128;
69module_param(tx_copybreak, uint, 0644);
70MODULE_PARM_DESC(tx_copybreak,
71 "Maximum size of packet that is copied to a new buffer on transmit");
72
73static unsigned int rx_copybreak __read_mostly = 128;
74module_param(rx_copybreak, uint, 0644);
75MODULE_PARM_DESC(rx_copybreak,
76 "Maximum size of packet that is copied to a new buffer on receive");
77
78static unsigned int rx_flush __read_mostly = 0;
79module_param(rx_flush, uint, 0644);
80MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
81
120struct ibmveth_stat { 82struct ibmveth_stat {
121 char name[ETH_GSTRING_LEN]; 83 char name[ETH_GSTRING_LEN];
122 int offset; 84 int offset;
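Among the additions in the rewritten ibmveth preamble are three module parameters; mode 0644 makes each one readable and root-writable through /sys/module/ibmveth/parameters/. The pattern, applied to a hypothetical knob of the same shape as tx_copybreak:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static unsigned int demo_copybreak __read_mostly = 128;
    module_param(demo_copybreak, uint, 0644);
    MODULE_PARM_DESC(demo_copybreak,
                     "Copy packets smaller than this to a fresh buffer");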
@@ -128,12 +90,16 @@ struct ibmveth_stat {
128struct ibmveth_stat ibmveth_stats[] = { 90struct ibmveth_stat ibmveth_stats[] = {
129 { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) }, 91 { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
130 { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) }, 92 { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
131 { "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) }, 93 { "replenish_add_buff_failure",
132 { "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) }, 94 IBMVETH_STAT_OFF(replenish_add_buff_failure) },
95 { "replenish_add_buff_success",
96 IBMVETH_STAT_OFF(replenish_add_buff_success) },
133 { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) }, 97 { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
134 { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) }, 98 { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
135 { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) }, 99 { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
136 { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) }, 100 { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
101 { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
102 { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
137}; 103};
138 104
139/* simple methods of getting data from the current rxq entry */ 105/* simple methods of getting data from the current rxq entry */
@@ -144,41 +110,44 @@ static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
144 110
145static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter) 111static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
146{ 112{
147 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT; 113 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
114 IBMVETH_RXQ_TOGGLE_SHIFT;
148} 115}
149 116
150static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter) 117static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
151{ 118{
152 return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle); 119 return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
153} 120}
154 121
155static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter) 122static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
156{ 123{
157 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID); 124 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
158} 125}
159 126
160static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter) 127static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
161{ 128{
162 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK); 129 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
163} 130}
164 131
165static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter) 132static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
166{ 133{
167 return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length); 134 return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
168} 135}
169 136
170static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter) 137static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
171{ 138{
172 return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD); 139 return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
173} 140}
174 141
175/* setup the initial settings for a buffer pool */ 142/* setup the initial settings for a buffer pool */
176static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active) 143static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
144 u32 pool_index, u32 pool_size,
145 u32 buff_size, u32 pool_active)
177{ 146{
178 pool->size = pool_size; 147 pool->size = pool_size;
179 pool->index = pool_index; 148 pool->index = pool_index;
180 pool->buff_size = buff_size; 149 pool->buff_size = buff_size;
181 pool->threshold = pool_size / 2; 150 pool->threshold = pool_size * 7 / 8;
182 pool->active = pool_active; 151 pool->active = pool_active;
183} 152}
184 153
@@ -189,12 +158,11 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
189 158
190 pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); 159 pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
191 160
192 if(!pool->free_map) { 161 if (!pool->free_map)
193 return -1; 162 return -1;
194 }
195 163
196 pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); 164 pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
197 if(!pool->dma_addr) { 165 if (!pool->dma_addr) {
198 kfree(pool->free_map); 166 kfree(pool->free_map);
199 pool->free_map = NULL; 167 pool->free_map = NULL;
200 return -1; 168 return -1;
@@ -202,7 +170,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
202 170
203 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL); 171 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
204 172
205 if(!pool->skbuff) { 173 if (!pool->skbuff) {
206 kfree(pool->dma_addr); 174 kfree(pool->dma_addr);
207 pool->dma_addr = NULL; 175 pool->dma_addr = NULL;
208 176
@@ -213,9 +181,8 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
213 181
214 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); 182 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
215 183
216 for(i = 0; i < pool->size; ++i) { 184 for (i = 0; i < pool->size; ++i)
217 pool->free_map[i] = i; 185 pool->free_map[i] = i;
218 }
219 186
220 atomic_set(&pool->available, 0); 187 atomic_set(&pool->available, 0);
221 pool->producer_index = 0; 188 pool->producer_index = 0;
@@ -224,10 +191,19 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
224 return 0; 191 return 0;
225} 192}
226 193
194static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
195{
196 unsigned long offset;
197
198 for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
199 asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
200}
201
227/* replenish the buffers for a pool. note that we don't need to 202/* replenish the buffers for a pool. note that we don't need to
228 * skb_reserve these since they are used for incoming... 203 * skb_reserve these since they are used for incoming...
229 */ 204 */
230static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool) 205static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
206 struct ibmveth_buff_pool *pool)
231{ 207{
232 u32 i; 208 u32 i;
233 u32 count = pool->size - atomic_read(&pool->available); 209 u32 count = pool->size - atomic_read(&pool->available);
@@ -240,23 +216,26 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
240 216
241 mb(); 217 mb();
242 218
243 for(i = 0; i < count; ++i) { 219 for (i = 0; i < count; ++i) {
244 union ibmveth_buf_desc desc; 220 union ibmveth_buf_desc desc;
245 221
246 skb = alloc_skb(pool->buff_size, GFP_ATOMIC); 222 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
247 223
248 if(!skb) { 224 if (!skb) {
249 ibmveth_debug_printk("replenish: unable to allocate skb\n"); 225 netdev_dbg(adapter->netdev,
226 "replenish: unable to allocate skb\n");
250 adapter->replenish_no_mem++; 227 adapter->replenish_no_mem++;
251 break; 228 break;
252 } 229 }
253 230
254 free_index = pool->consumer_index; 231 free_index = pool->consumer_index;
255 pool->consumer_index = (pool->consumer_index + 1) % pool->size; 232 pool->consumer_index++;
233 if (pool->consumer_index >= pool->size)
234 pool->consumer_index = 0;
256 index = pool->free_map[free_index]; 235 index = pool->free_map[free_index];
257 236
258 ibmveth_assert(index != IBM_VETH_INVALID_MAP); 237 BUG_ON(index == IBM_VETH_INVALID_MAP);
259 ibmveth_assert(pool->skbuff[index] == NULL); 238 BUG_ON(pool->skbuff[index] != NULL);
260 239
261 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 240 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
262 pool->buff_size, DMA_FROM_DEVICE); 241 pool->buff_size, DMA_FROM_DEVICE);
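Inside the replenish loop the consumer-index update changes from a modulo to an increment with a compare-and-reset. The pool sizes here are not powers of two, so % size costs an integer divide on every buffer; the branch form wraps identically without the division. Sketch:

    /* Equivalent to (idx + 1) % size, minus the per-buffer division. */
    static unsigned int demo_ring_advance(unsigned int idx, unsigned int size)
    {
            if (++idx >= size)
                    idx = 0;
            return idx;
    }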
@@ -269,16 +248,23 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
269 pool->skbuff[index] = skb; 248 pool->skbuff[index] = skb;
270 249
271 correlator = ((u64)pool->index << 32) | index; 250 correlator = ((u64)pool->index << 32) | index;
272 *(u64*)skb->data = correlator; 251 *(u64 *)skb->data = correlator;
273 252
274 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size; 253 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
275 desc.fields.address = dma_addr; 254 desc.fields.address = dma_addr;
276 255
277 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); 256 if (rx_flush) {
257 unsigned int len = min(pool->buff_size,
258 adapter->netdev->mtu +
259 IBMVETH_BUFF_OH);
260 ibmveth_flush_buffer(skb->data, len);
261 }
262 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
263 desc.desc);
278 264
279 if (lpar_rc != H_SUCCESS) 265 if (lpar_rc != H_SUCCESS) {
280 goto failure; 266 goto failure;
281 else { 267 } else {
282 buffers_added++; 268 buffers_added++;
283 adapter->replenish_add_buff_success++; 269 adapter->replenish_add_buff_success++;
284 } 270 }
@@ -313,26 +299,31 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
313 299
314 adapter->replenish_task_cycles++; 300 adapter->replenish_task_cycles++;
315 301
316 for (i = (IbmVethNumBufferPools - 1); i >= 0; i--) 302 for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
317 if(adapter->rx_buff_pool[i].active) 303 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
318 ibmveth_replenish_buffer_pool(adapter,
319 &adapter->rx_buff_pool[i]);
320 304
321 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); 305 if (pool->active &&
306 (atomic_read(&pool->available) < pool->threshold))
307 ibmveth_replenish_buffer_pool(adapter, pool);
308 }
309
310 adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
311 4096 - 8);
322} 312}
323 313
324/* empty and free ana buffer pool - also used to do cleanup in error paths */ 314/* empty and free ana buffer pool - also used to do cleanup in error paths */
325static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool) 315static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
316 struct ibmveth_buff_pool *pool)
326{ 317{
327 int i; 318 int i;
328 319
329 kfree(pool->free_map); 320 kfree(pool->free_map);
330 pool->free_map = NULL; 321 pool->free_map = NULL;
331 322
332 if(pool->skbuff && pool->dma_addr) { 323 if (pool->skbuff && pool->dma_addr) {
333 for(i = 0; i < pool->size; ++i) { 324 for (i = 0; i < pool->size; ++i) {
334 struct sk_buff *skb = pool->skbuff[i]; 325 struct sk_buff *skb = pool->skbuff[i];
335 if(skb) { 326 if (skb) {
336 dma_unmap_single(&adapter->vdev->dev, 327 dma_unmap_single(&adapter->vdev->dev,
337 pool->dma_addr[i], 328 pool->dma_addr[i],
338 pool->buff_size, 329 pool->buff_size,
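ibmveth_replenish_task() also gains a policy change: instead of unconditionally topping up every active pool, it now skips pools whose available count is still at or above the threshold (which ibmveth_init_buffer_pool raised from half to 7/8 of the pool size). A sketch of the resulting refill loop, walking down from the highest index as the driver's loop does, with a hypothetical pool struct mirroring the relevant ibmveth_buff_pool fields:

    #include <asm/atomic.h>         /* matches this era's include above */

    struct demo_pool {                      /* hypothetical subset */
            int active;
            atomic_t available;
            unsigned int threshold;         /* now pool size * 7 / 8 */
    };

    extern void demo_replenish(struct demo_pool *pool);

    static void demo_replenish_all(struct demo_pool *pools, int n)
    {
            int i;

            for (i = n - 1; i >= 0; i--) {
                    struct demo_pool *p = &pools[i];

                    if (p->active && atomic_read(&p->available) < p->threshold)
                            demo_replenish(p);
            }
    }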
@@ -343,31 +334,32 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
343 } 334 }
344 } 335 }
345 336
346 if(pool->dma_addr) { 337 if (pool->dma_addr) {
347 kfree(pool->dma_addr); 338 kfree(pool->dma_addr);
348 pool->dma_addr = NULL; 339 pool->dma_addr = NULL;
349 } 340 }
350 341
351 if(pool->skbuff) { 342 if (pool->skbuff) {
352 kfree(pool->skbuff); 343 kfree(pool->skbuff);
353 pool->skbuff = NULL; 344 pool->skbuff = NULL;
354 } 345 }
355} 346}
356 347
357/* remove a buffer from a pool */ 348/* remove a buffer from a pool */
358static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator) 349static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
350 u64 correlator)
359{ 351{
360 unsigned int pool = correlator >> 32; 352 unsigned int pool = correlator >> 32;
361 unsigned int index = correlator & 0xffffffffUL; 353 unsigned int index = correlator & 0xffffffffUL;
362 unsigned int free_index; 354 unsigned int free_index;
363 struct sk_buff *skb; 355 struct sk_buff *skb;
364 356
365 ibmveth_assert(pool < IbmVethNumBufferPools); 357 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
366 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 358 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
367 359
368 skb = adapter->rx_buff_pool[pool].skbuff[index]; 360 skb = adapter->rx_buff_pool[pool].skbuff[index];
369 361
370 ibmveth_assert(skb != NULL); 362 BUG_ON(skb == NULL);
371 363
372 adapter->rx_buff_pool[pool].skbuff[index] = NULL; 364 adapter->rx_buff_pool[pool].skbuff[index] = NULL;
373 365
@@ -377,9 +369,10 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
377 DMA_FROM_DEVICE); 369 DMA_FROM_DEVICE);
378 370
379 free_index = adapter->rx_buff_pool[pool].producer_index; 371 free_index = adapter->rx_buff_pool[pool].producer_index;
380 adapter->rx_buff_pool[pool].producer_index 372 adapter->rx_buff_pool[pool].producer_index++;
381 = (adapter->rx_buff_pool[pool].producer_index + 1) 373 if (adapter->rx_buff_pool[pool].producer_index >=
382 % adapter->rx_buff_pool[pool].size; 374 adapter->rx_buff_pool[pool].size)
375 adapter->rx_buff_pool[pool].producer_index = 0;
383 adapter->rx_buff_pool[pool].free_map[free_index] = index; 376 adapter->rx_buff_pool[pool].free_map[free_index] = index;
384 377
385 mb(); 378 mb();
@@ -394,8 +387,8 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
394 unsigned int pool = correlator >> 32; 387 unsigned int pool = correlator >> 32;
395 unsigned int index = correlator & 0xffffffffUL; 388 unsigned int index = correlator & 0xffffffffUL;
396 389
397 ibmveth_assert(pool < IbmVethNumBufferPools); 390 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
398 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 391 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
399 392
400 return adapter->rx_buff_pool[pool].skbuff[index]; 393 return adapter->rx_buff_pool[pool].skbuff[index];
401} 394}
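The private ibmveth_assert() macro (a printk plus BUG()) gives way to the stock BUG_ON() throughout these hunks. Note that every call site inverts its condition in the process: the old macro fired when its expression was false, while BUG_ON() fires when its expression is true. For example:

    #include <linux/kernel.h>

    /* Old:  ibmveth_assert(index < pool_size);
     * New:  the logically inverted test with the generic helper.
     */
    static void demo_validate_index(unsigned int index, unsigned int pool_size)
    {
            BUG_ON(index >= pool_size);
    }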
@@ -410,10 +403,10 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
410 union ibmveth_buf_desc desc; 403 union ibmveth_buf_desc desc;
411 unsigned long lpar_rc; 404 unsigned long lpar_rc;
412 405
413 ibmveth_assert(pool < IbmVethNumBufferPools); 406 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
414 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 407 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
415 408
416 if(!adapter->rx_buff_pool[pool].active) { 409 if (!adapter->rx_buff_pool[pool].active) {
417 ibmveth_rxq_harvest_buffer(adapter); 410 ibmveth_rxq_harvest_buffer(adapter);
418 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]); 411 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
419 return; 412 return;
@@ -425,12 +418,13 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
425 418
426 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); 419 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
427 420
428 if(lpar_rc != H_SUCCESS) { 421 if (lpar_rc != H_SUCCESS) {
429 ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc); 422 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
423 "during recycle rc=%ld", lpar_rc);
430 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); 424 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
431 } 425 }
432 426
433 if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) { 427 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
434 adapter->rx_queue.index = 0; 428 adapter->rx_queue.index = 0;
435 adapter->rx_queue.toggle = !adapter->rx_queue.toggle; 429 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
436 } 430 }
@@ -440,7 +434,7 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
440{ 434{
441 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator); 435 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
442 436
443 if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) { 437 if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
444 adapter->rx_queue.index = 0; 438 adapter->rx_queue.index = 0;
445 adapter->rx_queue.toggle = !adapter->rx_queue.toggle; 439 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
446 } 440 }
@@ -451,7 +445,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
451 int i; 445 int i;
452 struct device *dev = &adapter->vdev->dev; 446 struct device *dev = &adapter->vdev->dev;
453 447
454 if(adapter->buffer_list_addr != NULL) { 448 if (adapter->buffer_list_addr != NULL) {
455 if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { 449 if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
456 dma_unmap_single(dev, adapter->buffer_list_dma, 4096, 450 dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
457 DMA_BIDIRECTIONAL); 451 DMA_BIDIRECTIONAL);
@@ -461,7 +455,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
461 adapter->buffer_list_addr = NULL; 455 adapter->buffer_list_addr = NULL;
462 } 456 }
463 457
464 if(adapter->filter_list_addr != NULL) { 458 if (adapter->filter_list_addr != NULL) {
465 if (!dma_mapping_error(dev, adapter->filter_list_dma)) { 459 if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
466 dma_unmap_single(dev, adapter->filter_list_dma, 4096, 460 dma_unmap_single(dev, adapter->filter_list_dma, 4096,
467 DMA_BIDIRECTIONAL); 461 DMA_BIDIRECTIONAL);
@@ -471,7 +465,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
471 adapter->filter_list_addr = NULL; 465 adapter->filter_list_addr = NULL;
472 } 466 }
473 467
474 if(adapter->rx_queue.queue_addr != NULL) { 468 if (adapter->rx_queue.queue_addr != NULL) {
475 if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { 469 if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
476 dma_unmap_single(dev, 470 dma_unmap_single(dev,
477 adapter->rx_queue.queue_dma, 471 adapter->rx_queue.queue_dma,
@@ -483,7 +477,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
483 adapter->rx_queue.queue_addr = NULL; 477 adapter->rx_queue.queue_addr = NULL;
484 } 478 }
485 479
486 for(i = 0; i<IbmVethNumBufferPools; i++) 480 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
487 if (adapter->rx_buff_pool[i].active) 481 if (adapter->rx_buff_pool[i].active)
488 ibmveth_free_buffer_pool(adapter, 482 ibmveth_free_buffer_pool(adapter,
489 &adapter->rx_buff_pool[i]); 483 &adapter->rx_buff_pool[i]);
@@ -506,9 +500,11 @@ static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
506{ 500{
507 int rc, try_again = 1; 501 int rc, try_again = 1;
508 502
509 /* After a kexec the adapter will still be open, so our attempt to 503 /*
510 * open it will fail. So if we get a failure we free the adapter and 504 * After a kexec the adapter will still be open, so our attempt to
511 * try again, but only once. */ 505 * open it will fail. So if we get a failure we free the adapter and
506 * try again, but only once.
507 */
512retry: 508retry:
513 rc = h_register_logical_lan(adapter->vdev->unit_address, 509 rc = h_register_logical_lan(adapter->vdev->unit_address,
514 adapter->buffer_list_dma, rxq_desc.desc, 510 adapter->buffer_list_dma, rxq_desc.desc,
@@ -537,28 +533,31 @@ static int ibmveth_open(struct net_device *netdev)
537 int i; 533 int i;
538 struct device *dev; 534 struct device *dev;
539 535
540 ibmveth_debug_printk("open starting\n"); 536 netdev_dbg(netdev, "open starting\n");
541 537
542 napi_enable(&adapter->napi); 538 napi_enable(&adapter->napi);
543 539
544 for(i = 0; i<IbmVethNumBufferPools; i++) 540 for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
545 rxq_entries += adapter->rx_buff_pool[i].size; 541 rxq_entries += adapter->rx_buff_pool[i].size;
546 542
547 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 543 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
548 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 544 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
549 545
550 if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { 546 if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
551 ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); 547 netdev_err(netdev, "unable to allocate filter or buffer list "
548 "pages\n");
552 ibmveth_cleanup(adapter); 549 ibmveth_cleanup(adapter);
553 napi_disable(&adapter->napi); 550 napi_disable(&adapter->napi);
554 return -ENOMEM; 551 return -ENOMEM;
555 } 552 }
556 553
557 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries; 554 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
558 adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL); 555 rxq_entries;
556 adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
557 GFP_KERNEL);
559 558
560 if(!adapter->rx_queue.queue_addr) { 559 if (!adapter->rx_queue.queue_addr) {
561 ibmveth_error_printk("unable to allocate rx queue pages\n"); 560 netdev_err(netdev, "unable to allocate rx queue pages\n");
562 ibmveth_cleanup(adapter); 561 ibmveth_cleanup(adapter);
563 napi_disable(&adapter->napi); 562 napi_disable(&adapter->napi);
564 return -ENOMEM; 563 return -ENOMEM;
@@ -577,7 +576,8 @@ static int ibmveth_open(struct net_device *netdev)
577 if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || 576 if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
578 (dma_mapping_error(dev, adapter->filter_list_dma)) || 577 (dma_mapping_error(dev, adapter->filter_list_dma)) ||
579 (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { 578 (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
580 ibmveth_error_printk("unable to map filter or buffer list pages\n"); 579 netdev_err(netdev, "unable to map filter or buffer list "
580 "pages\n");
581 ibmveth_cleanup(adapter); 581 ibmveth_cleanup(adapter);
582 napi_disable(&adapter->napi); 582 napi_disable(&adapter->napi);
583 return -ENOMEM; 583 return -ENOMEM;
@@ -590,20 +590,23 @@ static int ibmveth_open(struct net_device *netdev)
590 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); 590 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
591 mac_address = mac_address >> 16; 591 mac_address = mac_address >> 16;
592 592
593 rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len; 593 rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
594 adapter->rx_queue.queue_len;
594 rxq_desc.fields.address = adapter->rx_queue.queue_dma; 595 rxq_desc.fields.address = adapter->rx_queue.queue_dma;
595 596
596 ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr); 597 netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
597 ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr); 598 netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
598 ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr); 599 netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
599 600
600 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); 601 h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
601 602
602 lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address); 603 lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
603 604
604 if(lpar_rc != H_SUCCESS) { 605 if (lpar_rc != H_SUCCESS) {
605 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc); 606 netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
606 ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n", 607 lpar_rc);
608 netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
609 "desc:0x%llx MAC:0x%llx\n",
607 adapter->buffer_list_dma, 610 adapter->buffer_list_dma,
608 adapter->filter_list_dma, 611 adapter->filter_list_dma,
609 rxq_desc.desc, 612 rxq_desc.desc,
@@ -613,11 +616,11 @@ static int ibmveth_open(struct net_device *netdev)
613 return -ENONET; 616 return -ENONET;
614 } 617 }
615 618
616 for(i = 0; i<IbmVethNumBufferPools; i++) { 619 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
617 if(!adapter->rx_buff_pool[i].active) 620 if (!adapter->rx_buff_pool[i].active)
618 continue; 621 continue;
619 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { 622 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
620 ibmveth_error_printk("unable to alloc pool\n"); 623 netdev_err(netdev, "unable to alloc pool\n");
621 adapter->rx_buff_pool[i].active = 0; 624 adapter->rx_buff_pool[i].active = 0;
622 ibmveth_cleanup(adapter); 625 ibmveth_cleanup(adapter);
623 napi_disable(&adapter->napi); 626 napi_disable(&adapter->napi);
@@ -625,9 +628,12 @@ static int ibmveth_open(struct net_device *netdev)
625 } 628 }
626 } 629 }
627 630
628 ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq); 631 netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
629 if((rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev)) != 0) { 632 rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
630 ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc); 633 netdev);
634 if (rc != 0) {
635 netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
636 netdev->irq, rc);
631 do { 637 do {
632 rc = h_free_logical_lan(adapter->vdev->unit_address); 638 rc = h_free_logical_lan(adapter->vdev->unit_address);
633 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); 639 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
@@ -640,7 +646,7 @@ static int ibmveth_open(struct net_device *netdev)
640 adapter->bounce_buffer = 646 adapter->bounce_buffer =
641 kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); 647 kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
642 if (!adapter->bounce_buffer) { 648 if (!adapter->bounce_buffer) {
643 ibmveth_error_printk("unable to allocate bounce buffer\n"); 649 netdev_err(netdev, "unable to allocate bounce buffer\n");
644 ibmveth_cleanup(adapter); 650 ibmveth_cleanup(adapter);
645 napi_disable(&adapter->napi); 651 napi_disable(&adapter->napi);
646 return -ENOMEM; 652 return -ENOMEM;
@@ -649,18 +655,18 @@ static int ibmveth_open(struct net_device *netdev)
649 dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, 655 dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
650 netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); 656 netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
651 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { 657 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
652 ibmveth_error_printk("unable to map bounce buffer\n"); 658 netdev_err(netdev, "unable to map bounce buffer\n");
653 ibmveth_cleanup(adapter); 659 ibmveth_cleanup(adapter);
654 napi_disable(&adapter->napi); 660 napi_disable(&adapter->napi);
655 return -ENOMEM; 661 return -ENOMEM;
656 } 662 }
657 663
658 ibmveth_debug_printk("initial replenish cycle\n"); 664 netdev_dbg(netdev, "initial replenish cycle\n");
659 ibmveth_interrupt(netdev->irq, netdev); 665 ibmveth_interrupt(netdev->irq, netdev);
660 666
661 netif_start_queue(netdev); 667 netif_start_queue(netdev);
662 668
663 ibmveth_debug_printk("open complete\n"); 669 netdev_dbg(netdev, "open complete\n");
664 670
665 return 0; 671 return 0;
666} 672}
@@ -670,7 +676,7 @@ static int ibmveth_close(struct net_device *netdev)
670 struct ibmveth_adapter *adapter = netdev_priv(netdev); 676 struct ibmveth_adapter *adapter = netdev_priv(netdev);
671 long lpar_rc; 677 long lpar_rc;
672 678
673 ibmveth_debug_printk("close starting\n"); 679 netdev_dbg(netdev, "close starting\n");
674 680
675 napi_disable(&adapter->napi); 681 napi_disable(&adapter->napi);
676 682
@@ -683,26 +689,29 @@ static int ibmveth_close(struct net_device *netdev)
683 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); 689 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
684 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); 690 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
685 691
686 if(lpar_rc != H_SUCCESS) 692 if (lpar_rc != H_SUCCESS) {
687 { 693 netdev_err(netdev, "h_free_logical_lan failed with %lx, "
688 ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n", 694 "continuing with close\n", lpar_rc);
689 lpar_rc);
690 } 695 }
691 696
692 free_irq(netdev->irq, netdev); 697 free_irq(netdev->irq, netdev);
693 698
694 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); 699 adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
700 4096 - 8);
695 701
696 ibmveth_cleanup(adapter); 702 ibmveth_cleanup(adapter);
697 703
698 ibmveth_debug_printk("close complete\n"); 704 netdev_dbg(netdev, "close complete\n");
699 705
700 return 0; 706 return 0;
701} 707}
702 708
703static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { 709static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
704 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE); 710{
705 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE); 711 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
712 SUPPORTED_FIBRE);
713 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
714 ADVERTISED_FIBRE);
706 cmd->speed = SPEED_1000; 715 cmd->speed = SPEED_1000;
707 cmd->duplex = DUPLEX_FULL; 716 cmd->duplex = DUPLEX_FULL;
708 cmd->port = PORT_FIBRE; 717 cmd->port = PORT_FIBRE;
@@ -714,12 +723,16 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
714 return 0; 723 return 0;
715} 724}
716 725
717static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) { 726static void netdev_get_drvinfo(struct net_device *dev,
727 struct ethtool_drvinfo *info)
728{
718 strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1); 729 strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
719 strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1); 730 strncpy(info->version, ibmveth_driver_version,
731 sizeof(info->version) - 1);
720} 732}
721 733
722static u32 netdev_get_link(struct net_device *dev) { 734static u32 netdev_get_link(struct net_device *dev)
735{
723 return 1; 736 return 1;
724} 737}
725 738
@@ -727,18 +740,20 @@ static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
727{ 740{
728 struct ibmveth_adapter *adapter = netdev_priv(dev); 741 struct ibmveth_adapter *adapter = netdev_priv(dev);
729 742
730 if (data) 743 if (data) {
731 adapter->rx_csum = 1; 744 adapter->rx_csum = 1;
732 else { 745 } else {
733 /* 746 /*
734 * Since the ibmveth firmware interface does not have the concept of 747 * Since the ibmveth firmware interface does not have the
735 * separate tx/rx checksum offload enable, if rx checksum is disabled 748 * concept of separate tx/rx checksum offload enable, if rx
736 * we also have to disable tx checksum offload. Once we disable rx 749 * checksum is disabled we also have to disable tx checksum
737 * checksum offload, we are no longer allowed to send tx buffers that 750 * offload. Once we disable rx checksum offload, we are no
738 * are not properly checksummed. 751 * longer allowed to send tx buffers that are not properly
752 * checksummed.
739 */ 753 */
740 adapter->rx_csum = 0; 754 adapter->rx_csum = 0;
741 dev->features &= ~NETIF_F_IP_CSUM; 755 dev->features &= ~NETIF_F_IP_CSUM;
756 dev->features &= ~NETIF_F_IPV6_CSUM;
742 } 757 }
743} 758}
744 759
@@ -747,10 +762,15 @@ static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
747 struct ibmveth_adapter *adapter = netdev_priv(dev); 762 struct ibmveth_adapter *adapter = netdev_priv(dev);
748 763
749 if (data) { 764 if (data) {
750 dev->features |= NETIF_F_IP_CSUM; 765 if (adapter->fw_ipv4_csum_support)
766 dev->features |= NETIF_F_IP_CSUM;
767 if (adapter->fw_ipv6_csum_support)
768 dev->features |= NETIF_F_IPV6_CSUM;
751 adapter->rx_csum = 1; 769 adapter->rx_csum = 1;
752 } else 770 } else {
753 dev->features &= ~NETIF_F_IP_CSUM; 771 dev->features &= ~NETIF_F_IP_CSUM;
772 dev->features &= ~NETIF_F_IPV6_CSUM;
773 }
754} 774}
755 775
756static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, 776static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
@@ -758,7 +778,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
758{ 778{
759 struct ibmveth_adapter *adapter = netdev_priv(dev); 779 struct ibmveth_adapter *adapter = netdev_priv(dev);
760 unsigned long set_attr, clr_attr, ret_attr; 780 unsigned long set_attr, clr_attr, ret_attr;
761 long ret; 781 unsigned long set_attr6, clr_attr6;
782 long ret, ret6;
762 int rc1 = 0, rc2 = 0; 783 int rc1 = 0, rc2 = 0;
763 int restart = 0; 784 int restart = 0;
764 785
@@ -772,10 +793,13 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
772 set_attr = 0; 793 set_attr = clr_attr = 0;
773 clr_attr = 0; 794 set_attr6 = clr_attr6 = 0;
774 795
775 if (data) 796 if (data) {
776 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; 797 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
777 else 798 set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
799 } else {
778 clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM; 800 clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
801 clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
802 }
779 803
780 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr); 804 ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
781 805
@@ -786,18 +810,39 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
786 set_attr, &ret_attr); 810 set_attr, &ret_attr);
787 811
788 if (ret != H_SUCCESS) { 812 if (ret != H_SUCCESS) {
789 rc1 = -EIO; 813 netdev_err(dev, "unable to change IPv4 checksum "
790 ibmveth_error_printk("unable to change checksum offload settings." 814 "offload settings. %d rc=%ld\n",
791 " %d rc=%ld\n", data, ret); 815 data, ret);
792 816
793 ret = h_illan_attributes(adapter->vdev->unit_address, 817 ret = h_illan_attributes(adapter->vdev->unit_address,
794 set_attr, clr_attr, &ret_attr); 818 set_attr, clr_attr, &ret_attr);
819 } else {
820 adapter->fw_ipv4_csum_support = data;
821 }
822
823 ret6 = h_illan_attributes(adapter->vdev->unit_address,
824 clr_attr6, set_attr6, &ret_attr);
825
826 if (ret6 != H_SUCCESS) {
827 netdev_err(dev, "unable to change IPv6 checksum "
828 "offload settings. %d rc=%ld\n",
 829 data, ret6);
830
831 ret = h_illan_attributes(adapter->vdev->unit_address,
832 set_attr6, clr_attr6,
833 &ret_attr);
795 } else 834 } else
835 adapter->fw_ipv6_csum_support = data;
836
837 if (ret == H_SUCCESS || ret6 == H_SUCCESS)
796 done(dev, data); 838 done(dev, data);
839 else
840 rc1 = -EIO;
797 } else { 841 } else {
798 rc1 = -EIO; 842 rc1 = -EIO;
799 ibmveth_error_printk("unable to change checksum offload settings." 843 netdev_err(dev, "unable to change checksum offload settings."
800 " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr); 844 " %d rc=%ld ret_attr=%lx\n", data, ret,
845 ret_attr);
801 } 846 }
802 847
803 if (restart) 848 if (restart)
@@ -821,13 +866,14 @@ static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
821 struct ibmveth_adapter *adapter = netdev_priv(dev); 866 struct ibmveth_adapter *adapter = netdev_priv(dev);
822 int rc = 0; 867 int rc = 0;
823 868
824 if (data && (dev->features & NETIF_F_IP_CSUM)) 869 if (data && (dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
825 return 0; 870 return 0;
826 if (!data && !(dev->features & NETIF_F_IP_CSUM)) 871 if (!data && !(dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
827 return 0; 872 return 0;
828 873
829 if (data && !adapter->rx_csum) 874 if (data && !adapter->rx_csum)
830 rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags); 875 rc = ibmveth_set_csum_offload(dev, data,
876 ibmveth_set_tx_csum_flags);
831 else 877 else
832 ibmveth_set_tx_csum_flags(dev, data); 878 ibmveth_set_tx_csum_flags(dev, data);
833 879
@@ -881,6 +927,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
881 .get_strings = ibmveth_get_strings, 927 .get_strings = ibmveth_get_strings,
882 .get_sset_count = ibmveth_get_sset_count, 928 .get_sset_count = ibmveth_get_sset_count,
883 .get_ethtool_stats = ibmveth_get_ethtool_stats, 929 .get_ethtool_stats = ibmveth_get_ethtool_stats,
930 .set_sg = ethtool_op_set_sg,
884}; 931};
885 932
886static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 933static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -890,129 +937,216 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
890 937
891#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) 938#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
892 939
893static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, 940static int ibmveth_send(struct ibmveth_adapter *adapter,
894 struct net_device *netdev) 941 union ibmveth_buf_desc *descs)
895{ 942{
896 struct ibmveth_adapter *adapter = netdev_priv(netdev);
897 union ibmveth_buf_desc desc;
898 unsigned long lpar_rc;
899 unsigned long correlator; 943 unsigned long correlator;
900 unsigned long flags;
901 unsigned int retry_count; 944 unsigned int retry_count;
902 unsigned int tx_dropped = 0; 945 unsigned long ret;
903 unsigned int tx_bytes = 0; 946
904 unsigned int tx_packets = 0; 947 /*
905 unsigned int tx_send_failed = 0; 948 * The retry count sets a maximum for the number of broadcast and
906 unsigned int tx_map_failed = 0; 949 * multicast destinations within the system.
907 int used_bounce = 0; 950 */
908 unsigned long data_dma_addr; 951 retry_count = 1024;
952 correlator = 0;
953 do {
954 ret = h_send_logical_lan(adapter->vdev->unit_address,
955 descs[0].desc, descs[1].desc,
956 descs[2].desc, descs[3].desc,
957 descs[4].desc, descs[5].desc,
958 correlator, &correlator);
959 } while ((ret == H_BUSY) && (retry_count--));
960
961 if (ret != H_SUCCESS && ret != H_DROPPED) {
962 netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
963 "with rc=%ld\n", ret);
964 return 1;
965 }
966
967 return 0;
968}
909 969
910 desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; 970static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
971 struct net_device *netdev)
972{
973 struct ibmveth_adapter *adapter = netdev_priv(netdev);
974 unsigned int desc_flags;
975 union ibmveth_buf_desc descs[6];
976 int last, i;
977 int force_bounce = 0;
978
979 /*
980 * veth handles a maximum of 6 segments including the header, so
981 * we have to linearize the skb if there are more than this.
982 */
983 if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
984 netdev->stats.tx_dropped++;
985 goto out;
986 }
911 987
988 /* veth can't checksum offload UDP */
912 if (skb->ip_summed == CHECKSUM_PARTIAL && 989 if (skb->ip_summed == CHECKSUM_PARTIAL &&
913 ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { 990 ((skb->protocol == htons(ETH_P_IP) &&
914 ibmveth_error_printk("tx: failed to checksum packet\n"); 991 ip_hdr(skb)->protocol != IPPROTO_TCP) ||
915 tx_dropped++; 992 (skb->protocol == htons(ETH_P_IPV6) &&
993 ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
994 skb_checksum_help(skb)) {
995
996 netdev_err(netdev, "tx: failed to checksum packet\n");
997 netdev->stats.tx_dropped++;
916 goto out; 998 goto out;
917 } 999 }
918 1000
1001 desc_flags = IBMVETH_BUF_VALID;
1002
919 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1003 if (skb->ip_summed == CHECKSUM_PARTIAL) {
920 unsigned char *buf = skb_transport_header(skb) + skb->csum_offset; 1004 unsigned char *buf = skb_transport_header(skb) +
1005 skb->csum_offset;
921 1006
922 desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD); 1007 desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
923 1008
924 /* Need to zero out the checksum */ 1009 /* Need to zero out the checksum */
925 buf[0] = 0; 1010 buf[0] = 0;
926 buf[1] = 0; 1011 buf[1] = 0;
927 } 1012 }
928 1013
929 data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 1014retry_bounce:
930 skb->len, DMA_TO_DEVICE); 1015 memset(descs, 0, sizeof(descs));
931 if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { 1016
932 if (!firmware_has_feature(FW_FEATURE_CMO)) 1017 /*
933 ibmveth_error_printk("tx: unable to map xmit buffer\n"); 1018 * If a linear packet is below the tx copybreak threshold then
1019 * copy it into the static bounce buffer. This avoids the
1020 * cost of a TCE insert and remove.
1021 */
1022 if (force_bounce || (!skb_is_nonlinear(skb) &&
1023 (skb->len < tx_copybreak))) {
934 skb_copy_from_linear_data(skb, adapter->bounce_buffer, 1024 skb_copy_from_linear_data(skb, adapter->bounce_buffer,
935 skb->len); 1025 skb->len);
936 desc.fields.address = adapter->bounce_buffer_dma; 1026
937 tx_map_failed++; 1027 descs[0].fields.flags_len = desc_flags | skb->len;
938 used_bounce = 1; 1028 descs[0].fields.address = adapter->bounce_buffer_dma;
939 wmb(); 1029
940 } else 1030 if (ibmveth_send(adapter, descs)) {
941 desc.fields.address = data_dma_addr; 1031 adapter->tx_send_failed++;
942 1032 netdev->stats.tx_dropped++;
943 /* send the frame. Arbitrarily set retrycount to 1024 */ 1033 } else {
944 correlator = 0; 1034 netdev->stats.tx_packets++;
945 retry_count = 1024; 1035 netdev->stats.tx_bytes += skb->len;
946 do { 1036 }
947 lpar_rc = h_send_logical_lan(adapter->vdev->unit_address, 1037
948 desc.desc, 0, 0, 0, 0, 0, 1038 goto out;
949 correlator, &correlator); 1039 }
950 } while ((lpar_rc == H_BUSY) && (retry_count--)); 1040
951 1041 /* Map the header */
952 if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) { 1042 descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
953 ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc); 1043 skb_headlen(skb),
954 ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n", 1044 DMA_TO_DEVICE);
955 (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0, 1045 if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
956 skb->len, desc.fields.address); 1046 goto map_failed;
957 tx_send_failed++; 1047
958 tx_dropped++; 1048 descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
959 } else { 1049
960 tx_packets++; 1050 /* Map the frags */
961 tx_bytes += skb->len; 1051 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
962 netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ 1052 unsigned long dma_addr;
1053 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1054
1055 dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
1056 frag->page_offset, frag->size,
1057 DMA_TO_DEVICE);
1058
1059 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
1060 goto map_failed_frags;
1061
1062 descs[i+1].fields.flags_len = desc_flags | frag->size;
1063 descs[i+1].fields.address = dma_addr;
963 } 1064 }
964 1065
965 if (!used_bounce) 1066 if (ibmveth_send(adapter, descs)) {
966 dma_unmap_single(&adapter->vdev->dev, data_dma_addr, 1067 adapter->tx_send_failed++;
967 skb->len, DMA_TO_DEVICE); 1068 netdev->stats.tx_dropped++;
1069 } else {
1070 netdev->stats.tx_packets++;
1071 netdev->stats.tx_bytes += skb->len;
1072 }
968 1073
969out: spin_lock_irqsave(&adapter->stats_lock, flags); 1074 for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
970 netdev->stats.tx_dropped += tx_dropped; 1075 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
971 netdev->stats.tx_bytes += tx_bytes; 1076 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
972 netdev->stats.tx_packets += tx_packets; 1077 DMA_TO_DEVICE);
973 adapter->tx_send_failed += tx_send_failed;
974 adapter->tx_map_failed += tx_map_failed;
975 spin_unlock_irqrestore(&adapter->stats_lock, flags);
976 1078
1079out:
977 dev_kfree_skb(skb); 1080 dev_kfree_skb(skb);
978 return NETDEV_TX_OK; 1081 return NETDEV_TX_OK;
1082
1083map_failed_frags:
1084 last = i+1;
1085 for (i = 0; i < last; i++)
1086 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1087 descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1088 DMA_TO_DEVICE);
1089
1090map_failed:
1091 if (!firmware_has_feature(FW_FEATURE_CMO))
1092 netdev_err(netdev, "tx: unable to map xmit buffer\n");
1093 adapter->tx_map_failed++;
1094 skb_linearize(skb);
1095 force_bounce = 1;
1096 goto retry_bounce;
979} 1097}
980 1098
981static int ibmveth_poll(struct napi_struct *napi, int budget) 1099static int ibmveth_poll(struct napi_struct *napi, int budget)
982{ 1100{
983 struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi); 1101 struct ibmveth_adapter *adapter =
1102 container_of(napi, struct ibmveth_adapter, napi);
984 struct net_device *netdev = adapter->netdev; 1103 struct net_device *netdev = adapter->netdev;
985 int frames_processed = 0; 1104 int frames_processed = 0;
986 unsigned long lpar_rc; 1105 unsigned long lpar_rc;
987 1106
988 restart_poll: 1107restart_poll:
989 do { 1108 do {
990 struct sk_buff *skb;
991
992 if (!ibmveth_rxq_pending_buffer(adapter)) 1109 if (!ibmveth_rxq_pending_buffer(adapter))
993 break; 1110 break;
994 1111
995 rmb(); 1112 smp_rmb();
996 if (!ibmveth_rxq_buffer_valid(adapter)) { 1113 if (!ibmveth_rxq_buffer_valid(adapter)) {
997 wmb(); /* suggested by larson1 */ 1114 wmb(); /* suggested by larson1 */
998 adapter->rx_invalid_buffer++; 1115 adapter->rx_invalid_buffer++;
999 ibmveth_debug_printk("recycling invalid buffer\n"); 1116 netdev_dbg(netdev, "recycling invalid buffer\n");
1000 ibmveth_rxq_recycle_buffer(adapter); 1117 ibmveth_rxq_recycle_buffer(adapter);
1001 } else { 1118 } else {
1119 struct sk_buff *skb, *new_skb;
1002 int length = ibmveth_rxq_frame_length(adapter); 1120 int length = ibmveth_rxq_frame_length(adapter);
1003 int offset = ibmveth_rxq_frame_offset(adapter); 1121 int offset = ibmveth_rxq_frame_offset(adapter);
1004 int csum_good = ibmveth_rxq_csum_good(adapter); 1122 int csum_good = ibmveth_rxq_csum_good(adapter);
1005 1123
1006 skb = ibmveth_rxq_get_buffer(adapter); 1124 skb = ibmveth_rxq_get_buffer(adapter);
1007 if (csum_good)
1008 skb->ip_summed = CHECKSUM_UNNECESSARY;
1009 1125
1010 ibmveth_rxq_harvest_buffer(adapter); 1126 new_skb = NULL;
1127 if (length < rx_copybreak)
1128 new_skb = netdev_alloc_skb(netdev, length);
1129
1130 if (new_skb) {
1131 skb_copy_to_linear_data(new_skb,
1132 skb->data + offset,
1133 length);
1134 if (rx_flush)
1135 ibmveth_flush_buffer(skb->data,
1136 length + offset);
1137 skb = new_skb;
1138 ibmveth_rxq_recycle_buffer(adapter);
1139 } else {
1140 ibmveth_rxq_harvest_buffer(adapter);
1141 skb_reserve(skb, offset);
1142 }
1011 1143
1012 skb_reserve(skb, offset);
1013 skb_put(skb, length); 1144 skb_put(skb, length);
1014 skb->protocol = eth_type_trans(skb, netdev); 1145 skb->protocol = eth_type_trans(skb, netdev);
1015 1146
1147 if (csum_good)
1148 skb->ip_summed = CHECKSUM_UNNECESSARY;
1149
1016 netif_receive_skb(skb); /* send it up */ 1150 netif_receive_skb(skb); /* send it up */
1017 1151
1018 netdev->stats.rx_packets++; 1152 netdev->stats.rx_packets++;
@@ -1030,7 +1164,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
1030 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1164 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1031 VIO_IRQ_ENABLE); 1165 VIO_IRQ_ENABLE);
1032 1166
1033 ibmveth_assert(lpar_rc == H_SUCCESS); 1167 BUG_ON(lpar_rc != H_SUCCESS);
1034 1168
1035 napi_complete(napi); 1169 napi_complete(napi);
1036 1170
@@ -1054,7 +1188,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1054 if (napi_schedule_prep(&adapter->napi)) { 1188 if (napi_schedule_prep(&adapter->napi)) {
1055 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1189 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1056 VIO_IRQ_DISABLE); 1190 VIO_IRQ_DISABLE);
1057 ibmveth_assert(lpar_rc == H_SUCCESS); 1191 BUG_ON(lpar_rc != H_SUCCESS);
1058 __napi_schedule(&adapter->napi); 1192 __napi_schedule(&adapter->napi);
1059 } 1193 }
1060 return IRQ_HANDLED; 1194 return IRQ_HANDLED;
@@ -1071,8 +1205,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1071 IbmVethMcastEnableRecv | 1205 IbmVethMcastEnableRecv |
1072 IbmVethMcastDisableFiltering, 1206 IbmVethMcastDisableFiltering,
1073 0); 1207 0);
1074 if(lpar_rc != H_SUCCESS) { 1208 if (lpar_rc != H_SUCCESS) {
1075 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc); 1209 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1210 "entering promisc mode\n", lpar_rc);
1076 } 1211 }
1077 } else { 1212 } else {
1078 struct netdev_hw_addr *ha; 1213 struct netdev_hw_addr *ha;
@@ -1082,19 +1217,23 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1082 IbmVethMcastDisableFiltering | 1217 IbmVethMcastDisableFiltering |
1083 IbmVethMcastClearFilterTable, 1218 IbmVethMcastClearFilterTable,
1084 0); 1219 0);
1085 if(lpar_rc != H_SUCCESS) { 1220 if (lpar_rc != H_SUCCESS) {
1086 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc); 1221 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1222 "attempting to clear filter table\n",
1223 lpar_rc);
1087 } 1224 }
1088 /* add the addresses to the filter table */ 1225 /* add the addresses to the filter table */
1089 netdev_for_each_mc_addr(ha, netdev) { 1226 netdev_for_each_mc_addr(ha, netdev) {
1090 // add the multicast address to the filter table 1227 /* add the multicast address to the filter table */
1091 unsigned long mcast_addr = 0; 1228 unsigned long mcast_addr = 0;
1092 memcpy(((char *)&mcast_addr)+2, ha->addr, 6); 1229 memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
1093 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1230 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1094 IbmVethMcastAddFilter, 1231 IbmVethMcastAddFilter,
1095 mcast_addr); 1232 mcast_addr);
1096 if(lpar_rc != H_SUCCESS) { 1233 if (lpar_rc != H_SUCCESS) {
1097 ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc); 1234 netdev_err(netdev, "h_multicast_ctrl rc=%ld "
1235 "when adding an entry to the filter "
1236 "table\n", lpar_rc);
1098 } 1237 }
1099 } 1238 }
1100 1239
@@ -1102,8 +1241,9 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1102 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1241 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1103 IbmVethMcastEnableFiltering, 1242 IbmVethMcastEnableFiltering,
1104 0); 1243 0);
1105 if(lpar_rc != H_SUCCESS) { 1244 if (lpar_rc != H_SUCCESS) {
1106 ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc); 1245 netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1246 "enabling filtering\n", lpar_rc);
1107 } 1247 }
1108 } 1248 }
1109} 1249}
@@ -1116,14 +1256,14 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1116 int i, rc; 1256 int i, rc;
1117 int need_restart = 0; 1257 int need_restart = 0;
1118 1258
1119 if (new_mtu < IBMVETH_MAX_MTU) 1259 if (new_mtu < IBMVETH_MIN_MTU)
1120 return -EINVAL; 1260 return -EINVAL;
1121 1261
1122 for (i = 0; i < IbmVethNumBufferPools; i++) 1262 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1123 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) 1263 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
1124 break; 1264 break;
1125 1265
1126 if (i == IbmVethNumBufferPools) 1266 if (i == IBMVETH_NUM_BUFF_POOLS)
1127 return -EINVAL; 1267 return -EINVAL;
1128 1268
1129 /* Deactivate all the buffer pools so that the next loop can activate 1269 /* Deactivate all the buffer pools so that the next loop can activate
@@ -1136,7 +1276,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1136 } 1276 }
1137 1277
1138 /* Look for an active buffer pool that can hold the new MTU */ 1278 /* Look for an active buffer pool that can hold the new MTU */
1139 for(i = 0; i<IbmVethNumBufferPools; i++) { 1279 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1140 adapter->rx_buff_pool[i].active = 1; 1280 adapter->rx_buff_pool[i].active = 1;
1141 1281
1142 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { 1282 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
@@ -1190,7 +1330,7 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1190 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE; 1330 ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1191 ret += IOMMU_PAGE_ALIGN(netdev->mtu); 1331 ret += IOMMU_PAGE_ALIGN(netdev->mtu);
1192 1332
1193 for (i = 0; i < IbmVethNumBufferPools; i++) { 1333 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1194 /* add the size of the active receive buffers */ 1334 /* add the size of the active receive buffers */
1195 if (adapter->rx_buff_pool[i].active) 1335 if (adapter->rx_buff_pool[i].active)
1196 ret += 1336 ret +=
@@ -1219,41 +1359,36 @@ static const struct net_device_ops ibmveth_netdev_ops = {
1219#endif 1359#endif
1220}; 1360};
1221 1361
1222static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 1362static int __devinit ibmveth_probe(struct vio_dev *dev,
1363 const struct vio_device_id *id)
1223{ 1364{
1224 int rc, i; 1365 int rc, i;
1225 long ret;
1226 struct net_device *netdev; 1366 struct net_device *netdev;
1227 struct ibmveth_adapter *adapter; 1367 struct ibmveth_adapter *adapter;
1228 unsigned long set_attr, ret_attr;
1229
1230 unsigned char *mac_addr_p; 1368 unsigned char *mac_addr_p;
1231 unsigned int *mcastFilterSize_p; 1369 unsigned int *mcastFilterSize_p;
1232 1370
1371 dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
1372 dev->unit_address);
1233 1373
1234 ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n", 1374 mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
1235 dev->unit_address); 1375 NULL);
1236 1376 if (!mac_addr_p) {
1237 mac_addr_p = (unsigned char *) vio_get_attribute(dev, 1377 dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
1238 VETH_MAC_ADDR, NULL); 1378 return -EINVAL;
1239 if(!mac_addr_p) {
1240 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
1241 "attribute\n", __FILE__, __LINE__);
1242 return 0;
1243 } 1379 }
1244 1380
1245 mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev, 1381 mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
1246 VETH_MCAST_FILTER_SIZE, NULL); 1382 VETH_MCAST_FILTER_SIZE, NULL);
1247 if(!mcastFilterSize_p) { 1383 if (!mcastFilterSize_p) {
1248 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find " 1384 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
1249 "VETH_MCAST_FILTER_SIZE attribute\n", 1385 "attribute\n");
1250 __FILE__, __LINE__); 1386 return -EINVAL;
1251 return 0;
1252 } 1387 }
1253 1388
1254 netdev = alloc_etherdev(sizeof(struct ibmveth_adapter)); 1389 netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
1255 1390
1256 if(!netdev) 1391 if (!netdev)
1257 return -ENOMEM; 1392 return -ENOMEM;
1258 1393
1259 adapter = netdev_priv(netdev); 1394 adapter = netdev_priv(netdev);
@@ -1261,19 +1396,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1261 1396
1262 adapter->vdev = dev; 1397 adapter->vdev = dev;
1263 adapter->netdev = netdev; 1398 adapter->netdev = netdev;
1264 adapter->mcastFilterSize= *mcastFilterSize_p; 1399 adapter->mcastFilterSize = *mcastFilterSize_p;
1265 adapter->pool_config = 0; 1400 adapter->pool_config = 0;
1266 1401
1267 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); 1402 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1268 1403
1269 /* Some older boxes running PHYP non-natively have an OF that 1404 /*
1270 returns a 8-byte local-mac-address field (and the first 1405 * an 8-byte local-mac-address field (and the first 2 bytes have to be
1271 2 bytes have to be ignored) while newer boxes' OF return 1406 * a 8-byte local-mac-address field (and the first 2 bytes have to be
1272 a 6-byte field. Note that IEEE 1275 specifies that 1407 * ignored) while newer boxes' OF return a 6-byte field. Note that
1273 local-mac-address must be a 6-byte field. 1408 * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
1274 The RPA doc specifies that the first byte must be 10b, so 1409 * The RPA doc specifies that the first byte must be 10b, so we'll
1275 we'll just look for it to solve this 8 vs. 6 byte field issue */ 1410 * just look for it to solve this 8 vs. 6 byte field issue
1276 1411 */
1277 if ((*mac_addr_p & 0x3) != 0x02) 1412 if ((*mac_addr_p & 0x3) != 0x02)
1278 mac_addr_p += 2; 1413 mac_addr_p += 2;
1279 1414
@@ -1284,12 +1419,11 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1284 netdev->netdev_ops = &ibmveth_netdev_ops; 1419 netdev->netdev_ops = &ibmveth_netdev_ops;
1285 netdev->ethtool_ops = &netdev_ethtool_ops; 1420 netdev->ethtool_ops = &netdev_ethtool_ops;
1286 SET_NETDEV_DEV(netdev, &dev->dev); 1421 SET_NETDEV_DEV(netdev, &dev->dev);
1287 netdev->features |= NETIF_F_LLTX; 1422 netdev->features |= NETIF_F_SG;
1288 spin_lock_init(&adapter->stats_lock);
1289 1423
1290 memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); 1424 memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
1291 1425
1292 for(i = 0; i<IbmVethNumBufferPools; i++) { 1426 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1293 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; 1427 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1294 int error; 1428 int error;
1295 1429
@@ -1302,41 +1436,25 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1302 kobject_uevent(kobj, KOBJ_ADD); 1436 kobject_uevent(kobj, KOBJ_ADD);
1303 } 1437 }
1304 1438
1305 ibmveth_debug_printk("adapter @ 0x%p\n", adapter); 1439 netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
1306 1440
1307 adapter->buffer_list_dma = DMA_ERROR_CODE; 1441 adapter->buffer_list_dma = DMA_ERROR_CODE;
1308 adapter->filter_list_dma = DMA_ERROR_CODE; 1442 adapter->filter_list_dma = DMA_ERROR_CODE;
1309 adapter->rx_queue.queue_dma = DMA_ERROR_CODE; 1443 adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
1310 1444
1311 ibmveth_debug_printk("registering netdev...\n"); 1445 netdev_dbg(netdev, "registering netdev...\n");
1312
1313 ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);
1314
1315 if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
1316 !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
1317 (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
1318 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
1319
1320 ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);
1321 1446
1322 if (ret == H_SUCCESS) { 1447 ibmveth_set_csum_offload(netdev, 1, ibmveth_set_tx_csum_flags);
1323 adapter->rx_csum = 1;
1324 netdev->features |= NETIF_F_IP_CSUM;
1325 } else
1326 ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
1327 }
1328 1448
1329 rc = register_netdev(netdev); 1449 rc = register_netdev(netdev);
1330 1450
1331 if(rc) { 1451 if (rc) {
1332 ibmveth_debug_printk("failed to register netdev rc=%d\n", rc); 1452 netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
1333 free_netdev(netdev); 1453 free_netdev(netdev);
1334 return rc; 1454 return rc;
1335 } 1455 }
1336 1456
1337 ibmveth_debug_printk("registered\n"); 1457 netdev_dbg(netdev, "registered\n");
1338
1339 ibmveth_proc_register_adapter(adapter);
1340 1458
1341 return 0; 1459 return 0;
1342} 1460}
@@ -1347,114 +1465,23 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
1347 struct ibmveth_adapter *adapter = netdev_priv(netdev); 1465 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1348 int i; 1466 int i;
1349 1467
1350 for(i = 0; i<IbmVethNumBufferPools; i++) 1468 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1351 kobject_put(&adapter->rx_buff_pool[i].kobj); 1469 kobject_put(&adapter->rx_buff_pool[i].kobj);
1352 1470
1353 unregister_netdev(netdev); 1471 unregister_netdev(netdev);
1354 1472
1355 ibmveth_proc_unregister_adapter(adapter);
1356
1357 free_netdev(netdev); 1473 free_netdev(netdev);
1358 dev_set_drvdata(&dev->dev, NULL); 1474 dev_set_drvdata(&dev->dev, NULL);
1359 1475
1360 return 0; 1476 return 0;
1361} 1477}
1362 1478
1363#ifdef CONFIG_PROC_FS
1364static void ibmveth_proc_register_driver(void)
1365{
1366 ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net);
1367 if (ibmveth_proc_dir) {
1368 }
1369}
1370
1371static void ibmveth_proc_unregister_driver(void)
1372{
1373 remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
1374}
1375
1376static int ibmveth_show(struct seq_file *seq, void *v)
1377{
1378 struct ibmveth_adapter *adapter = seq->private;
1379 char *current_mac = (char *) adapter->netdev->dev_addr;
1380 char *firmware_mac = (char *) &adapter->mac_addr;
1381
1382 seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
1383
1384 seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
1385 seq_printf(seq, "Current MAC: %pM\n", current_mac);
1386 seq_printf(seq, "Firmware MAC: %pM\n", firmware_mac);
1387
1388 seq_printf(seq, "\nAdapter Statistics:\n");
1389 seq_printf(seq, " TX: vio_map_single failres: %lld\n", adapter->tx_map_failed);
1390 seq_printf(seq, " send failures: %lld\n", adapter->tx_send_failed);
1391 seq_printf(seq, " RX: replenish task cycles: %lld\n", adapter->replenish_task_cycles);
1392 seq_printf(seq, " alloc_skb_failures: %lld\n", adapter->replenish_no_mem);
1393 seq_printf(seq, " add buffer failures: %lld\n", adapter->replenish_add_buff_failure);
1394 seq_printf(seq, " invalid buffers: %lld\n", adapter->rx_invalid_buffer);
1395 seq_printf(seq, " no buffers: %lld\n", adapter->rx_no_buffer);
1396
1397 return 0;
1398}
1399
1400static int ibmveth_proc_open(struct inode *inode, struct file *file)
1401{
1402 return single_open(file, ibmveth_show, PDE(inode)->data);
1403}
1404
1405static const struct file_operations ibmveth_proc_fops = {
1406 .owner = THIS_MODULE,
1407 .open = ibmveth_proc_open,
1408 .read = seq_read,
1409 .llseek = seq_lseek,
1410 .release = single_release,
1411};
1412
1413static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1414{
1415 struct proc_dir_entry *entry;
1416 if (ibmveth_proc_dir) {
1417 char u_addr[10];
1418 sprintf(u_addr, "%x", adapter->vdev->unit_address);
1419 entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir,
1420 &ibmveth_proc_fops, adapter);
1421 if (!entry)
1422 ibmveth_error_printk("Cannot create adapter proc entry");
1423 }
1424}
1425
1426static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1427{
1428 if (ibmveth_proc_dir) {
1429 char u_addr[10];
1430 sprintf(u_addr, "%x", adapter->vdev->unit_address);
1431 remove_proc_entry(u_addr, ibmveth_proc_dir);
1432 }
1433}
1434
1435#else /* CONFIG_PROC_FS */
1436static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1437{
1438}
1439
1440static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1441{
1442}
1443static void ibmveth_proc_register_driver(void)
1444{
1445}
1446
1447static void ibmveth_proc_unregister_driver(void)
1448{
1449}
1450#endif /* CONFIG_PROC_FS */
1451
1452static struct attribute veth_active_attr; 1479static struct attribute veth_active_attr;
1453static struct attribute veth_num_attr; 1480static struct attribute veth_num_attr;
1454static struct attribute veth_size_attr; 1481static struct attribute veth_size_attr;
1455 1482
1456static ssize_t veth_pool_show(struct kobject * kobj, 1483static ssize_t veth_pool_show(struct kobject *kobj,
1457 struct attribute * attr, char * buf) 1484 struct attribute *attr, char *buf)
1458{ 1485{
1459 struct ibmveth_buff_pool *pool = container_of(kobj, 1486 struct ibmveth_buff_pool *pool = container_of(kobj,
1460 struct ibmveth_buff_pool, 1487 struct ibmveth_buff_pool,
@@ -1469,8 +1496,8 @@ static ssize_t veth_pool_show(struct kobject * kobj,
1469 return 0; 1496 return 0;
1470} 1497}
1471 1498
1472static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr, 1499static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
1473const char * buf, size_t count) 1500 const char *buf, size_t count)
1474{ 1501{
1475 struct ibmveth_buff_pool *pool = container_of(kobj, 1502 struct ibmveth_buff_pool *pool = container_of(kobj,
1476 struct ibmveth_buff_pool, 1503 struct ibmveth_buff_pool,
@@ -1484,8 +1511,9 @@ const char * buf, size_t count)
1484 if (attr == &veth_active_attr) { 1511 if (attr == &veth_active_attr) {
1485 if (value && !pool->active) { 1512 if (value && !pool->active) {
1486 if (netif_running(netdev)) { 1513 if (netif_running(netdev)) {
1487 if(ibmveth_alloc_buffer_pool(pool)) { 1514 if (ibmveth_alloc_buffer_pool(pool)) {
1488 ibmveth_error_printk("unable to alloc pool\n"); 1515 netdev_err(netdev,
1516 "unable to alloc pool\n");
1489 return -ENOMEM; 1517 return -ENOMEM;
1490 } 1518 }
1491 pool->active = 1; 1519 pool->active = 1;
@@ -1494,14 +1522,15 @@ const char * buf, size_t count)
1494 adapter->pool_config = 0; 1522 adapter->pool_config = 0;
1495 if ((rc = ibmveth_open(netdev))) 1523 if ((rc = ibmveth_open(netdev)))
1496 return rc; 1524 return rc;
1497 } else 1525 } else {
1498 pool->active = 1; 1526 pool->active = 1;
1527 }
1499 } else if (!value && pool->active) { 1528 } else if (!value && pool->active) {
1500 int mtu = netdev->mtu + IBMVETH_BUFF_OH; 1529 int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1501 int i; 1530 int i;
1502 /* Make sure there is a buffer pool with buffers that 1531 /* Make sure there is a buffer pool with buffers that
1503 can hold a packet of the size of the MTU */ 1532 can hold a packet of the size of the MTU */
1504 for (i = 0; i < IbmVethNumBufferPools; i++) { 1533 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1505 if (pool == &adapter->rx_buff_pool[i]) 1534 if (pool == &adapter->rx_buff_pool[i])
1506 continue; 1535 continue;
1507 if (!adapter->rx_buff_pool[i].active) 1536 if (!adapter->rx_buff_pool[i].active)
@@ -1510,8 +1539,8 @@ const char * buf, size_t count)
1510 break; 1539 break;
1511 } 1540 }
1512 1541
1513 if (i == IbmVethNumBufferPools) { 1542 if (i == IBMVETH_NUM_BUFF_POOLS) {
1514 ibmveth_error_printk("no active pool >= MTU\n"); 1543 netdev_err(netdev, "no active pool >= MTU\n");
1515 return -EPERM; 1544 return -EPERM;
1516 } 1545 }
1517 1546
@@ -1526,9 +1555,9 @@ const char * buf, size_t count)
1526 pool->active = 0; 1555 pool->active = 0;
1527 } 1556 }
1528 } else if (attr == &veth_num_attr) { 1557 } else if (attr == &veth_num_attr) {
1529 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) 1558 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
1530 return -EINVAL; 1559 return -EINVAL;
1531 else { 1560 } else {
1532 if (netif_running(netdev)) { 1561 if (netif_running(netdev)) {
1533 adapter->pool_config = 1; 1562 adapter->pool_config = 1;
1534 ibmveth_close(netdev); 1563 ibmveth_close(netdev);
@@ -1536,13 +1565,14 @@ const char * buf, size_t count)
1536 pool->size = value; 1565 pool->size = value;
1537 if ((rc = ibmveth_open(netdev))) 1566 if ((rc = ibmveth_open(netdev)))
1538 return rc; 1567 return rc;
1539 } else 1568 } else {
1540 pool->size = value; 1569 pool->size = value;
1570 }
1541 } 1571 }
1542 } else if (attr == &veth_size_attr) { 1572 } else if (attr == &veth_size_attr) {
1543 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) 1573 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
1544 return -EINVAL; 1574 return -EINVAL;
1545 else { 1575 } else {
1546 if (netif_running(netdev)) { 1576 if (netif_running(netdev)) {
1547 adapter->pool_config = 1; 1577 adapter->pool_config = 1;
1548 ibmveth_close(netdev); 1578 ibmveth_close(netdev);
@@ -1550,8 +1580,9 @@ const char * buf, size_t count)
1550 pool->buff_size = value; 1580 pool->buff_size = value;
1551 if ((rc = ibmveth_open(netdev))) 1581 if ((rc = ibmveth_open(netdev)))
1552 return rc; 1582 return rc;
1553 } else 1583 } else {
1554 pool->buff_size = value; 1584 pool->buff_size = value;
1585 }
1555 } 1586 }
1556 } 1587 }
1557 1588
@@ -1561,16 +1592,16 @@ const char * buf, size_t count)
1561} 1592}
1562 1593
1563 1594
1564#define ATTR(_name, _mode) \ 1595#define ATTR(_name, _mode) \
1565 struct attribute veth_##_name##_attr = { \ 1596 struct attribute veth_##_name##_attr = { \
1566 .name = __stringify(_name), .mode = _mode, \ 1597 .name = __stringify(_name), .mode = _mode, \
1567 }; 1598 };
1568 1599
1569static ATTR(active, 0644); 1600static ATTR(active, 0644);
1570static ATTR(num, 0644); 1601static ATTR(num, 0644);
1571static ATTR(size, 0644); 1602static ATTR(size, 0644);
1572 1603
1573static struct attribute * veth_pool_attrs[] = { 1604static struct attribute *veth_pool_attrs[] = {
1574 &veth_active_attr, 1605 &veth_active_attr,
1575 &veth_num_attr, 1606 &veth_num_attr,
1576 &veth_size_attr, 1607 &veth_size_attr,
@@ -1595,7 +1626,7 @@ static int ibmveth_resume(struct device *dev)
1595 return 0; 1626 return 0;
1596} 1627}
1597 1628
1598static struct vio_device_id ibmveth_device_table[] __devinitdata= { 1629static struct vio_device_id ibmveth_device_table[] __devinitdata = {
1599 { "network", "IBM,l-lan"}, 1630 { "network", "IBM,l-lan"},
1600 { "", "" } 1631 { "", "" }
1601}; 1632};
@@ -1619,9 +1650,8 @@ static struct vio_driver ibmveth_driver = {
1619 1650
1620static int __init ibmveth_module_init(void) 1651static int __init ibmveth_module_init(void)
1621{ 1652{
1622 ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version); 1653 printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
1623 1654 ibmveth_driver_string, ibmveth_driver_version);
1624 ibmveth_proc_register_driver();
1625 1655
1626 return vio_register_driver(&ibmveth_driver); 1656 return vio_register_driver(&ibmveth_driver);
1627} 1657}
@@ -1629,7 +1659,6 @@ static int __init ibmveth_module_init(void)
1629static void __exit ibmveth_module_exit(void) 1659static void __exit ibmveth_module_exit(void)
1630{ 1660{
1631 vio_unregister_driver(&ibmveth_driver); 1661 vio_unregister_driver(&ibmveth_driver);
1632 ibmveth_proc_unregister_driver();
1633} 1662}
1634 1663
1635module_init(ibmveth_module_init); 1664module_init(ibmveth_module_init);
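
The transmit rewrite above drops the single-descriptor, NETIF_F_LLTX path in favor of scatter-gather. Two decisions drive it: small linear frames are copied into the pre-mapped bounce buffer so no TCE has to be inserted and removed per packet, and frames with more fragments than h_send_logical_lan's six descriptors are linearized first. A stand-alone sketch of just that decision logic; the 128-byte threshold is an assumption standing in for tx_copybreak, which the code compares against but which is defined outside this hunk:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define VETH_MAX_DESCS	6	/* h_send_logical_lan takes six buffer descriptors */
#define TX_COPYBREAK	128	/* assumed threshold */

struct frame {
	size_t len;	/* total frame length in bytes */
	int nr_frags;	/* non-linear fragments */
};

/* Small linear frames: copy into the pre-mapped bounce buffer,
 * skipping a DMA map/unmap per packet. */
static bool use_bounce_buffer(const struct frame *f)
{
	return f->nr_frags == 0 && f->len < TX_COPYBREAK;
}

/* Header plus fragments must fit the six descriptors, else the
 * skb is linearized first. */
static bool must_linearize(const struct frame *f)
{
	return f->nr_frags + 1 > VETH_MAX_DESCS;
}

int main(void)
{
	struct frame tiny = { .len = 60, .nr_frags = 0 };
	struct frame jumbo = { .len = 9000, .nr_frags = 7 };

	printf("tiny -> bounce buffer: %d\n", use_bounce_buffer(&tiny));	/* 1 */
	printf("jumbo -> linearize: %d\n", must_linearize(&jumbo));		/* 1 */
	return 0;
}
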
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index ec76ace66c6b..43a794fab9ff 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -1,26 +1,28 @@
1/**************************************************************************/ 1/*
2/* */ 2 * IBM Power Virtual Ethernet Device Driver
3/* IBM eServer i/[Series Virtual Ethernet Device Driver */ 3 *
4/* Copyright (C) 2003 IBM Corp. */ 4 * This program is free software; you can redistribute it and/or modify
5/* Dave Larson (larson1@us.ibm.com) */ 5 * it under the terms of the GNU General Public License as published by
6/* Santiago Leon (santil@us.ibm.com) */ 6 * the Free Software Foundation; either version 2 of the License, or
7/* */ 7 * (at your option) any later version.
8/* This program is free software; you can redistribute it and/or modify */ 8 *
9/* it under the terms of the GNU General Public License as published by */ 9 * This program is distributed in the hope that it will be useful,
10/* the Free Software Foundation; either version 2 of the License, or */ 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11/* (at your option) any later version. */ 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12/* */ 12 * GNU General Public License for more details.
13/* This program is distributed in the hope that it will be useful, */ 13 *
14/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ 14 * You should have received a copy of the GNU General Public License
15/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ 15 * along with this program; if not, write to the Free Software
16/* GNU General Public License for more details. */ 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17/* */ 17 *
18/* You should have received a copy of the GNU General Public License */ 18 * Copyright (C) IBM Corporation, 2003, 2010
19/* along with this program; if not, write to the Free Software */ 19 *
20/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */ 20 * Authors: Dave Larson <larson1@us.ibm.com>
21/* USA */ 21 * Santiago Leon <santil@linux.vnet.ibm.com>
22/* */ 22 * Brian King <brking@linux.vnet.ibm.com>
23/**************************************************************************/ 23 * Robert Jennings <rcj@linux.vnet.ibm.com>
24 * Anton Blanchard <anton@au.ibm.com>
25 */
24 26
25#ifndef _IBMVETH_H 27#ifndef _IBMVETH_H
26#define _IBMVETH_H 28#define _IBMVETH_H
@@ -92,17 +94,17 @@ static inline long h_illan_attributes(unsigned long unit_address,
92#define h_change_logical_lan_mac(ua, mac) \ 94#define h_change_logical_lan_mac(ua, mac) \
93 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) 95 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
94 96
95#define IbmVethNumBufferPools 5 97#define IBMVETH_NUM_BUFF_POOLS 5
96#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */ 98#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2Mb */
97#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ 99#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
98#define IBMVETH_MAX_MTU 68 100#define IBMVETH_MIN_MTU 68
99#define IBMVETH_MAX_POOL_COUNT 4096 101#define IBMVETH_MAX_POOL_COUNT 4096
100#define IBMVETH_BUFF_LIST_SIZE 4096 102#define IBMVETH_BUFF_LIST_SIZE 4096
101#define IBMVETH_FILT_LIST_SIZE 4096 103#define IBMVETH_FILT_LIST_SIZE 4096
102#define IBMVETH_MAX_BUF_SIZE (1024 * 128) 104#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
103 105
104static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; 106static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
105static int pool_count[] = { 256, 768, 256, 256, 256 }; 107static int pool_count[] = { 256, 512, 256, 256, 256 };
106static int pool_active[] = { 1, 1, 0, 0, 0}; 108static int pool_active[] = { 1, 1, 0, 0, 0};
107 109
108#define IBM_VETH_INVALID_MAP ((u16)0xffff) 110#define IBM_VETH_INVALID_MAP ((u16)0xffff)
@@ -142,13 +144,15 @@ struct ibmveth_adapter {
142 void * filter_list_addr; 144 void * filter_list_addr;
143 dma_addr_t buffer_list_dma; 145 dma_addr_t buffer_list_dma;
144 dma_addr_t filter_list_dma; 146 dma_addr_t filter_list_dma;
145 struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools]; 147 struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
146 struct ibmveth_rx_q rx_queue; 148 struct ibmveth_rx_q rx_queue;
147 int pool_config; 149 int pool_config;
148 int rx_csum; 150 int rx_csum;
149 void *bounce_buffer; 151 void *bounce_buffer;
150 dma_addr_t bounce_buffer_dma; 152 dma_addr_t bounce_buffer_dma;
151 153
154 u64 fw_ipv6_csum_support;
155 u64 fw_ipv4_csum_support;
152 /* adapter specific stats */ 156 /* adapter specific stats */
153 u64 replenish_task_cycles; 157 u64 replenish_task_cycles;
154 u64 replenish_no_mem; 158 u64 replenish_no_mem;
@@ -158,7 +162,6 @@ struct ibmveth_adapter {
158 u64 rx_no_buffer; 162 u64 rx_no_buffer;
159 u64 tx_map_failed; 163 u64 tx_map_failed;
160 u64 tx_send_failed; 164 u64 tx_send_failed;
161 spinlock_t stats_lock;
162}; 165};
163 166
164struct ibmveth_buf_desc_fields { 167struct ibmveth_buf_desc_fields {
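
The constant renames above also make the MTU logic in ibmveth.c read correctly: IBMVETH_MIN_MTU (formerly the misnamed IBMVETH_MAX_MTU) is the floor, and the effective ceiling falls out of the largest pool's buffer size less the 22-byte overhead. A stand-alone model of the acceptance check performed by ibmveth_change_mtu(), reusing the pool table above:

#include <stdio.h>

#define NUM_POOLS	5
#define BUFF_OH		22	/* 14-byte ethernet header + 8-byte opaque handle */
#define MIN_MTU		68

static const int pool_size[NUM_POOLS] = {
	512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64
};

/* An MTU is acceptable if some pool's buffers can hold the frame
 * plus the per-buffer overhead. */
static int mtu_fits_some_pool(int new_mtu)
{
	int i;

	if (new_mtu < MIN_MTU)
		return 0;
	for (i = 0; i < NUM_POOLS; i++)
		if (new_mtu + BUFF_OH < pool_size[i])
			return 1;
	return 0;
}

int main(void)
{
	printf("MTU 1500: %d\n", mtu_fits_some_pool(1500));	/* 1, fits the 2KB pool */
	printf("MTU 65535: %d\n", mtu_fits_some_pool(65535));	/* 0, 65557 > 64KB */
	return 0;
}
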
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 187622f1c816..bc183f5487cb 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -132,6 +132,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
132 case E1000_DEV_ID_82580_SERDES: 132 case E1000_DEV_ID_82580_SERDES:
133 case E1000_DEV_ID_82580_SGMII: 133 case E1000_DEV_ID_82580_SGMII:
134 case E1000_DEV_ID_82580_COPPER_DUAL: 134 case E1000_DEV_ID_82580_COPPER_DUAL:
135 case E1000_DEV_ID_DH89XXCC_SGMII:
136 case E1000_DEV_ID_DH89XXCC_SERDES:
135 mac->type = e1000_82580; 137 mac->type = e1000_82580;
136 break; 138 break;
137 case E1000_DEV_ID_I350_COPPER: 139 case E1000_DEV_ID_I350_COPPER:
@@ -282,10 +284,18 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
282 284
283 /* Verify phy id and set remaining function pointers */ 285 /* Verify phy id and set remaining function pointers */
284 switch (phy->id) { 286 switch (phy->id) {
287 case I347AT4_E_PHY_ID:
288 case M88E1112_E_PHY_ID:
285 case M88E1111_I_PHY_ID: 289 case M88E1111_I_PHY_ID:
286 phy->type = e1000_phy_m88; 290 phy->type = e1000_phy_m88;
287 phy->ops.get_phy_info = igb_get_phy_info_m88; 291 phy->ops.get_phy_info = igb_get_phy_info_m88;
288 phy->ops.get_cable_length = igb_get_cable_length_m88; 292
293 if (phy->id == I347AT4_E_PHY_ID ||
294 phy->id == M88E1112_E_PHY_ID)
295 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
296 else
297 phy->ops.get_cable_length = igb_get_cable_length_m88;
298
289 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; 299 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
290 break; 300 break;
291 case IGP03E1000_E_PHY_ID: 301 case IGP03E1000_E_PHY_ID:
@@ -1058,7 +1068,11 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1058 } 1068 }
1059 switch (hw->phy.type) { 1069 switch (hw->phy.type) {
1060 case e1000_phy_m88: 1070 case e1000_phy_m88:
1061 ret_val = igb_copper_link_setup_m88(hw); 1071 if (hw->phy.id == I347AT4_E_PHY_ID ||
1072 hw->phy.id == M88E1112_E_PHY_ID)
1073 ret_val = igb_copper_link_setup_m88_gen2(hw);
1074 else
1075 ret_val = igb_copper_link_setup_m88(hw);
1062 break; 1076 break;
1063 case e1000_phy_igp_3: 1077 case e1000_phy_igp_3:
1064 ret_val = igb_copper_link_setup_igp(hw); 1078 ret_val = igb_copper_link_setup_igp(hw);
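
Both hunks above follow the same pattern: the two new PHYs stay in the m88 family, but their IDs are tested explicitly to steer them onto the gen2 cable-length and link-setup routines. A condensed, compilable model of that ID test, with the values taken from the e1000_defines.h hunk below:

#include <stdint.h>
#include <stdio.h>

#define M88E1111_I_PHY_ID	0x01410CC0
#define M88E1112_E_PHY_ID	0x01410C90
#define I347AT4_E_PHY_ID	0x01410DC0

/* The gen2 routines serve exactly these two newcomers. */
static int phy_uses_m88_gen2(uint32_t phy_id)
{
	return phy_id == I347AT4_E_PHY_ID || phy_id == M88E1112_E_PHY_ID;
}

int main(void)
{
	printf("%d\n", phy_uses_m88_gen2(I347AT4_E_PHY_ID));	/* 1 */
	printf("%d\n", phy_uses_m88_gen2(M88E1111_I_PHY_ID));	/* 0, keeps the old routines */
	return 0;
}
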
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index bbd2ec308eb0..62222796a8b3 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -634,6 +634,8 @@
634 * E = External 634 * E = External
635 */ 635 */
636#define M88E1111_I_PHY_ID 0x01410CC0 636#define M88E1111_I_PHY_ID 0x01410CC0
637#define M88E1112_E_PHY_ID 0x01410C90
638#define I347AT4_E_PHY_ID 0x01410DC0
637#define IGP03E1000_E_PHY_ID 0x02A80390 639#define IGP03E1000_E_PHY_ID 0x02A80390
638#define I82580_I_PHY_ID 0x015403A0 640#define I82580_I_PHY_ID 0x015403A0
639#define I350_I_PHY_ID 0x015403B0 641#define I350_I_PHY_ID 0x015403B0
@@ -702,6 +704,35 @@
702#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 704#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
703#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ 705#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
704 706
707/* Intel i347-AT4 Registers */
708
709#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
710#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
711#define I347AT4_PAGE_SELECT 0x16
712
713/* i347-AT4 Extended PHY Specific Control Register */
714
715/*
716 * Number of times we will attempt to autonegotiate before downshifting if we
717 * are the master
718 */
719#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
720#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
721#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
722#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
723#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
724#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
725#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
726#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
727#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
728#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
729
730/* i347-AT4 PHY Cable Diagnostics Control */
731#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
732
733/* Marvell 1112 only registers */
734#define M88E1112_VCT_DSP_DISTANCE 0x001A
735
705/* M88EC018 Rev 2 specific DownShift settings */ 736/* M88EC018 Rev 2 specific DownShift settings */
706#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 737#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
707#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 738#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
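
A worked example of the diagnostics decode these registers enable: I347AT4_PCDL holds the measured length and bit 10 of I347AT4_PCDC selects the unit, 0 for centimeters and 1 for meters, so a clear bit means dividing by 100. Stand-alone model of the conversion done later in this diff by igb_get_cable_length_m88_gen2():

#include <stdint.h>
#include <stdio.h>

#define PCDC_CABLE_LENGTH_UNIT	0x0400	/* 0 = cm, 1 = meters */

static uint16_t cable_length_meters(uint16_t pcdl, uint16_t pcdc)
{
	int is_cm = !(pcdc & PCDC_CABLE_LENGTH_UNIT);

	return pcdl / (is_cm ? 100 : 1);
}

int main(void)
{
	/* 2350 reported with the unit bit clear: 2350 cm -> 23 m */
	printf("%d\n", cable_length_meters(2350, 0));
	/* 35 reported with the unit bit set: already meters */
	printf("%d\n", cable_length_meters(35, PCDC_CABLE_LENGTH_UNIT));
	return 0;
}
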
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index cb8db78b1a05..c0b017f8d782 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,8 @@ struct e1000_hw;
54#define E1000_DEV_ID_82580_SERDES 0x1510 54#define E1000_DEV_ID_82580_SERDES 0x1510
55#define E1000_DEV_ID_82580_SGMII 0x1511 55#define E1000_DEV_ID_82580_SGMII 0x1511
56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
57#define E1000_DEV_ID_DH89XXCC_SGMII 0x0436
58#define E1000_DEV_ID_DH89XXCC_SERDES 0x0438
57#define E1000_DEV_ID_I350_COPPER 0x1521 59#define E1000_DEV_ID_I350_COPPER 0x1521
58#define E1000_DEV_ID_I350_FIBER 0x1522 60#define E1000_DEV_ID_I350_FIBER 0x1522
59#define E1000_DEV_ID_I350_SERDES 0x1523 61#define E1000_DEV_ID_I350_SERDES 0x1523
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index cf1f32300923..ddd036a78999 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -570,6 +570,89 @@ out:
570} 570}
571 571
572/** 572/**
573 * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
574 * @hw: pointer to the HW structure
575 *
576 * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's.
577 * Also enables and sets the downshift parameters.
578 **/
579s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
580{
581 struct e1000_phy_info *phy = &hw->phy;
582 s32 ret_val;
583 u16 phy_data;
584
585 if (phy->reset_disable) {
586 ret_val = 0;
587 goto out;
588 }
589
590 /* Enable CRS on Tx. This must be set for half-duplex operation. */
591 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
592 if (ret_val)
593 goto out;
594
595 /*
596 * Options:
597 * MDI/MDI-X = 0 (default)
598 * 0 - Auto for all speeds
599 * 1 - MDI mode
600 * 2 - MDI-X mode
601 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
602 */
603 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
604
605 switch (phy->mdix) {
606 case 1:
607 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
608 break;
609 case 2:
610 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
611 break;
612 case 3:
 613 /* M88E1112 does not support this mode */
614 if (phy->id != M88E1112_E_PHY_ID) {
615 phy_data |= M88E1000_PSCR_AUTO_X_1000T;
616 break;
617 }
618 case 0:
619 default:
620 phy_data |= M88E1000_PSCR_AUTO_X_MODE;
621 break;
622 }
623
624 /*
625 * Options:
626 * disable_polarity_correction = 0 (default)
627 * Automatic Correction for Reversed Cable Polarity
628 * 0 - Disabled
629 * 1 - Enabled
630 */
631 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
632 if (phy->disable_polarity_correction == 1)
633 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
634
635 /* Enable downshift and setting it to X6 */
636 phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
637 phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
638 phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
639
640 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
641 if (ret_val)
642 goto out;
643
644 /* Commit the changes. */
645 ret_val = igb_phy_sw_reset(hw);
646 if (ret_val) {
647 hw_dbg("Error committing the PHY changes\n");
648 goto out;
649 }
650
651out:
652 return ret_val;
653}
654
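
The MDI/MDI-X handling above clears a two-bit auto-crossover field and ORs in the requested mode, with mode 3 deliberately falling through to full auto on the M88E1112, which lacks that mode. A stand-alone sketch of the field arithmetic; the bit values mirror the M88E1000_PSCR_* definitions from e1000_defines.h, which are not part of this diff, so treat them as assumptions:

#include <stdint.h>
#include <stdio.h>

#define PSCR_AUTO_X_MODE	0x0060	/* two-bit auto crossover field */
#define PSCR_MDI_MANUAL		0x0000
#define PSCR_MDIX_MANUAL	0x0020
#define PSCR_AUTO_X_1000T	0x0040

static uint16_t apply_mdix(uint16_t pscr, int mdix, int is_m88e1112)
{
	pscr &= ~PSCR_AUTO_X_MODE;	/* clear the field first */

	switch (mdix) {
	case 1:
		pscr |= PSCR_MDI_MANUAL;
		break;
	case 2:
		pscr |= PSCR_MDIX_MANUAL;
		break;
	case 3:
		if (!is_m88e1112) {
			pscr |= PSCR_AUTO_X_1000T;
			break;
		}
		/* fall through: M88E1112 lacks this mode */
	case 0:
	default:
		pscr |= PSCR_AUTO_X_MODE;
		break;
	}
	return pscr;
}

int main(void)
{
	printf("0x%04x\n", apply_mdix(0xffff, 3, 0));	/* 0xffdf: auto for 1000T only */
	printf("0x%04x\n", apply_mdix(0xffff, 3, 1));	/* 0xffff: degraded to full auto */
	return 0;
}
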
655/**
573 * igb_copper_link_setup_igp - Setup igp PHY's for copper link 656 * igb_copper_link_setup_igp - Setup igp PHY's for copper link
574 * @hw: pointer to the HW structure 657 * @hw: pointer to the HW structure
575 * 658 *
@@ -1124,18 +1207,25 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1124 goto out; 1207 goto out;
1125 1208
1126 if (!link) { 1209 if (!link) {
1127 /* 1210 if (hw->phy.type != e1000_phy_m88 ||
1128 * We didn't get link. 1211 hw->phy.id == I347AT4_E_PHY_ID ||
1129 * Reset the DSP and cross our fingers. 1212 hw->phy.id == M88E1112_E_PHY_ID) {
1130 */ 1213 hw_dbg("Link taking longer than expected.\n");
1131 ret_val = phy->ops.write_reg(hw, 1214 } else {
1132 M88E1000_PHY_PAGE_SELECT, 1215
1133 0x001d); 1216 /*
1134 if (ret_val) 1217 * We didn't get link.
1135 goto out; 1218 * Reset the DSP and cross our fingers.
1136 ret_val = igb_phy_reset_dsp(hw); 1219 */
1137 if (ret_val) 1220 ret_val = phy->ops.write_reg(hw,
1138 goto out; 1221 M88E1000_PHY_PAGE_SELECT,
1222 0x001d);
1223 if (ret_val)
1224 goto out;
1225 ret_val = igb_phy_reset_dsp(hw);
1226 if (ret_val)
1227 goto out;
1228 }
1139 } 1229 }
1140 1230
1141 /* Try once more */ 1231 /* Try once more */
@@ -1145,6 +1235,11 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1145 goto out; 1235 goto out;
1146 } 1236 }
1147 1237
1238 if (hw->phy.type != e1000_phy_m88 ||
1239 hw->phy.id == I347AT4_E_PHY_ID ||
1240 hw->phy.id == M88E1112_E_PHY_ID)
1241 goto out;
1242
1148 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 1243 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
1149 if (ret_val) 1244 if (ret_val)
1150 goto out; 1245 goto out;
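Both hunks above add the same guard: the page-0x1d DSP reset and the downshift counter read are workarounds specific to the classic m88 PHY, so anything that is not that part (or that is one of the newer i347-AT4/m88e1112 parts) skips them. Factored out as a predicate — the ID constants below are assumed values for illustration, not the driver's real definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum phy_type { PHY_IGP, PHY_M88 };

    #define ID_I347AT4   0x01410dc0u   /* assumed value for illustration */
    #define ID_M88E1112  0x01410c90u   /* assumed value for illustration */

    static bool wants_m88_dsp_workaround(enum phy_type type, uint32_t id)
    {
        /* only the classic m88 needs it; newer Marvell parts do not */
        return type == PHY_M88 && id != ID_I347AT4 && id != ID_M88E1112;
    }

    int main(void)
    {
        printf("%d\n", wants_m88_dsp_workaround(PHY_M88, ID_I347AT4)); /* 0 */
        return 0;
    }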
@@ -1557,6 +1652,93 @@ out:
1557 return ret_val; 1652 return ret_val;
1558} 1653}
1559 1654
1655s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
1656{
1657 struct e1000_phy_info *phy = &hw->phy;
1658 s32 ret_val;
1659 u16 phy_data, phy_data2, index, default_page, is_cm;
1660
1661 switch (hw->phy.id) {
1662 case I347AT4_E_PHY_ID:
1663 /* Remember the original page select and set it to 7 */
1664 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
1665 &default_page);
1666 if (ret_val)
1667 goto out;
1668
1669 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
1670 if (ret_val)
1671 goto out;
1672
1673 /* Get cable length from PHY Cable Diagnostics Control Reg */
1674 ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
1675 &phy_data);
1676 if (ret_val)
1677 goto out;
1678
1679 /* Check if the unit of cable length is meters or cm */
1680 ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
1681 if (ret_val)
1682 goto out;
1683
1684 is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
1685
1686 /* Populate the phy structure with cable length in meters */
1687 phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
1688 phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
1689 phy->cable_length = phy_data / (is_cm ? 100 : 1);
1690
1691 /* Reset the page select to its original value */
1692 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
1693 default_page);
1694 if (ret_val)
1695 goto out;
1696 break;
1697 case M88E1112_E_PHY_ID:
1698 /* Remember the original page select and set it to 5 */
1699 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
1700 &default_page);
1701 if (ret_val)
1702 goto out;
1703
1704 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
1705 if (ret_val)
1706 goto out;
1707
1708 ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
1709 &phy_data);
1710 if (ret_val)
1711 goto out;
1712
1713 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
1714 M88E1000_PSSR_CABLE_LENGTH_SHIFT;
1715 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
1716 ret_val = -E1000_ERR_PHY;
1717 goto out;
1718 }
1719
1720 phy->min_cable_length = e1000_m88_cable_length_table[index];
1721 phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
1722
1723 phy->cable_length = (phy->min_cable_length +
1724 phy->max_cable_length) / 2;
1725
1726 /* Reset the page select to its original value */
1727 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
1728 default_page);
1729 if (ret_val)
1730 goto out;
1731
1732 break;
1733 default:
1734 ret_val = -E1000_ERR_PHY;
1735 goto out;
1736 }
1737
1738out:
1739 return ret_val;
1740}
1741
1560/** 1742/**
1561 * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY 1743 * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY
1562 * @hw: pointer to the HW structure 1744 * @hw: pointer to the HW structure
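The new cable-length routine normalizes whatever the PHY reports into meters: a unit bit in the diagnostics control register says whether the raw count is centimeters or meters, and the driver divides accordingly. A toy model of that conversion, with an invented flag name:

    #include <stdint.h>
    #include <stdio.h>

    #define PCDC_LENGTH_IS_METERS 0x0001u   /* hypothetical unit flag */

    static uint16_t cable_len_meters(uint16_t raw, uint16_t pcdc)
    {
        int is_cm = !(pcdc & PCDC_LENGTH_IS_METERS);
        return raw / (is_cm ? 100 : 1);     /* cm -> m, or pass through */
    }

    int main(void)
    {
        printf("%u m\n", cable_len_meters(1234, 0));  /* 1234 cm -> 12 m */
        return 0;
    }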
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 565a6dbb3714..2cc117705a31 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -45,9 +45,11 @@ s32 igb_check_downshift(struct e1000_hw *hw);
45s32 igb_check_reset_block(struct e1000_hw *hw); 45s32 igb_check_reset_block(struct e1000_hw *hw);
46s32 igb_copper_link_setup_igp(struct e1000_hw *hw); 46s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
47s32 igb_copper_link_setup_m88(struct e1000_hw *hw); 47s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
48s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw);
48s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); 49s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
49s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); 50s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
50s32 igb_get_cable_length_m88(struct e1000_hw *hw); 51s32 igb_get_cable_length_m88(struct e1000_hw *hw);
52s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw);
51s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); 53s32 igb_get_cable_length_igp_2(struct e1000_hw *hw);
52s32 igb_get_phy_id(struct e1000_hw *hw); 54s32 igb_get_phy_id(struct e1000_hw *hw);
53s32 igb_get_phy_info_igp(struct e1000_hw *hw); 55s32 igb_get_phy_info_igp(struct e1000_hw *hw);
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 6e63d9a7fc75..44e0ff1494e0 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -143,7 +143,7 @@ struct igb_buffer {
143 u16 next_to_watch; 143 u16 next_to_watch;
144 unsigned int bytecount; 144 unsigned int bytecount;
145 u16 gso_segs; 145 u16 gso_segs;
146 union skb_shared_tx shtx; 146 u8 tx_flags;
147 u8 mapped_as_page; 147 u8 mapped_as_page;
148 }; 148 };
149 /* RX */ 149 /* RX */
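The shtx union shrinks to a plain byte because the shared tx flags this driver consumes are individual bits that fit in a u8: the xmit path copies them into the buffer, the completion path tests them. A minimal model — the flag values mirror the kernel's SKBTX_* names but are assumptions in this sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define SKBTX_HW_TSTAMP   (1u << 0)   /* assumed bit positions */
    #define SKBTX_IN_PROGRESS (1u << 2)

    struct tx_buffer { uint8_t tx_flags; };

    int main(void)
    {
        struct tx_buffer buf = { .tx_flags = SKBTX_HW_TSTAMP | SKBTX_IN_PROGRESS };
        if (buf.tx_flags & SKBTX_HW_TSTAMP)
            printf("fetch the hardware timestamp for this packet\n");
        return 0;
    }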
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 9b4e5895f5f9..0394ca95f3ba 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -71,6 +71,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, 71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, 72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, 73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, 76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, 77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, 78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
@@ -1856,8 +1858,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1856 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 1858 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
1857 netdev->vlan_features |= NETIF_F_SG; 1859 netdev->vlan_features |= NETIF_F_SG;
1858 1860
1859 if (pci_using_dac) 1861 if (pci_using_dac) {
1860 netdev->features |= NETIF_F_HIGHDMA; 1862 netdev->features |= NETIF_F_HIGHDMA;
1863 netdev->vlan_features |= NETIF_F_HIGHDMA;
1864 }
1861 1865
1862 if (hw->mac.type >= e1000_82576) 1866 if (hw->mac.type >= e1000_82576)
1863 netdev->features |= NETIF_F_SCTP_CSUM; 1867 netdev->features |= NETIF_F_SCTP_CSUM;
@@ -1888,9 +1892,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1888 goto err_eeprom; 1892 goto err_eeprom;
1889 } 1893 }
1890 1894
1891 setup_timer(&adapter->watchdog_timer, &igb_watchdog, 1895 setup_timer(&adapter->watchdog_timer, igb_watchdog,
1892 (unsigned long) adapter); 1896 (unsigned long) adapter);
1893 setup_timer(&adapter->phy_info_timer, &igb_update_phy_info, 1897 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
1894 (unsigned long) adapter); 1898 (unsigned long) adapter);
1895 1899
1896 INIT_WORK(&adapter->reset_task, igb_reset_task); 1900 INIT_WORK(&adapter->reset_task, igb_reset_task);
@@ -3954,7 +3958,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3954 } 3958 }
3955 3959
3956 tx_ring->buffer_info[i].skb = skb; 3960 tx_ring->buffer_info[i].skb = skb;
3957 tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags; 3961 tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
3958 /* multiply data chunks by size of headers */ 3962 /* multiply data chunks by size of headers */
3959 tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len; 3963 tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
3960 tx_ring->buffer_info[i].gso_segs = gso_segs; 3964 tx_ring->buffer_info[i].gso_segs = gso_segs;
@@ -4088,7 +4092,6 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
4088 u32 tx_flags = 0; 4092 u32 tx_flags = 0;
4089 u16 first; 4093 u16 first;
4090 u8 hdr_len = 0; 4094 u8 hdr_len = 0;
4091 union skb_shared_tx *shtx = skb_tx(skb);
4092 4095
4093 /* need: 1 descriptor per page, 4096 /* need: 1 descriptor per page,
4094 * + 2 desc gap to keep tail from touching head, 4097 * + 2 desc gap to keep tail from touching head,
@@ -4100,8 +4103,8 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
4100 return NETDEV_TX_BUSY; 4103 return NETDEV_TX_BUSY;
4101 } 4104 }
4102 4105
4103 if (unlikely(shtx->hardware)) { 4106 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4104 shtx->in_progress = 1; 4107 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4105 tx_flags |= IGB_TX_FLAGS_TSTAMP; 4108 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4106 } 4109 }
4107 4110
@@ -4660,12 +4663,13 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4660 u32 vmolr = rd32(E1000_VMOLR(vf)); 4663 u32 vmolr = rd32(E1000_VMOLR(vf));
4661 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 4664 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4662 4665
4663 vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC | 4666 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
4664 IGB_VF_FLAG_MULTI_PROMISC); 4667 IGB_VF_FLAG_MULTI_PROMISC);
4665 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); 4668 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4666 4669
4667 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { 4670 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4668 vmolr |= E1000_VMOLR_MPME; 4671 vmolr |= E1000_VMOLR_MPME;
4672 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
4669 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; 4673 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4670 } else { 4674 } else {
4671 /* 4675 /*
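The one-character fix in this hunk deserves spelling out: "flags |= ~MASK" sets every bit outside the mask, while the intended "flags &= ~MASK" clears exactly the two promiscuous bits. Demonstrated with illustrative bit positions:

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_UNI_PROMISC   (1u << 1)   /* illustrative positions */
    #define FLAG_MULTI_PROMISC (1u << 2)

    int main(void)
    {
        uint32_t mask = FLAG_UNI_PROMISC | FLAG_MULTI_PROMISC;
        uint32_t flags = 0x1 | mask;
        printf("buggy: 0x%08x\n", flags | ~mask);  /* every bit ends up set */
        printf("fixed: 0x%08x\n", flags & ~mask);  /* only 0x00000001 remains */
        return 0;
    }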
@@ -5319,7 +5323,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *bu
5319 u64 regval; 5323 u64 regval;
5320 5324
5321 /* if skb does not support hw timestamp or TX stamp not valid exit */ 5325 /* if skb does not support hw timestamp or TX stamp not valid exit */
5322 if (likely(!buffer_info->shtx.hardware) || 5326 if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
5323 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) 5327 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5324 return; 5328 return;
5325 5329
@@ -5431,7 +5435,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5431 tx_ring->total_packets += total_packets; 5435 tx_ring->total_packets += total_packets;
5432 tx_ring->tx_stats.bytes += total_bytes; 5436 tx_ring->tx_stats.bytes += total_bytes;
5433 tx_ring->tx_stats.packets += total_packets; 5437 tx_ring->tx_stats.packets += total_packets;
5434 return (count < tx_ring->count); 5438 return count < tx_ring->count;
5435} 5439}
5436 5440
5437/** 5441/**
@@ -5456,7 +5460,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
5456static inline void igb_rx_checksum_adv(struct igb_ring *ring, 5460static inline void igb_rx_checksum_adv(struct igb_ring *ring,
5457 u32 status_err, struct sk_buff *skb) 5461 u32 status_err, struct sk_buff *skb)
5458{ 5462{
5459 skb->ip_summed = CHECKSUM_NONE; 5463 skb_checksum_none_assert(skb);
5460 5464
5461 /* Ignore Checksum bit is set or checksum is disabled through ethtool */ 5465 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
5462 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) || 5466 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
@@ -5500,7 +5504,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5500 * values must belong to this one here and therefore we don't need to 5504 * values must belong to this one here and therefore we don't need to
5501 * compare any of the additional attributes stored for it. 5505 * compare any of the additional attributes stored for it.
5502 * 5506 *
5503 * If nothing went wrong, then it should have a skb_shared_tx that we 5507 * If nothing went wrong, then it should have a shared tx_flags that we
5504 * can turn into a skb_shared_hwtstamps. 5508 * can turn into a skb_shared_hwtstamps.
5505 */ 5509 */
5506 if (staterr & E1000_RXDADV_STAT_TSIP) { 5510 if (staterr & E1000_RXDADV_STAT_TSIP) {
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 103b3aa1afc2..33add708bcbe 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -153,7 +153,7 @@ static int igbvf_set_rx_csum(struct net_device *netdev, u32 data)
153 153
154static u32 igbvf_get_tx_csum(struct net_device *netdev) 154static u32 igbvf_get_tx_csum(struct net_device *netdev)
155{ 155{
156 return ((netdev->features & NETIF_F_IP_CSUM) != 0); 156 return (netdev->features & NETIF_F_IP_CSUM) != 0;
157} 157}
158 158
159static int igbvf_set_tx_csum(struct net_device *netdev, u32 data) 159static int igbvf_set_tx_csum(struct net_device *netdev, u32 data)
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index c539f7c9c3e0..265501348f33 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -103,7 +103,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
103static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, 103static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
104 u32 status_err, struct sk_buff *skb) 104 u32 status_err, struct sk_buff *skb)
105{ 105{
106 skb->ip_summed = CHECKSUM_NONE; 106 skb_checksum_none_assert(skb);
107 107
108 /* Ignore Checksum bit is set or checksum is disabled through ethtool */ 108 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
109 if ((status_err & E1000_RXD_STAT_IXSM) || 109 if ((status_err & E1000_RXD_STAT_IXSM) ||
@@ -845,7 +845,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
845 } 845 }
846 adapter->net_stats.tx_bytes += total_bytes; 846 adapter->net_stats.tx_bytes += total_bytes;
847 adapter->net_stats.tx_packets += total_packets; 847 adapter->net_stats.tx_packets += total_packets;
848 return (count < tx_ring->count); 848 return count < tx_ring->count;
849} 849}
850 850
851static irqreturn_t igbvf_msix_other(int irq, void *data) 851static irqreturn_t igbvf_msix_other(int irq, void *data)
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 0b3f6df5cff7..c8ee8d28767b 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -827,7 +827,7 @@ static void ioc3_mii_start(struct ioc3_private *ip)
827{ 827{
828 ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */ 828 ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
829 ip->ioc3_timer.data = (unsigned long) ip; 829 ip->ioc3_timer.data = (unsigned long) ip;
830 ip->ioc3_timer.function = &ioc3_timer; 830 ip->ioc3_timer.function = ioc3_timer;
831 add_timer(&ip->ioc3_timer); 831 add_timer(&ip->ioc3_timer);
832} 832}
833 833
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 72e3d2da9e9f..dc0198092343 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -1213,7 +1213,7 @@ static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
1213 1213
1214 skb_put(skb, framelen); 1214 skb_put(skb, framelen);
1215 skb->protocol = eth_type_trans(skb, dev); 1215 skb->protocol = eth_type_trans(skb, dev);
1216 skb->ip_summed = CHECKSUM_NONE; 1216 skb_checksum_none_assert(skb);
1217 netif_rx(skb); 1217 netif_rx(skb);
1218 sp->rx_buff[entry] = NULL; 1218 sp->rx_buff[entry] = NULL;
1219} 1219}
@@ -1278,7 +1278,7 @@ static void ipg_nic_rx_with_end(struct net_device *dev,
1278 jumbo->skb->protocol = 1278 jumbo->skb->protocol =
1279 eth_type_trans(jumbo->skb, dev); 1279 eth_type_trans(jumbo->skb, dev);
1280 1280
1281 jumbo->skb->ip_summed = CHECKSUM_NONE; 1281 skb_checksum_none_assert(jumbo->skb);
1282 netif_rx(jumbo->skb); 1282 netif_rx(jumbo->skb);
1283 } 1283 }
1284 } 1284 }
@@ -1476,7 +1476,7 @@ static int ipg_nic_rx(struct net_device *dev)
1476 * IP/TCP/UDP frame was received. Let the 1476 * IP/TCP/UDP frame was received. Let the
1477 * upper layer decide. 1477 * upper layer decide.
1478 */ 1478 */
1479 skb->ip_summed = CHECKSUM_NONE; 1479 skb_checksum_none_assert(skb);
1480 1480
1481 /* Hand off frame for higher layer processing. 1481 /* Hand off frame for higher layer processing.
1482 * The function netif_rx() releases the sk_buff 1482 * The function netif_rx() releases the sk_buff
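Several files in this series (ipg, igb, igbvf, ixgb, iseries_veth) replace the explicit "skb->ip_summed = CHECKSUM_NONE" store with skb_checksum_none_assert(). Freshly allocated receive skbs already start out as CHECKSUM_NONE, so the store was redundant; in kernels of this vintage the helper is roughly the following (paraphrased, not copied verbatim):

    static inline void skb_checksum_none_assert(struct sk_buff *skb)
    {
    #ifdef DEBUG
            BUG_ON(skb->ip_summed != CHECKSUM_NONE);
    #endif
    }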
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 48bd5ec9f29b..b626cccbccd1 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -217,7 +217,7 @@ toshoboe_checkfcs (unsigned char *buf, int len)
217 for (i = 0; i < len; ++i) 217 for (i = 0; i < len; ++i)
218 fcs.value = irda_fcs (fcs.value, *(buf++)); 218 fcs.value = irda_fcs (fcs.value, *(buf++));
219 219
220 return (fcs.value == GOOD_FCS); 220 return fcs.value == GOOD_FCS;
221} 221}
222 222
223/***********************************************************************/ 223/***********************************************************************/
@@ -759,7 +759,7 @@ toshoboe_maketestpacket (unsigned char *buf, int badcrc, int fir)
759 if (fir) 759 if (fir)
760 { 760 {
761 memset (buf, 0, TT_LEN); 761 memset (buf, 0, TT_LEN);
762 return (TT_LEN); 762 return TT_LEN;
763 } 763 }
764 764
765 fcs.value = INIT_FCS; 765 fcs.value = INIT_FCS;
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 4441fa3389c2..cce82f101f50 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1514,7 +1514,7 @@ static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_
1514 IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n", 1514 IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
1515 __func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep); 1515 __func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
1516 1516
1517 return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0)); 1517 return (self->bulk_in_ep != 0) && (self->bulk_out_ep != 0);
1518} 1518}
1519 1519
1520#ifdef IU_DUMP_CLASS_DESC 1520#ifdef IU_DUMP_CLASS_DESC
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 5b1036ac38d7..74b20f179cea 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -734,7 +734,7 @@ static int mcs_net_open(struct net_device *netdev)
734 } 734 }
735 735
736 if (!mcs_setup_urbs(mcs)) 736 if (!mcs_setup_urbs(mcs))
737 goto error3; 737 goto error3;
738 738
739 ret = mcs_receive_start(mcs); 739 ret = mcs_receive_start(mcs);
740 if (ret) 740 if (ret)
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index e30cdbb14745..559fe854d76d 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1348,7 +1348,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
1348 outb(bank, iobase+BSR); 1348 outb(bank, iobase+BSR);
1349 1349
1350 /* Make sure interrupt handlers keep the proper interrupt mask */ 1350 /* Make sure interrupt handlers keep the proper interrupt mask */
1351 return(ier); 1351 return ier;
1352} 1352}
1353 1353
1354/* 1354/*
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 1b051dab7b29..39d6e6f15d4f 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -336,7 +336,7 @@ static int sirdev_is_receiving(struct sir_dev *dev)
336 if (!atomic_read(&dev->enable_rx)) 336 if (!atomic_read(&dev->enable_rx))
337 return 0; 337 return 0;
338 338
339 return (dev->rx_buff.state != OUTSIDE_FRAME); 339 return dev->rx_buff.state != OUTSIDE_FRAME;
340} 340}
341 341
342int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type) 342int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 850ca1c5ee19..8c57bfb5f098 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -2051,7 +2051,7 @@ static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
2051 */ 2051 */
2052static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self) 2052static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self)
2053{ 2053{
2054 return (self->rx_buff.state != OUTSIDE_FRAME); 2054 return self->rx_buff.state != OUTSIDE_FRAME;
2055} 2055}
2056 2056
2057 2057
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index e5698fa30a4f..41c96b3d8152 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -219,7 +219,7 @@ static inline int read_reg(struct stir_cb *stir, __u16 reg,
219 219
220static inline int isfir(u32 speed) 220static inline int isfir(u32 speed)
221{ 221{
222 return (speed == 4000000); 222 return speed == 4000000;
223} 223}
224 224
225/* 225/*
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index b0a6cd815be1..67c0ad42d818 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -1182,12 +1182,13 @@ F01_E */
1182 1182
1183 skb = dev_alloc_skb(len + 1 - 4); 1183 skb = dev_alloc_skb(len + 1 - 4);
1184 /* 1184 /*
1185 * if frame size,data ptr,or skb ptr are wrong ,the get next 1185 * if frame size, data ptr, or skb ptr are wrong, then get next
1186 * entry. 1186 * entry.
1187 */ 1187 */
1188 if ((skb == NULL) || (skb->data == NULL) || 1188 if ((skb == NULL) || (skb->data == NULL) ||
1189 (self->rx_buff.data == NULL) || (len < 6)) { 1189 (self->rx_buff.data == NULL) || (len < 6)) {
1190 self->netdev->stats.rx_dropped++; 1190 self->netdev->stats.rx_dropped++;
1191 kfree_skb(skb);
1191 return TRUE; 1192 return TRUE;
1192 } 1193 }
1193 skb_reserve(skb, 1); 1194 skb_reserve(skb, 1);
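The added kfree_skb() plugs a leak on the drop path: dev_alloc_skb() may well have succeeded even though one of the other sanity checks failed, and the early return used to abandon the buffer. Since kfree_skb() tolerates a NULL argument, no extra branching is needed. The same shape in plain C, with malloc standing in for the skb allocator:

    #include <stdio.h>
    #include <stdlib.h>

    struct frame { char *data; };

    static int rx_one(int len)
    {
        struct frame *f = malloc(sizeof(*f));  /* stands in for dev_alloc_skb */
        if (f == NULL || len < 6) {
            free(f);          /* free(NULL) is a no-op, like kfree_skb(NULL) */
            return 1;         /* count the drop, move to the next entry */
        }
        /* ... hand the frame up the stack ... */
        free(f);
        return 0;
    }

    int main(void)
    {
        printf("dropped=%d\n", rx_one(4));
        return 0;
    }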
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index 5a84822b5a43..c6f58482b769 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -238,7 +238,7 @@ static void WriteLPCReg(int iRegNum, unsigned char iVal)
238 238
239static __u8 ReadReg(unsigned int BaseAddr, int iRegNum) 239static __u8 ReadReg(unsigned int BaseAddr, int iRegNum)
240{ 240{
241 return ((__u8) inb(BaseAddr + iRegNum)); 241 return (__u8) inb(BaseAddr + iRegNum);
242} 242}
243 243
244static void WriteReg(unsigned int BaseAddr, int iRegNum, unsigned char iVal) 244static void WriteReg(unsigned int BaseAddr, int iRegNum, unsigned char iVal)
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 3f24a1f33022..d66fab854bf1 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -595,7 +595,7 @@ struct ring_descr {
595 595
596static inline int rd_is_active(struct ring_descr *rd) 596static inline int rd_is_active(struct ring_descr *rd)
597{ 597{
598 return ((rd->hw->rd_status & RD_ACTIVE) != 0); 598 return (rd->hw->rd_status & RD_ACTIVE) != 0;
599} 599}
600 600
601static inline void rd_activate(struct ring_descr *rd) 601static inline void rd_activate(struct ring_descr *rd)
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index ba1de5973fb2..8df645e78f2e 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1524,7 +1524,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
1524 1524
1525 skb_put(skb, length); 1525 skb_put(skb, length);
1526 skb->protocol = eth_type_trans(skb, dev); 1526 skb->protocol = eth_type_trans(skb, dev);
1527 skb->ip_summed = CHECKSUM_NONE; 1527 skb_checksum_none_assert(skb);
1528 netif_rx(skb); /* send it up */ 1528 netif_rx(skb); /* send it up */
1529 dev->stats.rx_packets++; 1529 dev->stats.rx_packets++;
1530 dev->stats.rx_bytes += length; 1530 dev->stats.rx_bytes += length;
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 813993f9c65c..c982ab9f9005 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -296,12 +296,12 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
296 eecd_reg = IXGB_READ_REG(hw, EECD); 296 eecd_reg = IXGB_READ_REG(hw, EECD);
297 297
298 if (eecd_reg & IXGB_EECD_DO) 298 if (eecd_reg & IXGB_EECD_DO)
299 return (true); 299 return true;
300 300
301 udelay(50); 301 udelay(50);
302 } 302 }
303 ASSERT(0); 303 ASSERT(0);
304 return (false); 304 return false;
305} 305}
306 306
307/****************************************************************************** 307/******************************************************************************
@@ -327,9 +327,9 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
327 checksum += ixgb_read_eeprom(hw, i); 327 checksum += ixgb_read_eeprom(hw, i);
328 328
329 if (checksum == (u16) EEPROM_SUM) 329 if (checksum == (u16) EEPROM_SUM)
330 return (true); 330 return true;
331 else 331 else
332 return (false); 332 return false;
333} 333}
334 334
335/****************************************************************************** 335/******************************************************************************
@@ -439,7 +439,7 @@ ixgb_read_eeprom(struct ixgb_hw *hw,
439 /* End this read operation */ 439 /* End this read operation */
440 ixgb_standby_eeprom(hw); 440 ixgb_standby_eeprom(hw);
441 441
442 return (data); 442 return data;
443} 443}
444 444
445/****************************************************************************** 445/******************************************************************************
@@ -476,16 +476,16 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
476 /* clear the init_ctrl_reg_1 to signify that the cache is 476 /* clear the init_ctrl_reg_1 to signify that the cache is
477 * invalidated */ 477 * invalidated */
478 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); 478 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
479 return (false); 479 return false;
480 } 480 }
481 481
482 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) 482 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
483 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { 483 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
484 pr_debug("Signature invalid\n"); 484 pr_debug("Signature invalid\n");
485 return(false); 485 return false;
486 } 486 }
487 487
488 return(true); 488 return true;
489} 489}
490 490
491/****************************************************************************** 491/******************************************************************************
@@ -505,7 +505,7 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
505 505
506 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) 506 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
507 == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { 507 == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
508 return (true); 508 return true;
509 } else { 509 } else {
510 return ixgb_get_eeprom_data(hw); 510 return ixgb_get_eeprom_data(hw);
511 } 511 }
@@ -526,10 +526,10 @@ ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
526 526
527 if ((index < IXGB_EEPROM_SIZE) && 527 if ((index < IXGB_EEPROM_SIZE) &&
528 (ixgb_check_and_get_eeprom_data(hw) == true)) { 528 (ixgb_check_and_get_eeprom_data(hw) == true)) {
529 return(hw->eeprom[index]); 529 return hw->eeprom[index];
530 } 530 }
531 531
532 return(0); 532 return 0;
533} 533}
534 534
535/****************************************************************************** 535/******************************************************************************
@@ -570,10 +570,10 @@ u32
570ixgb_get_ee_pba_number(struct ixgb_hw *hw) 570ixgb_get_ee_pba_number(struct ixgb_hw *hw)
571{ 571{
572 if (ixgb_check_and_get_eeprom_data(hw) == true) 572 if (ixgb_check_and_get_eeprom_data(hw) == true)
573 return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG]) 573 return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
574 | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16)); 574 | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16);
575 575
576 return(0); 576 return 0;
577} 577}
578 578
579 579
@@ -591,8 +591,8 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
591 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 591 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
592 592
593 if (ixgb_check_and_get_eeprom_data(hw) == true) 593 if (ixgb_check_and_get_eeprom_data(hw) == true)
594 return (le16_to_cpu(ee_map->device_id)); 594 return le16_to_cpu(ee_map->device_id);
595 595
596 return (0); 596 return 0;
597} 597}
598 598
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index a4ed96caae69..43994c199991 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -410,7 +410,7 @@ static int
410ixgb_get_eeprom_len(struct net_device *netdev) 410ixgb_get_eeprom_len(struct net_device *netdev)
411{ 411{
412 /* return size in bytes */ 412 /* return size in bytes */
413 return (IXGB_EEPROM_SIZE << 1); 413 return IXGB_EEPROM_SIZE << 1;
414} 414}
415 415
416static int 416static int
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 397acabccab6..6cb2e42ff4c1 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -167,7 +167,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
167 /* Clear any pending interrupt events. */ 167 /* Clear any pending interrupt events. */
168 icr_reg = IXGB_READ_REG(hw, ICR); 168 icr_reg = IXGB_READ_REG(hw, ICR);
169 169
170 return (ctrl_reg & IXGB_CTRL0_RST); 170 return ctrl_reg & IXGB_CTRL0_RST;
171} 171}
172 172
173 173
@@ -209,7 +209,7 @@ ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
209 xpak_vendor = ixgb_xpak_vendor_infineon; 209 xpak_vendor = ixgb_xpak_vendor_infineon;
210 } 210 }
211 211
212 return (xpak_vendor); 212 return xpak_vendor;
213} 213}
214 214
215/****************************************************************************** 215/******************************************************************************
@@ -273,7 +273,7 @@ ixgb_identify_phy(struct ixgb_hw *hw)
273 if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) 273 if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID)
274 phy_type = ixgb_phy_type_bcm; 274 phy_type = ixgb_phy_type_bcm;
275 275
276 return (phy_type); 276 return phy_type;
277} 277}
278 278
279/****************************************************************************** 279/******************************************************************************
@@ -366,7 +366,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
366 /* 82597EX errata: Call check-for-link in case lane deskew is locked */ 366 /* 82597EX errata: Call check-for-link in case lane deskew is locked */
367 ixgb_check_for_link(hw); 367 ixgb_check_for_link(hw);
368 368
369 return (status); 369 return status;
370} 370}
371 371
372/****************************************************************************** 372/******************************************************************************
@@ -531,7 +531,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
531 } 531 }
532 532
533 hash_value &= 0xFFF; 533 hash_value &= 0xFFF;
534 return (hash_value); 534 return hash_value;
535} 535}
536 536
537/****************************************************************************** 537/******************************************************************************
@@ -715,7 +715,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
715 } 715 }
716 IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water); 716 IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
717 } 717 }
718 return (status); 718 return status;
719} 719}
720 720
721/****************************************************************************** 721/******************************************************************************
@@ -1140,7 +1140,7 @@ mac_addr_valid(u8 *mac_addr)
1140 pr_debug("MAC address is all zeros\n"); 1140 pr_debug("MAC address is all zeros\n");
1141 is_valid = false; 1141 is_valid = false;
1142 } 1142 }
1143 return (is_valid); 1143 return is_valid;
1144} 1144}
1145 1145
1146/****************************************************************************** 1146/******************************************************************************
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 45fc89b9ba64..80e62578ffa0 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -446,8 +446,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
446 NETIF_F_HW_VLAN_FILTER; 446 NETIF_F_HW_VLAN_FILTER;
447 netdev->features |= NETIF_F_TSO; 447 netdev->features |= NETIF_F_TSO;
448 448
449 if (pci_using_dac) 449 if (pci_using_dac) {
450 netdev->features |= NETIF_F_HIGHDMA; 450 netdev->features |= NETIF_F_HIGHDMA;
451 netdev->vlan_features |= NETIF_F_HIGHDMA;
452 }
451 453
452 /* make sure the EEPROM is good */ 454 /* make sure the EEPROM is good */
453 455
@@ -470,7 +472,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
470 adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw); 472 adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
471 473
472 init_timer(&adapter->watchdog_timer); 474 init_timer(&adapter->watchdog_timer);
473 adapter->watchdog_timer.function = &ixgb_watchdog; 475 adapter->watchdog_timer.function = ixgb_watchdog;
474 adapter->watchdog_timer.data = (unsigned long)adapter; 476 adapter->watchdog_timer.data = (unsigned long)adapter;
475 477
476 INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); 478 INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
@@ -1905,7 +1907,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
1905 */ 1907 */
1906 if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) || 1908 if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1907 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) { 1909 (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1908 skb->ip_summed = CHECKSUM_NONE; 1910 skb_checksum_none_assert(skb);
1909 return; 1911 return;
1910 } 1912 }
1911 1913
@@ -1913,7 +1915,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
1913 /* now look at the TCP checksum error bit */ 1915 /* now look at the TCP checksum error bit */
1914 if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) { 1916 if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1915 /* let the stack verify checksum errors */ 1917 /* let the stack verify checksum errors */
1916 skb->ip_summed = CHECKSUM_NONE; 1918 skb_checksum_none_assert(skb);
1917 adapter->hw_csum_rx_error++; 1919 adapter->hw_csum_rx_error++;
1918 } else { 1920 } else {
1919 /* TCP checksum is good */ 1921 /* TCP checksum is good */
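The HIGHDMA addition to vlan_features (here and in the igb hunk earlier) is not cosmetic: a VLAN device derives its capabilities by intersecting the lower device's features with its vlan_features, so leaving HIGHDMA out of the second mask forces VLAN traffic through bounce buffers on 64-bit DMA systems. The intersection, in miniature:

    #include <stdint.h>
    #include <stdio.h>

    #define F_SG      (1u << 0)
    #define F_HIGHDMA (1u << 1)

    int main(void)
    {
        uint32_t features      = F_SG | F_HIGHDMA;
        uint32_t vlan_features = F_SG;                 /* before the fix */
        printf("vlan dev: 0x%x\n", features & vlan_features);
        vlan_features |= F_HIGHDMA;                    /* after the fix */
        printf("vlan dev: 0x%x\n", features & vlan_features);
        return 0;
    }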
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 9e15eb93860e..5cebc3755b64 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -69,15 +69,20 @@
69#define IXGBE_MAX_FCPAUSE 0xFFFF 69#define IXGBE_MAX_FCPAUSE 0xFFFF
70 70
71/* Supported Rx Buffer Sizes */ 71/* Supported Rx Buffer Sizes */
72#define IXGBE_RXBUFFER_64 64 /* Used for packet split */ 72#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
73#define IXGBE_RXBUFFER_128 128 /* Used for packet split */
74#define IXGBE_RXBUFFER_256 256 /* Used for packet split */
75#define IXGBE_RXBUFFER_2048 2048 73#define IXGBE_RXBUFFER_2048 2048
76#define IXGBE_RXBUFFER_4096 4096 74#define IXGBE_RXBUFFER_4096 4096
77#define IXGBE_RXBUFFER_8192 8192 75#define IXGBE_RXBUFFER_8192 8192
78#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ 76#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
79 77
80#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 78/*
79 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
80 * reserve 2 more, and skb_shared_info adds an additional 384 bytes;
81 * this adds up to 512 bytes of extra data, so the smallest allocation
82 * we could have is 1K.
83 * i.e. RXBUFFER_512 --> size-1024 slab
84 */
85#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
81 86
82#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) 87#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
83 88
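The arithmetic behind that comment, under its stated assumptions (64-byte allocator reserve, 2-byte NET_IP_ALIGN, 384-byte skb_shared_info — the comment rounds the 450-byte sum up to 512): a 512-byte header buffer plus the overhead lands just under 1 KB, and kmalloc rounds up to the next power-of-two slab:

    #include <stdio.h>

    int main(void)
    {
        int hdr = 512;
        int overhead = 64 + 2 + 384;   /* reserve + NET_IP_ALIGN + shared info */
        int need = hdr + overhead;     /* 962 bytes */
        int slab = 32;
        while (slab < need)
            slab <<= 1;                /* kmalloc rounds up to a power of two */
        printf("%d bytes -> size-%d slab\n", need, slab);
        return 0;
    }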
@@ -251,11 +256,11 @@ struct ixgbe_q_vector {
251 (R)->next_to_clean - (R)->next_to_use - 1) 256 (R)->next_to_clean - (R)->next_to_use - 1)
252 257
253#define IXGBE_RX_DESC_ADV(R, i) \ 258#define IXGBE_RX_DESC_ADV(R, i) \
254 (&(((union ixgbe_adv_rx_desc *)((R).desc))[i])) 259 (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
255#define IXGBE_TX_DESC_ADV(R, i) \ 260#define IXGBE_TX_DESC_ADV(R, i) \
256 (&(((union ixgbe_adv_tx_desc *)((R).desc))[i])) 261 (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
257#define IXGBE_TX_CTXTDESC_ADV(R, i) \ 262#define IXGBE_TX_CTXTDESC_ADV(R, i) \
258 (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i])) 263 (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
259 264
260#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 265#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
261#ifdef IXGBE_FCOE 266#ifdef IXGBE_FCOE
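The descriptor macros switch from (R).desc to (R)->desc because the adapter's rings are now stored behind pointers; call sites pass a ring pointer instead of dereferencing a struct at each use. A tiny illustration of the pointer form:

    #include <stdio.h>

    struct ring { int desc[4]; };

    #define RX_DESC(R, i) ((R)->desc[i])   /* pointer form, as in the new macros */

    int main(void)
    {
        struct ring r = { { 10, 11, 12, 13 } };
        printf("%d\n", RX_DESC(&r, 2));
        return 0;
    }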
@@ -448,9 +453,20 @@ extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
448extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 453extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
449extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 454extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
450extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 455extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
456extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
457extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
451extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 458extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
452extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 459extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
453extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 460extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
461extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
462 struct net_device *,
463 struct ixgbe_adapter *,
464 struct ixgbe_ring *);
465extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
466 struct ixgbe_tx_buffer *);
467extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
468 struct ixgbe_ring *rx_ring,
469 int cleaned_count);
454extern void ixgbe_write_eitr(struct ixgbe_q_vector *); 470extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
455extern int ethtool_ioctl(struct ifreq *ifr); 471extern int ethtool_ioctl(struct ifreq *ifr);
456extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); 472extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 3e06a61da921..e80657c75506 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1910,56 +1910,27 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1910 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT))); 1910 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
1911 1911
1912 /* 1912 /*
1913 * Program the relevant mask registers. If src/dst_port or src/dst_addr 1913 * Program the relevant mask registers. L4type cannot be
1914 * are zero, then assume a full mask for that field. Also assume that 1914 * masked out in this implementation.
1915 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1916 * cannot be masked out in this implementation.
1917 * 1915 *
1918 * This also assumes IPv4 only. IPv6 masking isn't supported at this 1916 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1919 * point in time. 1917 * point in time.
1920 */ 1918 */
1921 if (src_ipv4 == 0) 1919 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
1922 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff); 1920 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
1923 else
1924 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
1925
1926 if (dst_ipv4 == 0)
1927 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
1928 else
1929 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
1930 1921
1931 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1922 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1932 case IXGBE_ATR_L4TYPE_TCP: 1923 case IXGBE_ATR_L4TYPE_TCP:
1933 if (src_port == 0) 1924 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
1934 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff); 1925 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
1935 else 1926 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
1936 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 1927 (input_masks->dst_port_mask << 16)));
1937 input_masks->src_port_mask);
1938
1939 if (dst_port == 0)
1940 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
1941 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
1942 (0xffff << 16)));
1943 else
1944 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
1945 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
1946 (input_masks->dst_port_mask << 16)));
1947 break; 1928 break;
1948 case IXGBE_ATR_L4TYPE_UDP: 1929 case IXGBE_ATR_L4TYPE_UDP:
1949 if (src_port == 0) 1930 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
1950 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff); 1931 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
1951 else 1932 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
1952 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 1933 (input_masks->src_port_mask << 16)));
1953 input_masks->src_port_mask);
1954
1955 if (dst_port == 0)
1956 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
1957 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
1958 (0xffff << 16)));
1959 else
1960 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
1961 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
1962 (input_masks->src_port_mask << 16)));
1963 break; 1934 break;
1964 default: 1935 default:
1965 /* this already would have failed above */ 1936 /* this already would have failed above */
@@ -1967,11 +1938,11 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1967 } 1938 }
1968 1939
1969 /* Program the last mask register, FDIRM */ 1940 /* Program the last mask register, FDIRM */
1970 if (input_masks->vlan_id_mask || !vlan_id) 1941 if (input_masks->vlan_id_mask)
1971 /* Mask both VLAN and VLANP - bits 0 and 1 */ 1942 /* Mask both VLAN and VLANP - bits 0 and 1 */
1972 fdirm |= 0x3; 1943 fdirm |= 0x3;
1973 1944
1974 if (input_masks->data_mask || !flex_bytes) 1945 if (input_masks->data_mask)
1975 /* Flex bytes need masking, so mask the whole thing - bit 4 */ 1946 /* Flex bytes need masking, so mask the whole thing - bit 4 */
1976 fdirm |= 0x10; 1947 fdirm |= 0x10;
1977 1948
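The behavioural change in this fdir hunk, modeled below: the old code synthesized a full mask whenever a match value happened to be zero, so "match port 0 exactly" was impossible to express; the new code trusts the caller's mask verbatim. A sketch under that reading:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t old_port_mask(uint16_t port, uint16_t user_mask)
    {
        return port == 0 ? 0xffff : user_mask;  /* zero match meant "mask it all" */
    }

    static uint16_t new_port_mask(uint16_t port, uint16_t user_mask)
    {
        (void)port;
        return user_mask;                       /* the caller's mask is authoritative */
    }

    int main(void)
    {
        printf("old=0x%04x new=0x%04x\n",
               old_port_mask(0, 0), new_port_mask(0, 0));
        return 0;
    }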
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index dcebc82c6f4d..d4ac94324fa0 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -401,7 +401,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
401static u32 ixgbe_get_rx_csum(struct net_device *netdev) 401static u32 ixgbe_get_rx_csum(struct net_device *netdev)
402{ 402{
403 struct ixgbe_adapter *adapter = netdev_priv(netdev); 403 struct ixgbe_adapter *adapter = netdev_priv(netdev);
404 return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED); 404 return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
405} 405}
406 406
407static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) 407static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
@@ -820,16 +820,19 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
820 struct ixgbe_adapter *adapter = netdev_priv(netdev); 820 struct ixgbe_adapter *adapter = netdev_priv(netdev);
821 char firmware_version[32]; 821 char firmware_version[32];
822 822
823 strncpy(drvinfo->driver, ixgbe_driver_name, 32); 823 strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
824 strncpy(drvinfo->version, ixgbe_driver_version, 32); 824 strncpy(drvinfo->version, ixgbe_driver_version,
825 sizeof(drvinfo->version));
825 826
826 sprintf(firmware_version, "%d.%d-%d", 827 snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
827 (adapter->eeprom_version & 0xF000) >> 12, 828 (adapter->eeprom_version & 0xF000) >> 12,
828 (adapter->eeprom_version & 0x0FF0) >> 4, 829 (adapter->eeprom_version & 0x0FF0) >> 4,
829 adapter->eeprom_version & 0x000F); 830 adapter->eeprom_version & 0x000F);
830 831
831 strncpy(drvinfo->fw_version, firmware_version, 32); 832 strncpy(drvinfo->fw_version, firmware_version,
832 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 833 sizeof(drvinfo->fw_version));
834 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
835 sizeof(drvinfo->bus_info));
833 drvinfo->n_stats = IXGBE_STATS_LEN; 836 drvinfo->n_stats = IXGBE_STATS_LEN;
834 drvinfo->testinfo_len = IXGBE_TEST_LEN; 837 drvinfo->testinfo_len = IXGBE_TEST_LEN;
835 drvinfo->regdump_len = ixgbe_get_regs_len(netdev); 838 drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
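The drvinfo hunk is a hardening pattern worth naming: every copy is sized from its destination with sizeof(), and the version string is built with snprintf rather than sprintf. One caveat strncpy keeps even here: it does not NUL-terminate when the source exactly fills the buffer. A stand-alone version of the pattern:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char fw[32], drv[32];

        snprintf(fw, sizeof(fw), "%d.%d-%d", 3, 14, 1);  /* bounded formatting */
        strncpy(drv, "ixgbe", sizeof(drv));              /* bounded copy */
        drv[sizeof(drv) - 1] = '\0';                     /* guarantee termination */
        printf("%s %s\n", drv, fw);
        return 0;
    }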
@@ -985,8 +988,8 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
985 case ETH_SS_STATS: 988 case ETH_SS_STATS:
986 return IXGBE_STATS_LEN; 989 return IXGBE_STATS_LEN;
987 case ETH_SS_NTUPLE_FILTERS: 990 case ETH_SS_NTUPLE_FILTERS:
988 return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY * 991 return ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
989 ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY); 992 ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY;
990 default: 993 default:
991 return -EOPNOTSUPP; 994 return -EOPNOTSUPP;
992 } 995 }
@@ -1435,9 +1438,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1435 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1438 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1436 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1439 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1437 struct ixgbe_hw *hw = &adapter->hw; 1440 struct ixgbe_hw *hw = &adapter->hw;
1438 struct pci_dev *pdev = adapter->pdev;
1439 u32 reg_ctl; 1441 u32 reg_ctl;
1440 int i;
1441 1442
1442 /* shut down the DMA engines now so they can be reinitialized later */ 1443 /* shut down the DMA engines now so they can be reinitialized later */
1443 1444
@@ -1445,14 +1446,15 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1445 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1446 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1446 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1447 reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1447 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1448 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1448 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0)); 1449 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
1449 reg_ctl &= ~IXGBE_RXDCTL_ENABLE; 1450 reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
1450 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl); 1451 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
1451 1452
1452 /* now Tx */ 1453 /* now Tx */
1453 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0)); 1454 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1454 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1455 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1455 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl); 1456 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1457
1456 if (hw->mac.type == ixgbe_mac_82599EB) { 1458 if (hw->mac.type == ixgbe_mac_82599EB) {
1457 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1459 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1458 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1460 reg_ctl &= ~IXGBE_DMATXCTL_TE;
@@ -1461,221 +1463,57 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1461 1463
1462 ixgbe_reset(adapter); 1464 ixgbe_reset(adapter);
1463 1465
1464 if (tx_ring->desc && tx_ring->tx_buffer_info) { 1466 ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
1465 for (i = 0; i < tx_ring->count; i++) { 1467 ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
1466 struct ixgbe_tx_buffer *buf =
1467 &(tx_ring->tx_buffer_info[i]);
1468 if (buf->dma)
1469 dma_unmap_single(&pdev->dev, buf->dma,
1470 buf->length, DMA_TO_DEVICE);
1471 if (buf->skb)
1472 dev_kfree_skb(buf->skb);
1473 }
1474 }
1475
1476 if (rx_ring->desc && rx_ring->rx_buffer_info) {
1477 for (i = 0; i < rx_ring->count; i++) {
1478 struct ixgbe_rx_buffer *buf =
1479 &(rx_ring->rx_buffer_info[i]);
1480 if (buf->dma)
1481 dma_unmap_single(&pdev->dev, buf->dma,
1482 IXGBE_RXBUFFER_2048,
1483 DMA_FROM_DEVICE);
1484 if (buf->skb)
1485 dev_kfree_skb(buf->skb);
1486 }
1487 }
1488
1489 if (tx_ring->desc) {
1490 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1491 tx_ring->dma);
1492 tx_ring->desc = NULL;
1493 }
1494 if (rx_ring->desc) {
1495 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1496 rx_ring->dma);
1497 rx_ring->desc = NULL;
1498 }
1499
1500 kfree(tx_ring->tx_buffer_info);
1501 tx_ring->tx_buffer_info = NULL;
1502 kfree(rx_ring->rx_buffer_info);
1503 rx_ring->rx_buffer_info = NULL;
1504} 1468}
1505 1469
1506static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1470static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1507{ 1471{
1508 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; 1472 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1509 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; 1473 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1510 struct pci_dev *pdev = adapter->pdev;
1511 u32 rctl, reg_data; 1474 u32 rctl, reg_data;
1512 int i, ret_val; 1475 int ret_val;
1476 int err;
1513 1477
1514 /* Setup Tx descriptor ring and Tx buffers */ 1478 /* Setup Tx descriptor ring and Tx buffers */
1479 tx_ring->count = IXGBE_DEFAULT_TXD;
1480 tx_ring->queue_index = 0;
1481 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1482 tx_ring->numa_node = adapter->node;
1515 1483
1516 if (!tx_ring->count) 1484 err = ixgbe_setup_tx_resources(adapter, tx_ring);
1517 tx_ring->count = IXGBE_DEFAULT_TXD; 1485 if (err)
1518 1486 return 1;
1519 tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
1520 sizeof(struct ixgbe_tx_buffer),
1521 GFP_KERNEL);
1522 if (!(tx_ring->tx_buffer_info)) {
1523 ret_val = 1;
1524 goto err_nomem;
1525 }
1526
1527 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
1528 tx_ring->size = ALIGN(tx_ring->size, 4096);
1529 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1530 &tx_ring->dma, GFP_KERNEL);
1531 if (!(tx_ring->desc)) {
1532 ret_val = 2;
1533 goto err_nomem;
1534 }
1535 tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1536
1537 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
1538 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1539 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
1540 ((u64) tx_ring->dma >> 32));
1541 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
1542 tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
1543 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
1544 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
1545
1546 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1547 reg_data |= IXGBE_HLREG0_TXPADEN;
1548 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1549 1487
1550 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1488 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1551 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1489 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1552 reg_data |= IXGBE_DMATXCTL_TE; 1490 reg_data |= IXGBE_DMATXCTL_TE;
1553 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1491 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1554 } 1492 }
1555 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
1556 reg_data |= IXGBE_TXDCTL_ENABLE;
1557 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
1558
1559 for (i = 0; i < tx_ring->count; i++) {
1560 union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
1561 struct sk_buff *skb;
1562 unsigned int size = 1024;
1563
1564 skb = alloc_skb(size, GFP_KERNEL);
1565 if (!skb) {
1566 ret_val = 3;
1567 goto err_nomem;
1568 }
1569 skb_put(skb, size);
1570 tx_ring->tx_buffer_info[i].skb = skb;
1571 tx_ring->tx_buffer_info[i].length = skb->len;
1572 tx_ring->tx_buffer_info[i].dma =
1573 dma_map_single(&pdev->dev, skb->data, skb->len,
1574 DMA_TO_DEVICE);
1575 desc->read.buffer_addr =
1576 cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
1577 desc->read.cmd_type_len = cpu_to_le32(skb->len);
1578 desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
1579 IXGBE_TXD_CMD_IFCS |
1580 IXGBE_TXD_CMD_RS);
1581 desc->read.olinfo_status = 0;
1582 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
1583 desc->read.olinfo_status |=
1584 (skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
1585 1493
1586 } 1494 ixgbe_configure_tx_ring(adapter, tx_ring);
1587 1495
 	/* Setup Rx Descriptor ring and Rx buffers */
-
-	if (!rx_ring->count)
-		rx_ring->count = IXGBE_DEFAULT_RXD;
-
-	rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
-					  sizeof(struct ixgbe_rx_buffer),
-					  GFP_KERNEL);
-	if (!(rx_ring->rx_buffer_info)) {
+	rx_ring->count = IXGBE_DEFAULT_RXD;
+	rx_ring->queue_index = 0;
+	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
+	rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
+	rx_ring->numa_node = adapter->node;
+
+	err = ixgbe_setup_rx_resources(adapter, rx_ring);
+	if (err) {
 		ret_val = 4;
 		goto err_nomem;
 	}
-
-	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
-	rx_ring->size = ALIGN(rx_ring->size, 4096);
-	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
-					   &rx_ring->dma, GFP_KERNEL);
-	if (!(rx_ring->desc)) {
-		ret_val = 5;
-		goto err_nomem;
-	}
-	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
 
 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
-			((u64)rx_ring->dma & 0xFFFFFFFF));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
-			((u64) rx_ring->dma >> 32));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
 
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
-	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
-	reg_data &= ~IXGBE_HLREG0_LPBK;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
-#define IXGBE_RDRXCTL_RDMTS_MASK	0x00000003 /* Receive Descriptor Minimum
-						      Threshold Size mask */
-	reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
-#define IXGBE_MCSTCTRL_MO_MASK	0x00000003 /* Multicast Offset mask */
-	reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
-	reg_data |= adapter->hw.mac.mc_filter_type;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
-	reg_data |= IXGBE_RXDCTL_ENABLE;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		int j = adapter->rx_ring[0]->reg_idx;
-		u32 k;
-		for (k = 0; k < 10; k++) {
-			if (IXGBE_READ_REG(&adapter->hw,
-					   IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
-				break;
-			else
-				msleep(1);
-		}
-	}
+	ixgbe_configure_rx_ring(adapter, rx_ring);
 
 	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
 
-	for (i = 0; i < rx_ring->count; i++) {
-		union ixgbe_adv_rx_desc *rx_desc =
-			IXGBE_RX_DESC_ADV(*rx_ring, i);
-		struct sk_buff *skb;
-
-		skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
-		if (!skb) {
-			ret_val = 6;
-			goto err_nomem;
-		}
-		skb_reserve(skb, NET_IP_ALIGN);
-		rx_ring->rx_buffer_info[i].skb = skb;
-		rx_ring->rx_buffer_info[i].dma =
-			dma_map_single(&pdev->dev, skb->data,
-				       IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
-		rx_desc->read.pkt_addr =
-			cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
-		memset(skb->data, 0x00, skb->len);
-	}
-
 	return 0;
 
 err_nomem:
@@ -1689,16 +1527,21 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
 	u32 reg_data;
 
 	/* right now we only support MAC loopback in the driver */
-
-	/* Setup MAC loopback */
 	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+	/* Setup MAC loopback */
 	reg_data |= IXGBE_HLREG0_LPBK;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
 
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
+
 	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
 	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
 	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
+	IXGBE_WRITE_FLUSH(&adapter->hw);
+	msleep(10);
 
 	/* Disable Atlas Tx lanes; re-enabled in reset path */
 	if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -1756,15 +1599,81 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
 	return 13;
 }
 
+static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
+				  struct ixgbe_ring *rx_ring,
+				  struct ixgbe_ring *tx_ring,
+				  unsigned int size)
+{
+	union ixgbe_adv_rx_desc *rx_desc;
+	struct ixgbe_rx_buffer *rx_buffer_info;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	const int bufsz = rx_ring->rx_buf_len;
+	u32 staterr;
+	u16 rx_ntc, tx_ntc, count = 0;
+
+	/* initialize next to clean and descriptor values */
+	rx_ntc = rx_ring->next_to_clean;
+	tx_ntc = tx_ring->next_to_clean;
+	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+	while (staterr & IXGBE_RXD_STAT_DD) {
+		/* check Rx buffer */
+		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+
+		/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
+		dma_unmap_single(&adapter->pdev->dev,
+				 rx_buffer_info->dma,
+				 bufsz,
+				 DMA_FROM_DEVICE);
+		rx_buffer_info->dma = 0;
+
+		/* verify contents of skb */
+		if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
+			count++;
+
+		/* unmap buffer on Tx side */
+		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+
+		/* increment Rx/Tx next to clean counters */
+		rx_ntc++;
+		if (rx_ntc == rx_ring->count)
+			rx_ntc = 0;
+		tx_ntc++;
+		if (tx_ntc == tx_ring->count)
+			tx_ntc = 0;
+
+		/* fetch next descriptor */
+		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+
+	/* re-map buffers to ring, store next to clean values */
+	ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
+	rx_ring->next_to_clean = rx_ntc;
+	tx_ring->next_to_clean = tx_ntc;
+
+	return count;
+}
+
 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
 	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	int i, j, k, l, lc, good_cnt, ret_val = 0;
-	unsigned long time;
+	int i, j, lc, good_cnt, ret_val = 0;
+	unsigned int size = 1024;
+	netdev_tx_t tx_ret_val;
+	struct sk_buff *skb;
+
+	/* allocate test skb */
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (!skb)
+		return 11;
 
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);
+	/* place data into test skb */
+	ixgbe_create_lbtest_frame(skb, size);
+	skb_put(skb, size);
 
 	/*
 	 * Calculate the loop count based on the largest descriptor ring
@@ -1777,54 +1686,40 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 	else
 		lc = ((rx_ring->count / 64) * 2) + 1;
 
-	k = l = 0;
 	for (j = 0; j <= lc; j++) {
-		for (i = 0; i < 64; i++) {
-			ixgbe_create_lbtest_frame(
-					tx_ring->tx_buffer_info[k].skb,
-					1024);
-			dma_sync_single_for_device(&pdev->dev,
-					tx_ring->tx_buffer_info[k].dma,
-					tx_ring->tx_buffer_info[k].length,
-					DMA_TO_DEVICE);
-			if (unlikely(++k == tx_ring->count))
-				k = 0;
-		}
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
-		msleep(200);
-		/* set the start time for the receive */
-		time = jiffies;
+		/* reset count of good packets */
 		good_cnt = 0;
-		do {
-			/* receive the sent packets */
-			dma_sync_single_for_cpu(&pdev->dev,
-					rx_ring->rx_buffer_info[l].dma,
-					IXGBE_RXBUFFER_2048,
-					DMA_FROM_DEVICE);
-			ret_val = ixgbe_check_lbtest_frame(
-					rx_ring->rx_buffer_info[l].skb, 1024);
-			if (!ret_val)
+
+		/* place 64 packets on the transmit queue*/
+		for (i = 0; i < 64; i++) {
+			skb_get(skb);
+			tx_ret_val = ixgbe_xmit_frame_ring(skb,
+							   adapter->netdev,
+							   adapter,
+							   tx_ring);
+			if (tx_ret_val == NETDEV_TX_OK)
 				good_cnt++;
-			if (++l == rx_ring->count)
-				l = 0;
-			/*
-			 * time + 20 msecs (200 msecs on 2.4) is more than
-			 * enough time to complete the receives, if it's
-			 * exceeded, break and error off
-			 */
-		} while (good_cnt < 64 && jiffies < (time + 20));
+		}
+
 		if (good_cnt != 64) {
-			/* ret_val is the same as mis-compare */
-			ret_val = 13;
+			ret_val = 12;
 			break;
 		}
-		if (jiffies >= (time + 20)) {
-			/* Error code for time out error */
-			ret_val = 14;
+
+		/* allow 200 milliseconds for packets to go from Tx to Rx */
+		msleep(200);
+
+		good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
+						  tx_ring, size);
+		if (good_cnt != 64) {
+			ret_val = 13;
 			break;
 		}
 	}
 
+	/* free the original skb */
+	kfree_skb(skb);
+
 	return ret_val;
 }
 
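The rewritten loopback test above stops hand-building DMA buffers and polling jiffies: it pushes 64 references to a single test skb through the regular ixgbe_xmit_frame_ring() transmit path, sleeps 200 ms, and then reaps both rings with ixgbe_clean_test_rings(). That reaper is the classic descriptor-ring walk: consume entries while the hardware's descriptor-done bit is set, wrapping next-to-clean at the ring size. A minimal, self-contained sketch of the pattern in plain C (the types, sizes, and names here are illustrative stand-ins, not the driver's own):

	#include <stdint.h>

	#define RING_COUNT 64          /* illustrative ring size */
	#define STAT_DD    0x00000001u /* "descriptor done" status bit */

	struct desc { uint32_t status; };

	struct ring {
		struct desc desc[RING_COUNT];
		uint16_t next_to_clean;
	};

	/* Walk the ring from next_to_clean, counting completed descriptors. */
	static unsigned int clean_ring(struct ring *r)
	{
		uint16_t ntc = r->next_to_clean;
		unsigned int count = 0;

		while (r->desc[ntc].status & STAT_DD) {
			r->desc[ntc].status = 0;  /* hand the entry back */
			count++;
			if (++ntc == RING_COUNT)  /* wrap at end of ring */
				ntc = 0;
		}
		r->next_to_clean = ntc;           /* publish the new position */
		return count;
	}

The skb_get() in the transmit loop takes an extra reference on the shared test frame for each queued copy, which is why the single kfree_skb() at the end of ixgbe_run_loopback_test() is enough to release the caller's reference.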
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 072327c5e41a..2f1de8b90f9e 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -304,12 +304,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 	if (!ixgbe_rx_is_fcoe(rx_desc))
 		goto ddp_out;
 
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
 	sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
 	fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
 	if (fcerr == IXGBE_FCERR_BADCRC)
-		skb->ip_summed = CHECKSUM_NONE;
+		skb_checksum_none_assert(skb);
+	else
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
 		fh = (struct fc_frame_header *)(skb->data +
@@ -471,7 +472,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
 
 	/* write context desc */
 	i = tx_ring->next_to_use;
-	context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
 	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
 	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
 	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
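The first ixgbe_fcoe.c hunk fixes an ordering problem in the DDP receive path: the old code claimed CHECKSUM_UNNECESSARY before it had looked at the FC CRC error bit and then demoted the skb on a bad CRC, while the new code classifies the descriptor first and sets the flag exactly once. skb_checksum_none_assert() is the then-new skbuff.h helper used where CHECKSUM_NONE was previously stored redundantly; roughly (a paraphrase from memory, not a verbatim copy of the header), it only checks that a freshly received skb already carries CHECKSUM_NONE:

	static inline void skb_checksum_none_assert(struct sk_buff *skb)
	{
	#ifdef DEBUG
		BUG_ON(skb->ip_summed != CHECKSUM_NONE);
	#endif
	}

Since newly allocated skbs start out with ip_summed == CHECKSUM_NONE, replacing the explicit store with the assert documents the invariant without writing the field twice.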
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index e32af434cc9d..4e0ce91321dd 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -50,7 +50,7 @@
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
-                              "Intel(R) 10 Gigabit PCI Express Network Driver";
+	"Intel(R) 10 Gigabit PCI Express Network Driver";
 
 #define DRV_VERSION "2.0.84-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
@@ -120,7 +120,7 @@ MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
 
 #ifdef CONFIG_IXGBE_DCA
 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
-                            void *p);
+			    void *p);
 static struct notifier_block dca_notifier = {
 	.notifier_call = ixgbe_notify_dca,
 	.next = NULL,
@@ -131,8 +131,8 @@ static struct notifier_block dca_notifier = {
 #ifdef CONFIG_PCI_IOV
 static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
-                 "per physical function");
+MODULE_PARM_DESC(max_vfs,
+		 "Maximum number of virtual functions to allocate per physical function");
 #endif /* CONFIG_PCI_IOV */
 
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -169,8 +169,8 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 
 	/* take a breather then clean up driver data */
 	msleep(100);
-	if (adapter->vfinfo)
-		kfree(adapter->vfinfo);
+
+	kfree(adapter->vfinfo);
 	adapter->vfinfo = NULL;
 
 	adapter->num_vfs = 0;
@@ -282,17 +282,17 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
 		regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
 		break;
 	default:
-		printk(KERN_INFO "%-15s %08x\n", reginfo->name,
-			IXGBE_READ_REG(hw, reginfo->ofs));
+		pr_info("%-15s %08x\n", reginfo->name,
+			IXGBE_READ_REG(hw, reginfo->ofs));
 		return;
 	}
 
 	for (i = 0; i < 8; i++) {
 		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
-		printk(KERN_ERR "%-15s ", rname);
+		pr_err("%-15s", rname);
 		for (j = 0; j < 8; j++)
-			printk(KERN_CONT "%08x ", regs[i*8+j]);
-		printk(KERN_CONT "\n");
+			pr_cont(" %08x", regs[i*8+j]);
+		pr_cont("\n");
 	}
 
 }
@@ -322,18 +322,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 	/* Print netdevice Info */
 	if (netdev) {
 		dev_info(&adapter->pdev->dev, "Net device Info\n");
-		printk(KERN_INFO "Device Name     state            "
-			"trans_start      last_rx\n");
-		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+		pr_info("Device Name     state            "
+			"trans_start      last_rx\n");
+		pr_info("%-15s %016lX %016lX %016lX\n",
 			netdev->name,
 			netdev->state,
 			netdev->trans_start,
 			netdev->last_rx);
 	}
 
 	/* Print Registers */
 	dev_info(&adapter->pdev->dev, "Register Dump\n");
-	printk(KERN_INFO " Register Name   Value\n");
+	pr_info(" Register Name   Value\n");
 	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
 	     reginfo->name; reginfo++) {
 		ixgbe_regdump(hw, reginfo);
@@ -344,13 +344,12 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 		goto exit;
 
 	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
-	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] "
-		"leng ntw timestamp\n");
+	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
 	for (n = 0; n < adapter->num_tx_queues; n++) {
 		tx_ring = adapter->tx_ring[n];
 		tx_buffer_info =
 			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
-		printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
 			n, tx_ring->next_to_use, tx_ring->next_to_clean,
 			(u64)tx_buffer_info->dma,
 			tx_buffer_info->length,
@@ -377,18 +376,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 
 	for (n = 0; n < adapter->num_tx_queues; n++) {
 		tx_ring = adapter->tx_ring[n];
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "T [desc] [address 63:0 ] "
+		pr_info("------------------------------------\n");
+		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+		pr_info("------------------------------------\n");
+		pr_info("T [desc] [address 63:0 ] "
 			"[PlPOIdStDDt Ln] [bi->dma ] "
 			"leng ntw timestamp bi->skb\n");
 
 		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
-			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			u0 = (struct my_u0 *)tx_desc;
-			printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
+			pr_info("T [0x%03X] %016llX %016llX %016llX"
 				" %04X %3X %016llX %p", i,
 				le64_to_cpu(u0->a),
 				le64_to_cpu(u0->b),
@@ -399,13 +398,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 				tx_buffer_info->skb);
 			if (i == tx_ring->next_to_use &&
 				i == tx_ring->next_to_clean)
-				printk(KERN_CONT " NTC/U\n");
+				pr_cont(" NTC/U\n");
 			else if (i == tx_ring->next_to_use)
-				printk(KERN_CONT " NTU\n");
+				pr_cont(" NTU\n");
 			else if (i == tx_ring->next_to_clean)
-				printk(KERN_CONT " NTC\n");
+				pr_cont(" NTC\n");
 			else
-				printk(KERN_CONT "\n");
+				pr_cont("\n");
 
 			if (netif_msg_pktdata(adapter) &&
 				tx_buffer_info->dma != 0)
@@ -419,11 +418,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 	/* Print RX Rings Summary */
 rx_ring_summary:
 	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
-	printk(KERN_INFO "Queue [NTU] [NTC]\n");
+	pr_info("Queue [NTU] [NTC]\n");
 	for (n = 0; n < adapter->num_rx_queues; n++) {
 		rx_ring = adapter->rx_ring[n];
-		printk(KERN_INFO "%5d %5X %5X\n", n,
-			rx_ring->next_to_use, rx_ring->next_to_clean);
+		pr_info("%5d %5X %5X\n",
+			n, rx_ring->next_to_use, rx_ring->next_to_clean);
 	}
 
 	/* Print RX Rings */
@@ -454,30 +453,30 @@ rx_ring_summary:
 	 */
 	for (n = 0; n < adapter->num_rx_queues; n++) {
 		rx_ring = adapter->rx_ring[n];
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
-		printk(KERN_INFO "------------------------------------\n");
-		printk(KERN_INFO "R [desc] [ PktBuf A0] "
+		pr_info("------------------------------------\n");
+		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+		pr_info("------------------------------------\n");
+		pr_info("R [desc] [ PktBuf A0] "
 			"[ HeadBuf DD] [bi->dma ] [bi->skb] "
 			"<-- Adv Rx Read format\n");
-		printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
+		pr_info("RWB[desc] [PcsmIpSHl PtRs] "
 			"[vl er S cks ln] ---------------- [bi->skb] "
 			"<-- Adv Rx Write-Back format\n");
 
 		for (i = 0; i < rx_ring->count; i++) {
 			rx_buffer_info = &rx_ring->rx_buffer_info[i];
-			rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 			u0 = (struct my_u0 *)rx_desc;
 			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 			if (staterr & IXGBE_RXD_STAT_DD) {
 				/* Descriptor Done */
-				printk(KERN_INFO "RWB[0x%03X] %016llX "
+				pr_info("RWB[0x%03X] %016llX "
 					"%016llX ---------------- %p", i,
 					le64_to_cpu(u0->a),
 					le64_to_cpu(u0->b),
 					rx_buffer_info->skb);
 			} else {
-				printk(KERN_INFO "R [0x%03X] %016llX "
+				pr_info("R [0x%03X] %016llX "
 					"%016llX %016llX %p", i,
 					le64_to_cpu(u0->a),
 					le64_to_cpu(u0->b),
@@ -503,11 +502,11 @@ rx_ring_summary:
 			}
 
 			if (i == rx_ring->next_to_use)
-				printk(KERN_CONT " NTU\n");
+				pr_cont(" NTU\n");
 			else if (i == rx_ring->next_to_clean)
-				printk(KERN_CONT " NTC\n");
+				pr_cont(" NTC\n");
 			else
-				printk(KERN_CONT "\n");
+				pr_cont("\n");
 
 		}
 	}
@@ -523,7 +522,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
 	/* Let firmware take over control of h/w */
 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
 }
 
 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -533,7 +532,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
 	/* Let firmware know the driver has taken over */
 	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
 }
 
 /*
@@ -545,7 +544,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
  *
  */
 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
-                           u8 queue, u8 msix_vector)
+			   u8 queue, u8 msix_vector)
 {
 	u32 ivar, index;
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -586,7 +585,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
 }
 
 static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
-                                          u64 qmask)
+					  u64 qmask)
 {
 	u32 mask;
 
@@ -601,9 +600,9 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 	}
 }
 
-static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
-                                             struct ixgbe_tx_buffer
-                                             *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
+				      struct ixgbe_tx_buffer
+				      *tx_buffer_info)
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
@@ -637,7 +636,7 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
  * Returns : true if in xon state (currently not paused)
  */
 static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
-                                      struct ixgbe_ring *tx_ring)
+				      struct ixgbe_ring *tx_ring)
 {
 	u32 txoff = IXGBE_TFCS_TXOFF;
 
@@ -682,8 +681,8 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
 }
 
 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
-                                       struct ixgbe_ring *tx_ring,
-                                       unsigned int eop)
+				       struct ixgbe_ring *tx_ring,
+				       unsigned int eop)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
@@ -695,7 +694,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 	    ixgbe_tx_xon_state(adapter, tx_ring)) {
 		/* detected Tx unit hang */
 		union ixgbe_adv_tx_desc *tx_desc;
-		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
 		e_err(drv, "Detected Tx Unit Hang\n"
 			"  Tx Queue             <%d>\n"
 			"  TDH, TDT             <%x>, <%x>\n"
@@ -732,7 +731,7 @@ static void ixgbe_tx_timeout(struct net_device *netdev);
  * @tx_ring: tx ring to clean
  **/
 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
-                               struct ixgbe_ring *tx_ring)
+			       struct ixgbe_ring *tx_ring)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct net_device *netdev = adapter->netdev;
@@ -743,7 +742,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->tx_buffer_info[i].next_to_watch;
-	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
 
 	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
 	       (count < tx_ring->work_limit)) {
@@ -751,7 +750,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		rmb(); /* read buffer_info after eop_desc */
 		for ( ; !cleaned; count++) {
 			struct sk_buff *skb;
-			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			cleaned = (i == eop);
 			skb = tx_buffer_info->skb;
@@ -781,7 +780,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 			}
 
 			ixgbe_unmap_and_free_tx_resource(adapter,
-			                                 tx_buffer_info);
+							 tx_buffer_info);
 
 			tx_desc->wb.status = 0;
 
@@ -791,14 +790,14 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		}
 
 		eop = tx_ring->tx_buffer_info[i].next_to_watch;
-		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
 	}
 
 	tx_ring->next_to_clean = i;
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(count && netif_carrier_ok(netdev) &&
-	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -827,12 +826,12 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 	tx_ring->total_packets += total_packets;
 	tx_ring->stats.packets += total_packets;
 	tx_ring->stats.bytes += total_bytes;
-	return (count < tx_ring->work_limit);
+	return count < tx_ring->work_limit;
 }
 
 #ifdef CONFIG_IXGBE_DCA
 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
-                                struct ixgbe_ring *rx_ring)
+				struct ixgbe_ring *rx_ring)
 {
 	u32 rxctrl;
 	int cpu = get_cpu();
@@ -846,13 +845,13 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
 	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
 		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
 		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-		           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
 	}
 	rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
 	rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
 	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
 	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
-	            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+		    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
 	rx_ring->cpu = cpu;
 	}
@@ -860,7 +859,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
-                                struct ixgbe_ring *tx_ring)
+				struct ixgbe_ring *tx_ring)
 {
 	u32 txctrl;
 	int cpu = get_cpu();
@@ -878,7 +877,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
 		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
 		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
 		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-		           IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
 		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
 	}
@@ -946,16 +945,15 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
  * @rx_desc: rx descriptor
  **/
 static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
-                              struct sk_buff *skb, u8 status,
-                              struct ixgbe_ring *ring,
-                              union ixgbe_adv_rx_desc *rx_desc)
+			      struct sk_buff *skb, u8 status,
+			      struct ixgbe_ring *ring,
+			      union ixgbe_adv_rx_desc *rx_desc)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct napi_struct *napi = &q_vector->napi;
 	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
 	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 
-	skb_record_rx_queue(skb, ring->queue_index);
 	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
 		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
 			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
@@ -981,7 +979,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 {
 	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
 
-	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
 
 	/* Rx csum disabled */
 	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
@@ -1017,7 +1015,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
 }
 
 static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
-                                         struct ixgbe_ring *rx_ring, u32 val)
+					 struct ixgbe_ring *rx_ring, u32 val)
 {
 	/*
 	 * Force memory writes to complete before letting h/w
@@ -1033,25 +1031,27 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
  * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
  * @adapter: address of board private structure
  **/
-static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-                                   struct ixgbe_ring *rx_ring,
-                                   int cleaned_count)
+void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
+			    struct ixgbe_ring *rx_ring,
+			    int cleaned_count)
 {
+	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbe_rx_buffer *bi;
 	unsigned int i;
+	unsigned int bufsz = rx_ring->rx_buf_len;
 
 	i = rx_ring->next_to_use;
 	bi = &rx_ring->rx_buffer_info[i];
 
 	while (cleaned_count--) {
-		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 
 		if (!bi->page_dma &&
 		    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
 			if (!bi->page) {
-				bi->page = alloc_page(GFP_ATOMIC);
+				bi->page = netdev_alloc_page(netdev);
 				if (!bi->page) {
 					adapter->alloc_rx_page_failed++;
 					goto no_buffers;
@@ -1063,29 +1063,28 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			}
 
 			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
-			                            bi->page_offset,
-			                            (PAGE_SIZE / 2),
-			                            DMA_FROM_DEVICE);
+						    bi->page_offset,
+						    (PAGE_SIZE / 2),
+						    DMA_FROM_DEVICE);
 		}
 
 		if (!bi->skb) {
-			struct sk_buff *skb;
-			/* netdev_alloc_skb reserves 32 bytes up front!! */
-			uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES;
-			skb = netdev_alloc_skb(adapter->netdev, bufsz);
+			struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
+									bufsz);
+			bi->skb = skb;
 
 			if (!skb) {
 				adapter->alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
+			/* initialize queue mapping */
+			skb_record_rx_queue(skb, rx_ring->queue_index);
+		}
 
-			/* advance the data pointer to the next cache line */
-			skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES)
-					  - skb->data));
-
-			bi->skb = skb;
-			bi->dma = dma_map_single(&pdev->dev, skb->data,
-			                         rx_ring->rx_buf_len,
-			                         DMA_FROM_DEVICE);
+		if (!bi->dma) {
+			bi->dma = dma_map_single(&pdev->dev,
+						 bi->skb->data,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
 		}
 		/* Refresh the desc even if buffer_addrs didn't change because
@@ -1095,6 +1094,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 		} else {
 			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+			rx_desc->read.hdr_addr = 0;
 		}
 
 		i++;
@@ -1126,8 +1126,8 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
 static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
 {
 	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
-	        IXGBE_RXDADV_RSCCNT_MASK) >>
-	        IXGBE_RXDADV_RSCCNT_SHIFT;
+		IXGBE_RXDADV_RSCCNT_MASK) >>
+		IXGBE_RXDADV_RSCCNT_SHIFT;
 }
 
 /**
@@ -1140,7 +1140,7 @@ static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
  * turns it into the frag list owner.
  **/
 static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
-                                                        u64 *count)
+							u64 *count)
 {
 	unsigned int frag_list_size = 0;
 
@@ -1168,8 +1168,8 @@ struct ixgbe_rsc_cb {
 #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
 
 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
-                               struct ixgbe_ring *rx_ring,
-                               int *work_done, int work_to_do)
+			       struct ixgbe_ring *rx_ring,
+			       int *work_done, int work_to_do)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct net_device *netdev = adapter->netdev;
@@ -1188,7 +1188,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 #endif /* IXGBE_FCOE */
 
 	i = rx_ring->next_to_clean;
-	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
@@ -1231,9 +1231,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
 			} else {
 				dma_unmap_single(&pdev->dev,
-				                 rx_buffer_info->dma,
-				                 rx_ring->rx_buf_len,
-				                 DMA_FROM_DEVICE);
+						 rx_buffer_info->dma,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
 			}
 			rx_buffer_info->dma = 0;
 			skb_put(skb, len);
@@ -1244,9 +1244,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-			                   rx_buffer_info->page,
-			                   rx_buffer_info->page_offset,
-			                   upper_len);
+					   rx_buffer_info->page,
+					   rx_buffer_info->page_offset,
+					   upper_len);
 
 			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
 			    (page_count(rx_buffer_info->page) != 1))
@@ -1263,7 +1263,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (i == rx_ring->count)
 			i = 0;
 
-		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+		next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
 		prefetch(next_rxd);
 		cleaned_count++;
 
@@ -1280,18 +1280,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
 		if (staterr & IXGBE_RXD_STAT_EOP) {
 			if (skb->prev)
-				skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
+				skb = ixgbe_transform_rsc_queue(skb,
+								&(rx_ring->rsc_count));
 			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 				if (IXGBE_RSC_CB(skb)->delay_unmap) {
 					dma_unmap_single(&pdev->dev,
 							 IXGBE_RSC_CB(skb)->dma,
 							 rx_ring->rx_buf_len,
 							 DMA_FROM_DEVICE);
 					IXGBE_RSC_CB(skb)->dma = 0;
 					IXGBE_RSC_CB(skb)->delay_unmap = false;
 				}
 				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
-					rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
+					rx_ring->rsc_count +=
+						skb_shinfo(skb)->nr_frags;
 				else
 					rx_ring->rsc_count++;
 				rx_ring->rsc_flush++;
@@ -1403,24 +1405,24 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 		q_vector = adapter->q_vector[v_idx];
 		/* XXX for_each_set_bit(...) */
 		r_idx = find_first_bit(q_vector->rxr_idx,
-		                       adapter->num_rx_queues);
+				       adapter->num_rx_queues);
 
 		for (i = 0; i < q_vector->rxr_count; i++) {
 			j = adapter->rx_ring[r_idx]->reg_idx;
 			ixgbe_set_ivar(adapter, 0, j, v_idx);
 			r_idx = find_next_bit(q_vector->rxr_idx,
-			                      adapter->num_rx_queues,
-			                      r_idx + 1);
+					      adapter->num_rx_queues,
+					      r_idx + 1);
 		}
 		r_idx = find_first_bit(q_vector->txr_idx,
-		                       adapter->num_tx_queues);
+				       adapter->num_tx_queues);
 
 		for (i = 0; i < q_vector->txr_count; i++) {
 			j = adapter->tx_ring[r_idx]->reg_idx;
 			ixgbe_set_ivar(adapter, 1, j, v_idx);
 			r_idx = find_next_bit(q_vector->txr_idx,
-			                      adapter->num_tx_queues,
-			                      r_idx + 1);
+					      adapter->num_tx_queues,
+					      r_idx + 1);
 		}
 
 		if (q_vector->txr_count && !q_vector->rxr_count)
@@ -1435,7 +1437,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
-		               v_idx);
+			       v_idx);
 	else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
 		ixgbe_set_ivar(adapter, -1, 1, v_idx);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
@@ -1477,8 +1479,8 @@ enum latency_range {
  * parameter (see ixgbe_param.c)
  **/
 static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
-                           u32 eitr, u8 itr_setting,
-                           int packets, int bytes)
+			   u32 eitr, u8 itr_setting,
+			   int packets, int bytes)
 {
 	unsigned int retval = itr_setting;
 	u32 timepassed_us;
@@ -1567,30 +1569,30 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 	for (i = 0; i < q_vector->txr_count; i++) {
 		tx_ring = adapter->tx_ring[r_idx];
 		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-		                           q_vector->tx_itr,
-		                           tx_ring->total_packets,
-		                           tx_ring->total_bytes);
+					   q_vector->tx_itr,
+					   tx_ring->total_packets,
+					   tx_ring->total_bytes);
 		/* if the result for this queue would decrease interrupt
 		 * rate for this vector then use that result */
 		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
-		                    q_vector->tx_itr - 1 : ret_itr);
+				    q_vector->tx_itr - 1 : ret_itr);
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rxr_count; i++) {
 		rx_ring = adapter->rx_ring[r_idx];
 		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
-		                           q_vector->rx_itr,
-		                           rx_ring->total_packets,
-		                           rx_ring->total_bytes);
+					   q_vector->rx_itr,
+					   rx_ring->total_packets,
+					   rx_ring->total_bytes);
 		/* if the result for this queue would decrease interrupt
 		 * rate for this vector then use that result */
 		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
-		                    q_vector->rx_itr - 1 : ret_itr);
+				    q_vector->rx_itr - 1 : ret_itr);
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
@@ -1627,39 +1629,40 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 static void ixgbe_check_overtemp_task(struct work_struct *work)
 {
 	struct ixgbe_adapter *adapter = container_of(work,
-	                                             struct ixgbe_adapter,
-	                                             check_overtemp_task);
+						     struct ixgbe_adapter,
+						     check_overtemp_task);
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 eicr = adapter->interrupt_event;
 
-	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
-		switch (hw->device_id) {
-		case IXGBE_DEV_ID_82599_T3_LOM: {
-			u32 autoneg;
-			bool link_up = false;
+	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
+		return;
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82599_T3_LOM: {
+		u32 autoneg;
+		bool link_up = false;
 
-			if (hw->mac.ops.check_link)
-				hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+		if (hw->mac.ops.check_link)
+			hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
 
-			if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
-			    (eicr & IXGBE_EICR_LSC))
-				/* Check if this is due to overtemp */
-				if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
-					break;
-		}
-			return;
-		default:
-			if (!(eicr & IXGBE_EICR_GPI_SDP0))
-				return;
-			break;
-		}
-		e_crit(drv, "Network adapter has been stopped because it has "
-		       "over heated. Restart the computer. If the problem "
-		       "persists, power off the system and replace the "
-		       "adapter\n");
-		/* write to clear the interrupt */
-		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
+		if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
+		    (eicr & IXGBE_EICR_LSC))
+			/* Check if this is due to overtemp */
+			if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
+				break;
+		return;
+	}
+	default:
+		if (!(eicr & IXGBE_EICR_GPI_SDP0))
+			return;
+		break;
 	}
+	e_crit(drv,
+	       "Network adapter has been stopped because it has over heated. "
+	       "Restart the computer. If the problem persists, "
+	       "power off the system and replace the adapter\n");
+	/* write to clear the interrupt */
+	IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
 }
 
 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1746,9 +1749,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 			netif_tx_stop_all_queues(netdev);
 			for (i = 0; i < adapter->num_tx_queues; i++) {
 				struct ixgbe_ring *tx_ring =
-				                            adapter->tx_ring[i];
+							adapter->tx_ring[i];
 				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
-				                       &tx_ring->reinit_state))
+						       &tx_ring->reinit_state))
 					schedule_work(&adapter->fdir_reinit_task);
 			}
 		}
@@ -1777,7 +1780,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
 }
 
 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
-                                            u64 qmask)
+					    u64 qmask)
 {
 	u32 mask;
 
@@ -1809,7 +1812,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
 		tx_ring->total_bytes = 0;
 		tx_ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	/* EIAM disabled interrupts (on this vector) for us */
@@ -1837,7 +1840,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
 		rx_ring->total_bytes = 0;
 		rx_ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	if (!q_vector->rxr_count)
@@ -1867,7 +1870,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 		ring->total_bytes = 0;
 		ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1876,7 +1879,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 		ring->total_bytes = 0;
 		ring->total_packets = 0;
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	/* EIAM disabled interrupts (on this vector) for us */
@@ -1896,7 +1899,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 {
 	struct ixgbe_q_vector *q_vector =
-	                       container_of(napi, struct ixgbe_q_vector, napi);
+			       container_of(napi, struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct ixgbe_ring *rx_ring = NULL;
 	int work_done = 0;
@@ -1918,7 +1921,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 			ixgbe_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable_queues(adapter,
-			                        ((u64)1 << q_vector->v_idx));
+						((u64)1 << q_vector->v_idx));
 	}
 
 	return work_done;
@@ -1935,7 +1938,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
 static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 {
 	struct ixgbe_q_vector *q_vector =
-	                       container_of(napi, struct ixgbe_q_vector, napi);
+			       container_of(napi, struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct ixgbe_ring *ring = NULL;
 	int work_done = 0, i;
@@ -1951,7 +1954,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 #endif
 		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	/* attempt to distribute budget to each queue fairly, but don't allow
@@ -1967,7 +1970,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 #endif
 		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-		                      r_idx + 1);
+				      r_idx + 1);
 	}
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
@@ -1979,7 +1982,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 			ixgbe_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			ixgbe_irq_enable_queues(adapter,
-			                        ((u64)1 << q_vector->v_idx));
+						((u64)1 << q_vector->v_idx));
 		return 0;
 	}
 
@@ -1997,7 +2000,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
 static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
 {
 	struct ixgbe_q_vector *q_vector =
-	                       container_of(napi, struct ixgbe_q_vector, napi);
+			       container_of(napi, struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct ixgbe_ring *tx_ring = NULL;
 	int work_done = 0;
@@ -2019,14 +2022,15 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2019 if (adapter->tx_itr_setting & 1) 2022 if (adapter->tx_itr_setting & 1)
2020 ixgbe_set_itr_msix(q_vector); 2023 ixgbe_set_itr_msix(q_vector);
2021 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2024 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2022 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); 2025 ixgbe_irq_enable_queues(adapter,
2026 ((u64)1 << q_vector->v_idx));
2023 } 2027 }
2024 2028
2025 return work_done; 2029 return work_done;
2026} 2030}
2027 2031
2028static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, 2032static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
2029 int r_idx) 2033 int r_idx)
2030{ 2034{
2031 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2035 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2032 2036
@@ -2035,7 +2039,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
2035} 2039}
2036 2040
2037static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 2041static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2038 int t_idx) 2042 int t_idx)
2039{ 2043{
2040 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2044 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2041 2045
@@ -2055,7 +2059,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2055 * mapping configurations in here. 2059 * mapping configurations in here.
2056 **/ 2060 **/
2057static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, 2061static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2058 int vectors) 2062 int vectors)
2059{ 2063{
2060 int v_start = 0; 2064 int v_start = 0;
2061 int rxr_idx = 0, txr_idx = 0; 2065 int rxr_idx = 0, txr_idx = 0;
@@ -2122,7 +2126,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2122 struct net_device *netdev = adapter->netdev; 2126 struct net_device *netdev = adapter->netdev;
2123 irqreturn_t (*handler)(int, void *); 2127 irqreturn_t (*handler)(int, void *);
2124 int i, vector, q_vectors, err; 2128 int i, vector, q_vectors, err;
2125 int ri=0, ti=0; 2129 int ri = 0, ti = 0;
2126 2130
2127 /* Decrement for Other and TCP Timer vectors */ 2131 /* Decrement for Other and TCP Timer vectors */
2128 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2132 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -2133,26 +2137,24 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2133 goto out; 2137 goto out;
2134 2138
2135#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ 2139#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
2136 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 2140 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
2137 &ixgbe_msix_clean_many) 2141 &ixgbe_msix_clean_many)
2138 for (vector = 0; vector < q_vectors; vector++) { 2142 for (vector = 0; vector < q_vectors; vector++) {
2139 handler = SET_HANDLER(adapter->q_vector[vector]); 2143 handler = SET_HANDLER(adapter->q_vector[vector]);
2140 2144
2141 if(handler == &ixgbe_msix_clean_rx) { 2145 if (handler == &ixgbe_msix_clean_rx) {
2142 sprintf(adapter->name[vector], "%s-%s-%d", 2146 sprintf(adapter->name[vector], "%s-%s-%d",
2143 netdev->name, "rx", ri++); 2147 netdev->name, "rx", ri++);
2144 } 2148 } else if (handler == &ixgbe_msix_clean_tx) {
2145 else if(handler == &ixgbe_msix_clean_tx) {
2146 sprintf(adapter->name[vector], "%s-%s-%d", 2149 sprintf(adapter->name[vector], "%s-%s-%d",
2147 netdev->name, "tx", ti++); 2150 netdev->name, "tx", ti++);
2148 } 2151 } else
2149 else
2150 sprintf(adapter->name[vector], "%s-%s-%d", 2152 sprintf(adapter->name[vector], "%s-%s-%d",
2151 netdev->name, "TxRx", vector); 2153 netdev->name, "TxRx", vector);
2152 2154
2153 err = request_irq(adapter->msix_entries[vector].vector, 2155 err = request_irq(adapter->msix_entries[vector].vector,
2154 handler, 0, adapter->name[vector], 2156 handler, 0, adapter->name[vector],
2155 adapter->q_vector[vector]); 2157 adapter->q_vector[vector]);
2156 if (err) { 2158 if (err) {
2157 e_err(probe, "request_irq failed for MSIX interrupt " 2159 e_err(probe, "request_irq failed for MSIX interrupt "
2158 "Error: %d\n", err); 2160 "Error: %d\n", err);
@@ -2162,7 +2164,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2162 2164
2163 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 2165 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
2164 err = request_irq(adapter->msix_entries[vector].vector, 2166 err = request_irq(adapter->msix_entries[vector].vector,
2165 ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 2167 ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
2166 if (err) { 2168 if (err) {
2167 e_err(probe, "request_irq for msix_lsc failed: %d\n", err); 2169 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
2168 goto free_queue_irqs; 2170 goto free_queue_irqs;
@@ -2173,7 +2175,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2173free_queue_irqs: 2175free_queue_irqs:
2174 for (i = vector - 1; i >= 0; i--) 2176 for (i = vector - 1; i >= 0; i--)
2175 free_irq(adapter->msix_entries[--vector].vector, 2177 free_irq(adapter->msix_entries[--vector].vector,
2176 adapter->q_vector[i]); 2178 adapter->q_vector[i]);
2177 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2179 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2178 pci_disable_msix(adapter->pdev); 2180 pci_disable_msix(adapter->pdev);
2179 kfree(adapter->msix_entries); 2181 kfree(adapter->msix_entries);
@@ -2191,13 +2193,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2191 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 2193 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
2192 2194
2193 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, 2195 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
2194 q_vector->tx_itr, 2196 q_vector->tx_itr,
2195 tx_ring->total_packets, 2197 tx_ring->total_packets,
2196 tx_ring->total_bytes); 2198 tx_ring->total_bytes);
2197 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, 2199 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
2198 q_vector->rx_itr, 2200 q_vector->rx_itr,
2199 rx_ring->total_packets, 2201 rx_ring->total_packets,
2200 rx_ring->total_bytes); 2202 rx_ring->total_bytes);
2201 2203
2202 current_itr = max(q_vector->rx_itr, q_vector->tx_itr); 2204 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
2203 2205
@@ -2343,10 +2345,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
2343 err = ixgbe_request_msix_irqs(adapter); 2345 err = ixgbe_request_msix_irqs(adapter);
2344 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 2346 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
2345 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, 2347 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
2346 netdev->name, netdev); 2348 netdev->name, netdev);
2347 } else { 2349 } else {
2348 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, 2350 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
2349 netdev->name, netdev); 2351 netdev->name, netdev);
2350 } 2352 }
2351 2353
2352 if (err) 2354 if (err)
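ixgbe_request_irq() tries the interrupt modes in strict preference order: MSI-X with one handler per queue vector, then MSI with a single exclusive vector, then legacy INTx, which alone passes IRQF_SHARED because only a legacy line can be shared with other devices. A userspace model of that ladder (the enum and helper are illustrative, not driver API):

    #include <stdio.h>
    #include <stdbool.h>

    enum irq_mode { IRQ_MSIX, IRQ_MSI, IRQ_LEGACY };

    /* pick the best interrupt mode the platform granted us */
    static enum irq_mode pick_irq_mode(bool msix_ok, bool msi_ok)
    {
            if (msix_ok)
                    return IRQ_MSIX;    /* per-queue vectors */
            if (msi_ok)
                    return IRQ_MSI;     /* one exclusive vector */
            return IRQ_LEGACY;          /* shared line: needs IRQF_SHARED */
    }

    int main(void)
    {
            printf("%d\n", pick_irq_mode(false, true));  /* -> 1 (MSI) */
            return 0;
    }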
@@ -2370,7 +2372,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2370 i--; 2372 i--;
2371 for (; i >= 0; i--) { 2373 for (; i >= 0; i--) {
2372 free_irq(adapter->msix_entries[i].vector, 2374 free_irq(adapter->msix_entries[i].vector,
2373 adapter->q_vector[i]); 2375 adapter->q_vector[i]);
2374 } 2376 }
2375 2377
2376 ixgbe_reset_q_vectors(adapter); 2378 ixgbe_reset_q_vectors(adapter);
@@ -2413,7 +2415,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2413 struct ixgbe_hw *hw = &adapter->hw; 2415 struct ixgbe_hw *hw = &adapter->hw;
2414 2416
2415 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), 2417 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
2416 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); 2418 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
2417 2419
2418 ixgbe_set_ivar(adapter, 0, 0, 0); 2420 ixgbe_set_ivar(adapter, 0, 0, 0);
2419 ixgbe_set_ivar(adapter, 1, 0, 0); 2421 ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -2425,95 +2427,140 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2425} 2427}
2426 2428
2427/** 2429/**
2428 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset 2430 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2429 * @adapter: board private structure 2431 * @adapter: board private structure
2432 * @ring: structure containing ring specific data
2430 * 2433 *
2431 * Configure the Tx unit of the MAC after a reset. 2434 * Configure the Tx descriptor ring after a reset.
2432 **/ 2435 **/
2433static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) 2436void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2437 struct ixgbe_ring *ring)
2434{ 2438{
2435 u64 tdba;
2436 struct ixgbe_hw *hw = &adapter->hw; 2439 struct ixgbe_hw *hw = &adapter->hw;
2437 u32 i, j, tdlen, txctrl; 2440 u64 tdba = ring->dma;
2441 int wait_loop = 10;
2442 u32 txdctl;
2443 u16 reg_idx = ring->reg_idx;
2438 2444
2439 /* Setup the HW Tx Head and Tail descriptor pointers */ 2445 /* disable queue to avoid issues while updating state */
2440 for (i = 0; i < adapter->num_tx_queues; i++) { 2446 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2441 struct ixgbe_ring *ring = adapter->tx_ring[i]; 2447 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
2442 j = ring->reg_idx; 2448 txdctl & ~IXGBE_TXDCTL_ENABLE);
2443 tdba = ring->dma; 2449 IXGBE_WRITE_FLUSH(hw);
2444 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 2450
2445 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 2451 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
2446 (tdba & DMA_BIT_MASK(32))); 2452 (tdba & DMA_BIT_MASK(32)));
2447 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 2453 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2448 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); 2454 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2449 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 2455 ring->count * sizeof(union ixgbe_adv_tx_desc));
2450 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 2456 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2451 adapter->tx_ring[i]->head = IXGBE_TDH(j); 2457 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2452 adapter->tx_ring[i]->tail = IXGBE_TDT(j); 2458 ring->head = IXGBE_TDH(reg_idx);
2453 /* 2459 ring->tail = IXGBE_TDT(reg_idx);
2454 * Disable Tx Head Writeback RO bit, since this hoses 2460
2455 * bookkeeping if things aren't delivered in order. 2461 /* configure fetching thresholds */
2456 */ 2462 if (adapter->rx_itr_setting == 0) {
2457 switch (hw->mac.type) { 2463 /* cannot set wthresh when itr==0 */
2458 case ixgbe_mac_82598EB: 2464 txdctl &= ~0x007F0000;
2459 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); 2465 } else {
2460 break; 2466 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2461 case ixgbe_mac_82599EB: 2467 txdctl |= (8 << 16);
2462 default: 2468 }
2463 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); 2469 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2464 break; 2470 /* PThresh workaround for Tx hang with DFP enabled. */
2465 } 2471 txdctl |= 32;
2466 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2467 switch (hw->mac.type) {
2468 case ixgbe_mac_82598EB:
2469 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
2470 break;
2471 case ixgbe_mac_82599EB:
2472 default:
2473 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
2474 break;
2475 }
2476 } 2472 }
2477 2473
2478 if (hw->mac.type == ixgbe_mac_82599EB) { 2474 /* reinitialize flowdirector state */
2479 u32 rttdcs; 2475 set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
2480 u32 mask;
2481 2476
2482 /* disable the arbiter while setting MTQC */ 2477 /* enable queue */
2483 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2478 txdctl |= IXGBE_TXDCTL_ENABLE;
2484 rttdcs |= IXGBE_RTTDCS_ARBDIS; 2479 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2485 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2486 2480
2487 /* set transmit pool layout */ 2481 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2488 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED); 2482 if (hw->mac.type == ixgbe_mac_82598EB &&
2489 switch (adapter->flags & mask) { 2483 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2484 return;
2490 2485
2491 case (IXGBE_FLAG_SRIOV_ENABLED): 2486 /* poll to verify queue is enabled */
2492 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 2487 do {
2493 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); 2488 msleep(1);
2494 break; 2489 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2490 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2491 if (!wait_loop)
2492 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
2493}
2495 2494
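The threshold writes in the new ixgbe_configure_tx_ring() above — (8 << 16), the ~0x007F0000 mask, and txdctl |= 32 — all manipulate 7-bit fields packed into one TXDCTL register: PTHRESH in bits 6:0, HTHRESH in bits 14:8, WTHRESH in bits 22:16. Field positions are taken from the 82599 datasheet; verify against the exact part before reusing them. A small sketch of the packing:

    #include <stdio.h>
    #include <stdint.h>

    #define TXDCTL_PTHRESH(x)  ((uint32_t)(x) & 0x7F)
    #define TXDCTL_HTHRESH(x) (((uint32_t)(x) & 0x7F) << 8)
    #define TXDCTL_WTHRESH(x) (((uint32_t)(x) & 0x7F) << 16)

    int main(void)
    {
            /* WTHRESH=8 and PTHRESH=32, as in the hunk above */
            uint32_t txdctl = TXDCTL_WTHRESH(8) | TXDCTL_PTHRESH(32);
            printf("0x%08x\n", txdctl);   /* -> 0x00080020 */
            return 0;
    }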
2496 case (IXGBE_FLAG_DCB_ENABLED): 2495static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2497 /* We enable 8 traffic classes, DCB only */ 2496{
2498 IXGBE_WRITE_REG(hw, IXGBE_MTQC, 2497 struct ixgbe_hw *hw = &adapter->hw;
2499 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ)); 2498 u32 rttdcs;
2500 break; 2499 u32 mask;
2501 2500
2502 default: 2501 if (hw->mac.type == ixgbe_mac_82598EB)
2503 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2502 return;
2504 break; 2503
2505 } 2504 /* disable the arbiter while setting MTQC */
2505 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2506 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2507 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2508
2509 /* set transmit pool layout */
2510 mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
2511 switch (adapter->flags & mask) {
2512
2513 case (IXGBE_FLAG_SRIOV_ENABLED):
2514 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2515 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2516 break;
2517
2518 case (IXGBE_FLAG_DCB_ENABLED):
2519 /* We enable 8 traffic classes, DCB only */
2520 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2521 (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
2522 break;
2523
2524 default:
2525 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2526 break;
2527 }
2528
2529 /* re-enable the arbiter */
2530 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2531 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2532}
2533
2534/**
2535 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
2536 * @adapter: board private structure
2537 *
2538 * Configure the Tx unit of the MAC after a reset.
2539 **/
2540static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2541{
2542 struct ixgbe_hw *hw = &adapter->hw;
2543 u32 dmatxctl;
2544 u32 i;
2545
2546 ixgbe_setup_mtqc(adapter);
2506 2547
2507 /* re-enable the arbiter */ 2548 if (hw->mac.type != ixgbe_mac_82598EB) {
2508 rttdcs &= ~IXGBE_RTTDCS_ARBDIS; 2549 /* DMATXCTL.EN must be before Tx queues are enabled */
2509 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); 2550 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2551 dmatxctl |= IXGBE_DMATXCTL_TE;
2552 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2510 } 2553 }
2554
2555 /* Setup the HW Tx Head and Tail descriptor pointers */
2556 for (i = 0; i < adapter->num_tx_queues; i++)
2557 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
2511} 2558}
2512 2559
2513#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 2560#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2514 2561
2515static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, 2562static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2516 struct ixgbe_ring *rx_ring) 2563 struct ixgbe_ring *rx_ring)
2517{ 2564{
2518 u32 srrctl; 2565 u32 srrctl;
2519 int index; 2566 int index;
@@ -2529,6 +2576,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2529 2576
2530 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 2577 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2531 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 2578 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2579 if (adapter->num_vfs)
2580 srrctl |= IXGBE_SRRCTL_DROP_EN;
2532 2581
2533 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 2582 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2534 IXGBE_SRRCTL_BSIZEHDR_MASK; 2583 IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -2549,20 +2598,46 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2549 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); 2598 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
2550} 2599}
2551 2600
2552static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) 2601static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2553{ 2602{
2554 u32 mrqc = 0; 2603 struct ixgbe_hw *hw = &adapter->hw;
2604 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2605 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2606 0x6A3E67EA, 0x14364D17, 0x3BED200D};
2607 u32 mrqc = 0, reta = 0;
2608 u32 rxcsum;
2609 int i, j;
2555 int mask; 2610 int mask;
2556 2611
2557 if (!(adapter->hw.mac.type == ixgbe_mac_82599EB)) 2612 /* Fill out hash function seeds */
2558 return mrqc; 2613 for (i = 0; i < 10; i++)
2614 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2615
2616 /* Fill out redirection table */
2617 for (i = 0, j = 0; i < 128; i++, j++) {
2618 if (j == adapter->ring_feature[RING_F_RSS].indices)
2619 j = 0;
2620 /* reta = 4-byte sliding window of
2621 * 0x00..(indices-1)(indices-1)00..etc. */
2622 reta = (reta << 8) | (j * 0x11);
2623 if ((i & 3) == 3)
2624 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2625 }
2626
2627 /* Disable indicating checksum in descriptor, enables RSS hash */
2628 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2629 rxcsum |= IXGBE_RXCSUM_PCSD;
2630 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2559 2631
2560 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED 2632 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2633 mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
2634 else
2635 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
2561#ifdef CONFIG_IXGBE_DCB 2636#ifdef CONFIG_IXGBE_DCB
2562 | IXGBE_FLAG_DCB_ENABLED 2637 | IXGBE_FLAG_DCB_ENABLED
2563#endif 2638#endif
2564 | IXGBE_FLAG_SRIOV_ENABLED 2639 | IXGBE_FLAG_SRIOV_ENABLED
2565 ); 2640 );
2566 2641
2567 switch (mask) { 2642 switch (mask) {
2568 case (IXGBE_FLAG_RSS_ENABLED): 2643 case (IXGBE_FLAG_RSS_ENABLED):
@@ -2580,7 +2655,13 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2580 break; 2655 break;
2581 } 2656 }
2582 2657
2583 return mrqc; 2658 /* Perform hash on these packet types */
2659 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2660 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2661 | IXGBE_MRQC_RSS_FIELD_IPV6
2662 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2663
2664 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2584} 2665}
2585 2666
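The redirection-table fill in the new ixgbe_setup_mrqc() packs 128 one-byte queue indices four at a time into 32 register writes, cycling through 0..indices-1; the j * 0x11 multiply mirrors the index into both nibbles of each byte (my reading of the hunk). A userspace model that prints the repeating window:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            const int indices = 4;      /* e.g. four RSS queues */
            uint32_t reta = 0;
            int i, j;

            for (i = 0, j = 0; i < 128; i++, j++) {
                    if (j == indices)
                            j = 0;
                    reta = (reta << 8) | (uint32_t)(j * 0x11);
                    if ((i & 3) == 3 && i < 8)
                            printf("RETA[%d] = 0x%08x\n", i >> 2, reta);
            }
            /* prints RETA[0] = 0x00112233 and RETA[1] = 0x00112233 */
            return 0;
    }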
2586/** 2667/**
@@ -2588,25 +2669,26 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2588 * @adapter: address of board private structure 2669 * @adapter: address of board private structure
2589 * @index: index of ring to set 2670 * @index: index of ring to set
2590 **/ 2671 **/
2591static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index) 2672static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2673 struct ixgbe_ring *ring)
2592{ 2674{
2593 struct ixgbe_ring *rx_ring;
2594 struct ixgbe_hw *hw = &adapter->hw; 2675 struct ixgbe_hw *hw = &adapter->hw;
2595 int j;
2596 u32 rscctrl; 2676 u32 rscctrl;
2597 int rx_buf_len; 2677 int rx_buf_len;
2678 u16 reg_idx = ring->reg_idx;
2598 2679
2599 rx_ring = adapter->rx_ring[index]; 2680 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
2600 j = rx_ring->reg_idx; 2681 return;
2601 rx_buf_len = rx_ring->rx_buf_len; 2682
2602 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); 2683 rx_buf_len = ring->rx_buf_len;
2684 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2603 rscctrl |= IXGBE_RSCCTL_RSCEN; 2685 rscctrl |= IXGBE_RSCCTL_RSCEN;
2604 /* 2686 /*
2605 * we must limit the number of descriptors so that the 2687 * we must limit the number of descriptors so that the
2606 * total size of max desc * buf_len is not greater 2688 * total size of max desc * buf_len is not greater
2607 * than 65535 2689 * than 65535
2608 */ 2690 */
2609 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 2691 if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2610#if (MAX_SKB_FRAGS > 16) 2692#if (MAX_SKB_FRAGS > 16)
2611 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 2693 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2612#elif (MAX_SKB_FRAGS > 8) 2694#elif (MAX_SKB_FRAGS > 8)
@@ -2624,31 +2706,181 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
2624 else 2706 else
2625 rscctrl |= IXGBE_RSCCTL_MAXDESC_4; 2707 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2626 } 2708 }
2627 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl); 2709 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2628} 2710}
2629 2711
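The MAXDESC clamp above enforces the constraint spelled out in the comment: the worst case of max_desc * buf_len has to fit in a 16-bit length, i.e. stay at or under 65535. A quick standalone check of how buffer sizes map to descriptor counts (the thresholds are my reading of the non-page-split branch, not the driver's constants):

    #include <stdio.h>

    static int rsc_max_desc(int rx_buf_len)
    {
            if (rx_buf_len * 16 <= 65535)
                    return 16;
            if (rx_buf_len * 8 <= 65535)
                    return 8;
            return 4;
    }

    int main(void)
    {
            printf("%d\n", rsc_max_desc(2048));  /* -> 16 (32 KB total) */
            printf("%d\n", rsc_max_desc(4096));  /* -> 8  (32 KB total) */
            printf("%d\n", rsc_max_desc(8192));  /* -> 4  (32 KB total) */
            return 0;
    }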
2630/** 2712/**
2631 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset 2713 * ixgbe_set_uta - Set unicast filter table address
2632 * @adapter: board private structure 2714 * @adapter: board private structure
2633 * 2715 *
2634 * Configure the Rx unit of the MAC after a reset. 2716 * The unicast table address is a register array of 32-bit registers.
2717 * The table is meant to be used in a way similar to how the MTA is used
2718 * however due to certain limitations in the hardware it is necessary to
2719 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
2720 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
2635 **/ 2721 **/
2636static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) 2722static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
2723{
2724 struct ixgbe_hw *hw = &adapter->hw;
2725 int i;
2726
2727 /* The UTA table only exists on 82599 hardware and newer */
2728 if (hw->mac.type < ixgbe_mac_82599EB)
2729 return;
2730
2731 /* we only need to do this if VMDq is enabled */
2732 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2733 return;
2734
2735 for (i = 0; i < 128; i++)
2736 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
2737}
2738
2739#define IXGBE_MAX_RX_DESC_POLL 10
2740static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2741 struct ixgbe_ring *ring)
2742{
2743 struct ixgbe_hw *hw = &adapter->hw;
2744 int reg_idx = ring->reg_idx;
2745 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
2746 u32 rxdctl;
2747
2748 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2749 if (hw->mac.type == ixgbe_mac_82598EB &&
2750 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2751 return;
2752
2753 do {
2754 msleep(1);
2755 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2756 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
2757
2758 if (!wait_loop) {
2759 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
2760 "the polling period\n", reg_idx);
2761 }
2762}
2763
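Both the Tx and Rx paths now share the same bounded enable-poll shape: set the enable bit, then re-read with a short sleep until it latches or a retry budget expires, logging instead of spinning forever. The generic pattern, modeled in userspace with a fake register:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define EN_BIT (1u << 25)

    static uint32_t fake_reg;                 /* stands in for RXDCTL/TXDCTL */
    static uint32_t read_reg(void) { return fake_reg; }

    static bool wait_for_enable(int max_polls)
    {
            while (max_polls--) {
                    /* the driver sleeps ~1 ms between reads */
                    if (read_reg() & EN_BIT)
                            return true;
            }
            return false;                     /* caller logs an error */
    }

    int main(void)
    {
            fake_reg = EN_BIT;                /* hardware latched the enable */
            printf("%s\n", wait_for_enable(10) ? "enabled" : "timeout");
            return 0;
    }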
2764void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2765 struct ixgbe_ring *ring)
2766{
2767 struct ixgbe_hw *hw = &adapter->hw;
2768 u64 rdba = ring->dma;
2769 u32 rxdctl;
2770 u16 reg_idx = ring->reg_idx;
2771
2772 /* disable queue to avoid issues while updating state */
2773 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2774 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
2775 rxdctl & ~IXGBE_RXDCTL_ENABLE);
2776 IXGBE_WRITE_FLUSH(hw);
2777
2778 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
2779 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
2780 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
2781 ring->count * sizeof(union ixgbe_adv_rx_desc));
2782 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
2783 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
2784 ring->head = IXGBE_RDH(reg_idx);
2785 ring->tail = IXGBE_RDT(reg_idx);
2786
2787 ixgbe_configure_srrctl(adapter, ring);
2788 ixgbe_configure_rscctl(adapter, ring);
2789
2790 if (hw->mac.type == ixgbe_mac_82598EB) {
2791 /*
2792 * enable cache line friendly hardware writes:
2793 * PTHRESH=32 descriptors (half the internal cache),
2794 * this also removes ugly rx_no_buffer_count increment
2795 * HTHRESH=4 descriptors (to minimize latency on fetch)
2796 * WTHRESH=8 burst writeback up to two cache lines
2797 */
2798 rxdctl &= ~0x3FFFFF;
2799 rxdctl |= 0x080420;
2800 }
2801
2802 /* enable receive descriptor ring */
2803 rxdctl |= IXGBE_RXDCTL_ENABLE;
2804 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
2805
2806 ixgbe_rx_desc_queue_enable(adapter, ring);
2807 ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
2808}
2809
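ixgbe_configure_rx_ring() programs a single 64-bit ring base through a 32-bit register pair: DMA_BIT_MASK(32) keeps the low word for RDBAL and the shift supplies RDBAH. A minimal round-trip check of the split:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t rdba = 0x0000001234567000ULL;   /* example ring base */
            uint32_t lo = (uint32_t)(rdba & 0xFFFFFFFFULL);  /* -> RDBAL */
            uint32_t hi = (uint32_t)(rdba >> 32);            /* -> RDBAH */
            uint64_t back = ((uint64_t)hi << 32) | lo;

            printf("%s\n", back == rdba ? "round-trips" : "broken");
            return 0;
    }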
2810static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
2811{
2812 struct ixgbe_hw *hw = &adapter->hw;
2813 int p;
2814
2815 /* PSRTYPE must be initialized in non-82598 adapters */
2816 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2817 IXGBE_PSRTYPE_UDPHDR |
2818 IXGBE_PSRTYPE_IPV4HDR |
2819 IXGBE_PSRTYPE_L2HDR |
2820 IXGBE_PSRTYPE_IPV6HDR;
2821
2822 if (hw->mac.type == ixgbe_mac_82598EB)
2823 return;
2824
2825 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
2826 psrtype |= (adapter->num_rx_queues_per_pool << 29);
2827
2828 for (p = 0; p < adapter->num_rx_pools; p++)
2829 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
2830 psrtype);
2831}
2832
2833static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
2834{
2835 struct ixgbe_hw *hw = &adapter->hw;
2836 u32 gcr_ext;
2837 u32 vt_reg_bits;
2838 u32 reg_offset, vf_shift;
2839 u32 vmdctl;
2840
2841 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2842 return;
2843
2844 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2845 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
2846 vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
2847 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2848
2849 vf_shift = adapter->num_vfs % 32;
2850 reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
2851
2852 /* Enable only the PF's pool for Tx/Rx */
2853 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2854 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
2855 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2856 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
2857 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2858
2859 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
2860 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
2861
2862 /*
2863 * Set up VF register offsets for selected VT Mode,
2864 * i.e. 32 or 64 VFs for SR-IOV
2865 */
2866 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2867 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
2868 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
2869 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
2870
2871 /* enable Tx loopback for VF/PF communication */
2872 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2873}
2874
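The PF's pool index comes right after the VFs, so its enable bit lands at bit (num_vfs % 32) of VFRE/VFTE register 0 or 1. Tabulating the hunk's vf_shift/reg_offset expressions for a few counts makes the layout visible; note that num_vfs == 32 maps to register 0, bit 0 under the "> 32" test, a boundary worth double-checking:

    #include <stdio.h>

    int main(void)
    {
            int counts[] = { 0, 7, 31, 32, 40, 63 };
            int i;

            for (i = 0; i < 6; i++) {
                    int num_vfs = counts[i];
                    int vf_shift = num_vfs % 32;
                    int reg_offset = (num_vfs > 32) ? 1 : 0;
                    printf("num_vfs=%2d -> VFRE(%d) bit %2d\n",
                           num_vfs, reg_offset, vf_shift);
            }
            return 0;
    }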
2875static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
2637{ 2876{
2638 u64 rdba;
2639 struct ixgbe_hw *hw = &adapter->hw; 2877 struct ixgbe_hw *hw = &adapter->hw;
2640 struct ixgbe_ring *rx_ring;
2641 struct net_device *netdev = adapter->netdev; 2878 struct net_device *netdev = adapter->netdev;
2642 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 2879 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2643 int i, j;
2644 u32 rdlen, rxctrl, rxcsum;
2645 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2646 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2647 0x6A3E67EA, 0x14364D17, 0x3BED200D};
2648 u32 fctrl, hlreg0;
2649 u32 reta = 0, mrqc = 0;
2650 u32 rdrxctl;
2651 int rx_buf_len; 2880 int rx_buf_len;
2881 struct ixgbe_ring *rx_ring;
2882 int i;
2883 u32 mhadd, hlreg0;
2652 2884
2653 /* Decide whether to use packet split mode or not */ 2885 /* Decide whether to use packet split mode or not */
2654 /* Do not use packet split if we're in SR-IOV Mode */ 2886 /* Do not use packet split if we're in SR-IOV Mode */
@@ -2658,62 +2890,40 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2658 /* Set the RX buffer length according to the mode */ 2890 /* Set the RX buffer length according to the mode */
2659 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 2891 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
2660 rx_buf_len = IXGBE_RX_HDR_SIZE; 2892 rx_buf_len = IXGBE_RX_HDR_SIZE;
2661 if (hw->mac.type == ixgbe_mac_82599EB) {
2662 /* PSRTYPE must be initialized in 82599 */
2663 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2664 IXGBE_PSRTYPE_UDPHDR |
2665 IXGBE_PSRTYPE_IPV4HDR |
2666 IXGBE_PSRTYPE_IPV6HDR |
2667 IXGBE_PSRTYPE_L2HDR;
2668 IXGBE_WRITE_REG(hw,
2669 IXGBE_PSRTYPE(adapter->num_vfs),
2670 psrtype);
2671 }
2672 } else { 2893 } else {
2673 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 2894 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
2674 (netdev->mtu <= ETH_DATA_LEN)) 2895 (netdev->mtu <= ETH_DATA_LEN))
2675 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 2896 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
2676 else 2897 else
2677 rx_buf_len = ALIGN(max_frame, 1024); 2898 rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
2678 } 2899 }
2679 2900
2680 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 2901#ifdef IXGBE_FCOE
2681 fctrl |= IXGBE_FCTRL_BAM; 2902 /* adjust max frame to be able to do baby jumbo for FCoE */
2682 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ 2903 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
2683 fctrl |= IXGBE_FCTRL_PMCF; 2904 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2684 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 2905 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
2906
2907#endif /* IXGBE_FCOE */
2908 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2909 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2910 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2911 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2912
2913 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2914 }
2685 2915
2686 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2916 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2687 if (adapter->netdev->mtu <= ETH_DATA_LEN) 2917 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
2688 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; 2918 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2689 else
2690 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2691#ifdef IXGBE_FCOE
2692 if (netdev->features & NETIF_F_FCOE_MTU)
2693 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2694#endif
2695 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 2919 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2696 2920
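The MHADD update a few lines up is a textbook read-modify-write of one field: compare the current MFS, clear the field, then OR in the new maximum frame size at its shift. Standalone version with the field assumed at bits 31:16, the usual 8259x placement:

    #include <stdio.h>
    #include <stdint.h>

    #define MHADD_MFS_MASK  0xFFFF0000u
    #define MHADD_MFS_SHIFT 16

    int main(void)
    {
            uint32_t mhadd = 0x000005EE;          /* pretend current value */
            uint32_t max_frame = 1518;            /* 0x5EE: frame + FCS */

            if (max_frame != (mhadd >> MHADD_MFS_SHIFT)) {
                    mhadd &= ~MHADD_MFS_MASK;
                    mhadd |= max_frame << MHADD_MFS_SHIFT;
            }
            printf("0x%08x\n", mhadd);            /* -> 0x05EE05EE */
            return 0;
    }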
2697 rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
2698 /* disable receives while setting up the descriptors */
2699 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2700 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2701
2702 /* 2921 /*
2703 * Setup the HW Rx Head and Tail Descriptor Pointers and 2922 * Setup the HW Rx Head and Tail Descriptor Pointers and
2704 * the Base and Length of the Rx Descriptor Ring 2923 * the Base and Length of the Rx Descriptor Ring
2705 */ 2924 */
2706 for (i = 0; i < adapter->num_rx_queues; i++) { 2925 for (i = 0; i < adapter->num_rx_queues; i++) {
2707 rx_ring = adapter->rx_ring[i]; 2926 rx_ring = adapter->rx_ring[i];
2708 rdba = rx_ring->dma;
2709 j = rx_ring->reg_idx;
2710 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
2711 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
2712 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
2713 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
2714 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
2715 rx_ring->head = IXGBE_RDH(j);
2716 rx_ring->tail = IXGBE_RDT(j);
2717 rx_ring->rx_buf_len = rx_buf_len; 2927 rx_ring->rx_buf_len = rx_buf_len;
2718 2928
2719 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) 2929 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
@@ -2729,15 +2939,21 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2729 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 2939 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2730 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) 2940 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2731 rx_ring->rx_buf_len = 2941 rx_ring->rx_buf_len =
2732 IXGBE_FCOE_JUMBO_FRAME_SIZE; 2942 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2733 } 2943 }
2734 } 2944 }
2735
2736#endif /* IXGBE_FCOE */ 2945#endif /* IXGBE_FCOE */
2737 ixgbe_configure_srrctl(adapter, rx_ring);
2738 } 2946 }
2739 2947
2740 if (hw->mac.type == ixgbe_mac_82598EB) { 2948}
2949
2950static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
2951{
2952 struct ixgbe_hw *hw = &adapter->hw;
2953 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2954
2955 switch (hw->mac.type) {
2956 case ixgbe_mac_82598EB:
2741 /* 2957 /*
2742 * For VMDq support of different descriptor types or 2958 * For VMDq support of different descriptor types or
2743 * buffer sizes through the use of multiple SRRCTL 2959 * buffer sizes through the use of multiple SRRCTL
@@ -2748,110 +2964,66 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2748 * effects of setting this bit are only that SRRCTL must be 2964 * effects of setting this bit are only that SRRCTL must be
2749 * fully programmed [0..15] 2965 * fully programmed [0..15]
2750 */ 2966 */
2751 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2752 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 2967 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2753 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 2968 break;
2969 case ixgbe_mac_82599EB:
2970 /* Disable RSC for ACK packets */
2971 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
2972 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
2973 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2974 /* hardware requires some bits to be set by default */
2975 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
2976 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
2977 break;
2978 default:
2979 /* We should do nothing since we don't know this hardware */
2980 return;
2754 } 2981 }
2755 2982
2756 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 2983 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2757 u32 vt_reg_bits; 2984}
2758 u32 reg_offset, vf_shift;
2759 u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2760 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
2761 | IXGBE_VT_CTL_REPLEN;
2762 vt_reg_bits |= (adapter->num_vfs <<
2763 IXGBE_VT_CTL_POOL_SHIFT);
2764 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
2765 IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
2766
2767 vf_shift = adapter->num_vfs % 32;
2768 reg_offset = adapter->num_vfs / 32;
2769 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
2770 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
2771 IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
2772 IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
2773 /* Enable only the PF's pool for Tx/Rx */
2774 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2775 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2776 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2777 ixgbe_set_vmolr(hw, adapter->num_vfs, true);
2778 }
2779
2780 /* Program MRQC for the distribution of queues */
2781 mrqc = ixgbe_setup_mrqc(adapter);
2782
2783 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
2784 /* Fill out redirection table */
2785 for (i = 0, j = 0; i < 128; i++, j++) {
2786 if (j == adapter->ring_feature[RING_F_RSS].indices)
2787 j = 0;
2788 /* reta = 4-byte sliding window of
2789 * 0x00..(indices-1)(indices-1)00..etc. */
2790 reta = (reta << 8) | (j * 0x11);
2791 if ((i & 3) == 3)
2792 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2793 }
2794
2795 /* Fill out hash function seeds */
2796 for (i = 0; i < 10; i++)
2797 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2798
2799 if (hw->mac.type == ixgbe_mac_82598EB)
2800 mrqc |= IXGBE_MRQC_RSSEN;
2801 /* Perform hash on these packet types */
2802 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2803 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2804 | IXGBE_MRQC_RSS_FIELD_IPV6
2805 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2806 }
2807 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2808 2985
2809 if (adapter->num_vfs) { 2986/**
2810 u32 reg; 2987 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
2988 * @adapter: board private structure
2989 *
2990 * Configure the Rx unit of the MAC after a reset.
2991 **/
2992static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2993{
2994 struct ixgbe_hw *hw = &adapter->hw;
2995 int i;
2996 u32 rxctrl;
2811 2997
2812 /* Map PF MAC address in RAR Entry 0 to first pool 2998 /* disable receives while setting up the descriptors */
2813 * following VFs */ 2999 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2814 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); 3000 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2815 3001
2816 /* Set up VF register offsets for selected VT Mode, i.e. 3002 ixgbe_setup_psrtype(adapter);
2817 * 64 VFs for SR-IOV */ 3003 ixgbe_setup_rdrxctl(adapter);
2818 reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
2819 reg |= IXGBE_GCR_EXT_SRIOV;
2820 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
2821 }
2822 3004
2823 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3005 /* Program registers for the distribution of queues */
3006 ixgbe_setup_mrqc(adapter);
2824 3007
2825 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || 3008 ixgbe_set_uta(adapter);
2826 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
2827 /* Disable indicating checksum in descriptor, enables
2828 * RSS hash */
2829 rxcsum |= IXGBE_RXCSUM_PCSD;
2830 }
2831 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
2832 /* Enable IPv4 payload checksum for UDP fragments
2833 * if PCSD is not set */
2834 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2835 }
2836 3009
2837 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3010 /* set_rx_buffer_len must be called before ring initialization */
3011 ixgbe_set_rx_buffer_len(adapter);
2838 3012
2839 if (hw->mac.type == ixgbe_mac_82599EB) { 3013 /*
2840 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 3014 * Setup the HW Rx Head and Tail Descriptor Pointers and
2841 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; 3015 * the Base and Length of the Rx Descriptor Ring
2842 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 3016 */
2843 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 3017 for (i = 0; i < adapter->num_rx_queues; i++)
2844 } 3018 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
2845 3019
2846 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 3020 /* disable drop enable for 82598 parts */
2847 /* Enable 82599 HW-RSC */ 3021 if (hw->mac.type == ixgbe_mac_82598EB)
2848 for (i = 0; i < adapter->num_rx_queues; i++) 3022 rxctrl |= IXGBE_RXCTRL_DMBYPS;
2849 ixgbe_configure_rscctl(adapter, i);
2850 3023
2851 /* Disable RSC for ACK packets */ 3024 /* enable all receives */
2852 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 3025 rxctrl |= IXGBE_RXCTRL_RXEN;
2853 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 3026 hw->mac.ops.enable_rx_dma(hw, rxctrl);
2854 }
2855} 3027}
2856 3028
2857static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 3029static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -2955,7 +3127,7 @@ static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
2955} 3127}
2956 3128
2957static void ixgbe_vlan_rx_register(struct net_device *netdev, 3129static void ixgbe_vlan_rx_register(struct net_device *netdev,
2958 struct vlan_group *grp) 3130 struct vlan_group *grp)
2959{ 3131{
2960 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3132 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2961 3133
@@ -3052,6 +3224,11 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3052 3224
3053 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3225 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3054 3226
3227 /* set all bits that we expect to always be set */
3228 fctrl |= IXGBE_FCTRL_BAM;
3229 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3230 fctrl |= IXGBE_FCTRL_PMCF;
3231
3055 /* clear the bits we are changing the status of */ 3232 /* clear the bits we are changing the status of */
3056 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 3233 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3057 3234
@@ -3157,6 +3334,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3157 u32 txdctl; 3334 u32 txdctl;
3158 int i, j; 3335 int i, j;
3159 3336
3337 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3338 if (hw->mac.type == ixgbe_mac_82598EB)
3339 netif_set_gso_max_size(adapter->netdev, 65536);
3340 return;
3341 }
3342
3343 if (hw->mac.type == ixgbe_mac_82598EB)
3344 netif_set_gso_max_size(adapter->netdev, 32768);
3345
3160 ixgbe_dcb_check_config(&adapter->dcb_cfg); 3346 ixgbe_dcb_check_config(&adapter->dcb_cfg);
3161 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); 3347 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
3162 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); 3348 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
@@ -3188,17 +3374,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3188 3374
3189 ixgbe_restore_vlan(adapter); 3375 ixgbe_restore_vlan(adapter);
3190#ifdef CONFIG_IXGBE_DCB 3376#ifdef CONFIG_IXGBE_DCB
3191 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3377 ixgbe_configure_dcb(adapter);
3192 if (hw->mac.type == ixgbe_mac_82598EB)
3193 netif_set_gso_max_size(netdev, 32768);
3194 else
3195 netif_set_gso_max_size(netdev, 65536);
3196 ixgbe_configure_dcb(adapter);
3197 } else {
3198 netif_set_gso_max_size(netdev, 65536);
3199 }
3200#else
3201 netif_set_gso_max_size(netdev, 65536);
3202#endif 3378#endif
3203 3379
3204#ifdef IXGBE_FCOE 3380#ifdef IXGBE_FCOE
@@ -3209,17 +3385,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3209 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { 3385 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3210 for (i = 0; i < adapter->num_tx_queues; i++) 3386 for (i = 0; i < adapter->num_tx_queues; i++)
3211 adapter->tx_ring[i]->atr_sample_rate = 3387 adapter->tx_ring[i]->atr_sample_rate =
3212 adapter->atr_sample_rate; 3388 adapter->atr_sample_rate;
3213 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); 3389 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
3214 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { 3390 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3215 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc); 3391 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
3216 } 3392 }
3393 ixgbe_configure_virtualization(adapter);
3217 3394
3218 ixgbe_configure_tx(adapter); 3395 ixgbe_configure_tx(adapter);
3219 ixgbe_configure_rx(adapter); 3396 ixgbe_configure_rx(adapter);
3220 for (i = 0; i < adapter->num_rx_queues; i++)
3221 ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
3222 (adapter->rx_ring[i]->count - 1));
3223} 3397}
3224 3398
3225static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) 3399static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -3290,7 +3464,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
3290 goto link_cfg_out; 3464 goto link_cfg_out;
3291 3465
3292 if (hw->mac.ops.get_link_capabilities) 3466 if (hw->mac.ops.get_link_capabilities)
3293 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); 3467 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3468 &negotiation);
3294 if (ret) 3469 if (ret)
3295 goto link_cfg_out; 3470 goto link_cfg_out;
3296 3471
@@ -3300,62 +3475,15 @@ link_cfg_out:
3300 return ret; 3475 return ret;
3301} 3476}
3302 3477
3303#define IXGBE_MAX_RX_DESC_POLL 10 3478static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
3304static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3305 int rxr)
3306{
3307 int j = adapter->rx_ring[rxr]->reg_idx;
3308 int k;
3309
3310 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
3311 if (IXGBE_READ_REG(&adapter->hw,
3312 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
3313 break;
3314 else
3315 msleep(1);
3316 }
3317 if (k >= IXGBE_MAX_RX_DESC_POLL) {
3318 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3319 "the polling period\n", rxr);
3320 }
3321 ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
3322 (adapter->rx_ring[rxr]->count - 1));
3323}
3324
3325static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3326{ 3479{
3327 struct net_device *netdev = adapter->netdev;
3328 struct ixgbe_hw *hw = &adapter->hw; 3480 struct ixgbe_hw *hw = &adapter->hw;
3329 int i, j = 0; 3481 u32 gpie = 0;
3330 int num_rx_rings = adapter->num_rx_queues;
3331 int err;
3332 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3333 u32 txdctl, rxdctl, mhadd;
3334 u32 dmatxctl;
3335 u32 gpie;
3336 u32 ctrl_ext;
3337
3338 ixgbe_get_hw_control(adapter);
3339
3340 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
3341 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
3342 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3343 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
3344 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
3345 } else {
3346 /* MSI only */
3347 gpie = 0;
3348 }
3349 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3350 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3351 gpie |= IXGBE_GPIE_VTMODE_64;
3352 }
3353 /* XXX: to interrupt immediately for EICS writes, enable this */
3354 /* gpie |= IXGBE_GPIE_EIMEN; */
3355 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3356 }
3357 3482
3358 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3483 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3484 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3485 IXGBE_GPIE_OCD;
3486 gpie |= IXGBE_GPIE_EIAME;
3359 /* 3487 /*
3360 * use EIAM to auto-mask when MSI-X interrupt is asserted 3488 * use EIAM to auto-mask when MSI-X interrupt is asserted
3361 * this saves a register write for every interrupt 3489 * this saves a register write for every interrupt
@@ -3376,98 +3504,33 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3376 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3504 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3377 } 3505 }
3378 3506
3379 /* Enable Thermal over heat sensor interrupt */ 3507 /* XXX: to interrupt immediately for EICS writes, enable this */
3380 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { 3508 /* gpie |= IXGBE_GPIE_EIMEN; */
3381 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3509
3382 gpie |= IXGBE_SDP0_GPIEN; 3510 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3383 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3511 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3512 gpie |= IXGBE_GPIE_VTMODE_64;
3384 } 3513 }
3385 3514
3386 /* Enable fan failure interrupt if media type is copper */ 3515 /* Enable fan failure interrupt */
3387 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { 3516 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
3388 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3389 gpie |= IXGBE_SDP1_GPIEN; 3517 gpie |= IXGBE_SDP1_GPIEN;
3390 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3391 }
3392 3518
3393 if (hw->mac.type == ixgbe_mac_82599EB) { 3519 if (hw->mac.type == ixgbe_mac_82599EB)
3394 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3395 gpie |= IXGBE_SDP1_GPIEN; 3520 gpie |= IXGBE_SDP1_GPIEN;
3396 gpie |= IXGBE_SDP2_GPIEN; 3521 gpie |= IXGBE_SDP2_GPIEN;
3397 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3398 }
3399
3400#ifdef IXGBE_FCOE
3401 /* adjust max frame to be able to do baby jumbo for FCoE */
3402 if ((netdev->features & NETIF_F_FCOE_MTU) &&
3403 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3404 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3405 3522
3406#endif /* IXGBE_FCOE */ 3523 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3407 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 3524}
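The new ixgbe_setup_gpie() replaces the old per-feature read-modify-write sequence with a single accumulated value and one register write at the end, which avoids repeated PCIe round trips and makes the final GPIE contents obvious at a glance. The shape of that refactor, with illustrative bit names rather than the real GPIE layout:

    #include <stdio.h>
    #include <stdint.h>

    #define GPIE_MSIX (1u << 0)   /* illustrative bit positions only */
    #define GPIE_FAN  (1u << 1)
    #define GPIE_SDP  (1u << 2)

    static uint32_t build_gpie(int msix, int fan_fail, int sdp)
    {
            uint32_t gpie = 0;

            if (msix)
                    gpie |= GPIE_MSIX;
            if (fan_fail)
                    gpie |= GPIE_FAN;
            if (sdp)
                    gpie |= GPIE_SDP;
            return gpie;          /* caller performs the one register write */
    }

    int main(void)
    {
            printf("0x%x\n", build_gpie(1, 0, 1));   /* -> 0x5 */
            return 0;
    }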
3408 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3409 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3410 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3411
3412 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3413 }
3414
3415 for (i = 0; i < adapter->num_tx_queues; i++) {
3416 j = adapter->tx_ring[i]->reg_idx;
3417 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3418 if (adapter->rx_itr_setting == 0) {
3419 /* cannot set wthresh when itr==0 */
3420 txdctl &= ~0x007F0000;
3421 } else {
3422 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
3423 txdctl |= (8 << 16);
3424 }
3425 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3426 }
3427 3525
3428 if (hw->mac.type == ixgbe_mac_82599EB) { 3526static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3429 /* DMATXCTL.EN must be set after all Tx queue config is done */ 3527{
3430 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 3528 struct ixgbe_hw *hw = &adapter->hw;
3431 dmatxctl |= IXGBE_DMATXCTL_TE; 3529 int err;
3432 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); 3530 u32 ctrl_ext;
3433 }
3434 for (i = 0; i < adapter->num_tx_queues; i++) {
3435 j = adapter->tx_ring[i]->reg_idx;
3436 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3437 txdctl |= IXGBE_TXDCTL_ENABLE;
3438 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3439 if (hw->mac.type == ixgbe_mac_82599EB) {
3440 int wait_loop = 10;
3441 /* poll for Tx Enable ready */
3442 do {
3443 msleep(1);
3444 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3445 } while (--wait_loop &&
3446 !(txdctl & IXGBE_TXDCTL_ENABLE));
3447 if (!wait_loop)
3448 e_err(drv, "Could not enable Tx Queue %d\n", j);
3449 }
3450 }
3451 3531
3452 for (i = 0; i < num_rx_rings; i++) { 3532 ixgbe_get_hw_control(adapter);
3453 j = adapter->rx_ring[i]->reg_idx; 3533 ixgbe_setup_gpie(adapter);
3454 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3455 /* enable PTHRESH=32 descriptors (half the internal cache)
3456 * and HTHRESH=0 descriptors (to minimize latency on fetch),
3457 * this also removes a pesky rx_no_buffer_count increment */
3458 rxdctl |= 0x0020;
3459 rxdctl |= IXGBE_RXDCTL_ENABLE;
3460 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
3461 if (hw->mac.type == ixgbe_mac_82599EB)
3462 ixgbe_rx_desc_queue_enable(adapter, i);
3463 }
3464 /* enable all receives */
3465 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3466 if (hw->mac.type == ixgbe_mac_82598EB)
3467 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
3468 else
3469 rxdctl |= IXGBE_RXCTRL_RXEN;
3470 hw->mac.ops.enable_rx_dma(hw, rxdctl);
3471 3534
3472 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 3535 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3473 ixgbe_configure_msix(adapter); 3536 ixgbe_configure_msix(adapter);
@@ -3483,7 +3546,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3483 3546
3484 /* clear any pending interrupts, may auto mask */ 3547 /* clear any pending interrupts, may auto mask */
3485 IXGBE_READ_REG(hw, IXGBE_EICR); 3548 IXGBE_READ_REG(hw, IXGBE_EICR);
3486
3487 ixgbe_irq_enable(adapter); 3549 ixgbe_irq_enable(adapter);
3488 3550
3489 /* 3551 /*
@@ -3525,12 +3587,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3525 e_err(probe, "link_config FAILED %d\n", err); 3587 e_err(probe, "link_config FAILED %d\n", err);
3526 } 3588 }
3527 3589
3528 for (i = 0; i < adapter->num_tx_queues; i++)
3529 set_bit(__IXGBE_FDIR_INIT_DONE,
3530 &(adapter->tx_ring[i]->reinit_state));
3531
3532 /* enable transmits */ 3590 /* enable transmits */
3533 netif_tx_start_all_queues(netdev); 3591 netif_tx_start_all_queues(adapter->netdev);
3534 3592
3535 /* bring the link up in the watchdog, this could race with our first 3593 /* bring the link up in the watchdog, this could race with our first
3536 * link up interrupt but shouldn't be a problem */ 3594 * link up interrupt but shouldn't be a problem */
@@ -3609,21 +3667,24 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3609 * @rx_ring: ring to free buffers from 3667 * @rx_ring: ring to free buffers from
3610 **/ 3668 **/
3611static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, 3669static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3612 struct ixgbe_ring *rx_ring) 3670 struct ixgbe_ring *rx_ring)
3613{ 3671{
3614 struct pci_dev *pdev = adapter->pdev; 3672 struct pci_dev *pdev = adapter->pdev;
3615 unsigned long size; 3673 unsigned long size;
3616 unsigned int i; 3674 unsigned int i;
3617 3675
3618 /* Free all the Rx ring sk_buffs */ 3676 /* ring already cleared, nothing to do */
3677 if (!rx_ring->rx_buffer_info)
3678 return;
3619 3679
3680 /* Free all the Rx ring sk_buffs */
3620 for (i = 0; i < rx_ring->count; i++) { 3681 for (i = 0; i < rx_ring->count; i++) {
3621 struct ixgbe_rx_buffer *rx_buffer_info; 3682 struct ixgbe_rx_buffer *rx_buffer_info;
3622 3683
3623 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 3684 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3624 if (rx_buffer_info->dma) { 3685 if (rx_buffer_info->dma) {
3625 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 3686 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
3626 rx_ring->rx_buf_len, 3687 rx_ring->rx_buf_len,
3627 DMA_FROM_DEVICE); 3688 DMA_FROM_DEVICE);
3628 rx_buffer_info->dma = 0; 3689 rx_buffer_info->dma = 0;
3629 } 3690 }
@@ -3635,7 +3696,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3635 if (IXGBE_RSC_CB(this)->delay_unmap) { 3696 if (IXGBE_RSC_CB(this)->delay_unmap) {
3636 dma_unmap_single(&pdev->dev, 3697 dma_unmap_single(&pdev->dev,
3637 IXGBE_RSC_CB(this)->dma, 3698 IXGBE_RSC_CB(this)->dma,
3638 rx_ring->rx_buf_len, 3699 rx_ring->rx_buf_len,
3639 DMA_FROM_DEVICE); 3700 DMA_FROM_DEVICE);
3640 IXGBE_RSC_CB(this)->dma = 0; 3701 IXGBE_RSC_CB(this)->dma = 0;
3641 IXGBE_RSC_CB(skb)->delay_unmap = false; 3702 IXGBE_RSC_CB(skb)->delay_unmap = false;
@@ -3677,14 +3738,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3677 * @tx_ring: ring to be cleaned 3738 * @tx_ring: ring to be cleaned
3678 **/ 3739 **/
3679static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, 3740static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3680 struct ixgbe_ring *tx_ring) 3741 struct ixgbe_ring *tx_ring)
3681{ 3742{
3682 struct ixgbe_tx_buffer *tx_buffer_info; 3743 struct ixgbe_tx_buffer *tx_buffer_info;
3683 unsigned long size; 3744 unsigned long size;
3684 unsigned int i; 3745 unsigned int i;
3685 3746
3686 /* Free all the Tx ring sk_buffs */ 3747 /* ring already cleared, nothing to do */
3748 if (!tx_ring->tx_buffer_info)
3749 return;
3687 3750
3751 /* Free all the Tx ring sk_buffs */
3688 for (i = 0; i < tx_ring->count; i++) { 3752 for (i = 0; i < tx_ring->count; i++) {
3689 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3753 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3690 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 3754 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
@@ -3786,13 +3850,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3786 j = adapter->tx_ring[i]->reg_idx; 3850 j = adapter->tx_ring[i]->reg_idx;
3787 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 3851 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3788 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), 3852 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
3789 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 3853 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3790 } 3854 }
3791 /* Disable the Tx DMA engine on 82599 */ 3855 /* Disable the Tx DMA engine on 82599 */
3792 if (hw->mac.type == ixgbe_mac_82599EB) 3856 if (hw->mac.type == ixgbe_mac_82599EB)
3793 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, 3857 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3794 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 3858 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3795 ~IXGBE_DMATXCTL_TE)); 3859 ~IXGBE_DMATXCTL_TE));
3796 3860
3797 /* power down the optics */ 3861 /* power down the optics */
3798 if (hw->phy.multispeed_fiber) 3862 if (hw->phy.multispeed_fiber)
@@ -3822,7 +3886,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3822static int ixgbe_poll(struct napi_struct *napi, int budget) 3886static int ixgbe_poll(struct napi_struct *napi, int budget)
3823{ 3887{
3824 struct ixgbe_q_vector *q_vector = 3888 struct ixgbe_q_vector *q_vector =
3825 container_of(napi, struct ixgbe_q_vector, napi); 3889 container_of(napi, struct ixgbe_q_vector, napi);
3826 struct ixgbe_adapter *adapter = q_vector->adapter; 3890 struct ixgbe_adapter *adapter = q_vector->adapter;
3827 int tx_clean_complete, work_done = 0; 3891 int tx_clean_complete, work_done = 0;
3828 3892
@@ -3932,7 +3996,7 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3932 * Rx load across CPUs using RSS. 3996 * Rx load across CPUs using RSS.
3933 * 3997 *
3934 **/ 3998 **/
3935static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) 3999static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3936{ 4000{
3937 bool ret = false; 4001 bool ret = false;
3938 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; 4002 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
@@ -4061,7 +4125,7 @@ done:
4061} 4125}
4062 4126
4063static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 4127static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4064 int vectors) 4128 int vectors)
4065{ 4129{
4066 int err, vector_threshold; 4130 int err, vector_threshold;
4067 4131
@@ -4080,7 +4144,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4080 */ 4144 */
4081 while (vectors >= vector_threshold) { 4145 while (vectors >= vector_threshold) {
4082 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 4146 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
4083 vectors); 4147 vectors);
4084 if (!err) /* Success in acquiring all requested vectors. */ 4148 if (!err) /* Success in acquiring all requested vectors. */
4085 break; 4149 break;
4086 else if (err < 0) 4150 else if (err < 0)
@@ -4107,7 +4171,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 	 * vectors we were allocated.
 	 */
 		adapter->num_msix_vectors = min(vectors,
 				   adapter->max_msix_q_vectors + NON_Q_VECTORS);
 	}
 }
 
@@ -4178,12 +4242,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 	}
 	for ( ; i < 5; i++) {
 		adapter->tx_ring[i]->reg_idx =
 			((i + 2) << 4);
 		adapter->rx_ring[i]->reg_idx = i << 4;
 	}
 	for ( ; i < dcb_i; i++) {
 		adapter->tx_ring[i]->reg_idx =
 			((i + 8) << 3);
 		adapter->rx_ring[i]->reg_idx = i << 4;
 	}
 
@@ -4226,7 +4290,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
  * Cache the descriptor ring offsets for Flow Director to the assigned rings.
  *
  **/
-static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
 {
 	int i;
 	bool ret = false;
@@ -4383,7 +4447,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 		adapter->node = cur_node;
 	}
 	ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
 			    adapter->node);
 	if (!ring)
 		ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
 	if (!ring)
@@ -4407,7 +4471,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 		adapter->node = cur_node;
 	}
 	ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
 			    adapter->node);
 	if (!ring)
 		ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
 	if (!ring)
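Both ring-allocation hunks, and the q_vector one further down, use the same NUMA-aware pattern: try kzalloc_node() on the adapter's preferred node first, then fall back to a plain kzalloc() if that node is out of memory. A minimal sketch of the idiom, assuming a generic object type:

    struct foo *f = kzalloc_node(sizeof(*f), GFP_KERNEL, node);
    if (!f)
            f = kzalloc(sizeof(*f), GFP_KERNEL); /* any node beats failing */
    if (!f)
            return -ENOMEM;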
@@ -4453,7 +4517,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	 * (roughly) the same number of vectors as there are CPU's.
 	 */
 	v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
 		       (int)num_online_cpus()) + NON_Q_VECTORS;
 
 	/*
 	 * At the same time, hardware can only support a maximum of
@@ -4467,7 +4531,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	/* A failure in MSI-X entry allocation isn't fatal, but it does
 	 * mean we disable MSI-X capabilities of the adapter. */
 	adapter->msix_entries = kcalloc(v_budget,
 					sizeof(struct msix_entry), GFP_KERNEL);
 	if (adapter->msix_entries) {
 		for (vector = 0; vector < v_budget; vector++)
 			adapter->msix_entries[vector].entry = vector;
@@ -4529,10 +4593,10 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 
 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
 		q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
 					GFP_KERNEL, adapter->node);
 		if (!q_vector)
 			q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
 					   GFP_KERNEL);
 		if (!q_vector)
 			goto err_out;
 		q_vector->adapter = adapter;
@@ -4693,8 +4757,8 @@ static void ixgbe_sfp_timer(unsigned long data)
 static void ixgbe_sfp_task(struct work_struct *work)
 {
 	struct ixgbe_adapter *adapter = container_of(work,
 						     struct ixgbe_adapter,
 						     sfp_task);
 	struct ixgbe_hw *hw = &adapter->hw;
 
 	if ((hw->phy.type == ixgbe_phy_nl) &&
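Several work-queue hunks in this patch only re-indent the same container_of() idiom: the work item is embedded in the adapter, so the handler recovers the enclosing structure from the member pointer. A minimal sketch of the idiom, using a hypothetical foo_task member:

    static void foo_task(struct work_struct *work)
    {
            /* map &adapter->foo_task back to the containing adapter */
            struct ixgbe_adapter *adapter = container_of(work,
                                                         struct ixgbe_adapter,
                                                         foo_task);
            struct ixgbe_hw *hw = &adapter->hw;
    }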
@@ -4719,7 +4783,7 @@ static void ixgbe_sfp_task(struct work_struct *work)
 reschedule:
 	if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
 		mod_timer(&adapter->sfp_timer,
 			  round_jiffies(jiffies + (2 * HZ)));
 }
 
 /**
@@ -4775,7 +4839,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		adapter->atr_sample_rate = 20;
 	}
 	adapter->ring_feature[RING_F_FDIR].indices =
 						 IXGBE_MAX_FDIR_INDICES;
 	adapter->fdir_pballoc = 0;
 #ifdef IXGBE_FCOE
 	adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
@@ -4806,7 +4870,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 	adapter->dcb_cfg.round_robin_enable = false;
 	adapter->dcb_set_bitmap = 0x00;
 	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
 			   adapter->ring_feature[RING_F_DCB].indices);
 
 #endif
 
@@ -4861,7 +4925,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
  * Return 0 on success, negative on failure
  **/
 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *tx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	int size;
@@ -4928,7 +4992,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
  * Returns 0 on success, negative on failure
  **/
 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *rx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	int size;
@@ -5001,7 +5065,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
  * Free all transmit software resources
  **/
 void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *tx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 
@@ -5039,7 +5103,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
  * Free all receive software resources
  **/
 void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *rx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 
@@ -5333,6 +5397,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 	u64 total_mpc = 0;
 	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
 	u64 non_eop_descs = 0, restart_queue = 0;
+	struct ixgbe_hw_stats *hwstats = &adapter->stats;
 
 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5343,7 +5408,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 		u64 rsc_flush = 0;
 		for (i = 0; i < 16; i++)
 			adapter->hw_rx_no_dma_resources +=
 				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			rsc_count += adapter->rx_ring[i]->rsc_count;
 			rsc_flush += adapter->rx_ring[i]->rsc_flush;
@@ -5361,119 +5426,118 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 		non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
 	adapter->non_eop_descs = non_eop_descs;
 
-	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
 	for (i = 0; i < 8; i++) {
 		/* for packet buffers not used, the register should read 0 */
 		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
 		missed_rx += mpc;
-		adapter->stats.mpc[i] += mpc;
-		total_mpc += adapter->stats.mpc[i];
+		hwstats->mpc[i] += mpc;
+		total_mpc += hwstats->mpc[i];
 		if (hw->mac.type == ixgbe_mac_82598EB)
-			adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
-		adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
-		adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
-		adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
-		adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+		hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+		hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
 		if (hw->mac.type == ixgbe_mac_82599EB) {
-			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
-							IXGBE_PXONRXCNT(i));
-			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
-							IXGBE_PXOFFRXCNT(i));
-			adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+			hwstats->pxonrxc[i] +=
+				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+			hwstats->pxoffrxc[i] +=
+				IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+			hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 		} else {
-			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
-							IXGBE_PXONRXC(i));
-			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
-							IXGBE_PXOFFRXC(i));
+			hwstats->pxonrxc[i] +=
+				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+			hwstats->pxoffrxc[i] +=
+				IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
 		}
-		adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
-						IXGBE_PXONTXC(i));
-		adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
-						IXGBE_PXOFFTXC(i));
+		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
 	}
-	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
 	/* work around hardware counting issue */
-	adapter->stats.gprc -= missed_rx;
+	hwstats->gprc -= missed_rx;
 
 	/* 82598 hardware only has a 32 bit counter in the high register */
 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		u64 tmp;
-		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-		tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
-		adapter->stats.gorc += (tmp << 32);
-		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-		tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
-		adapter->stats.gotc += (tmp << 32);
-		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
-		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
-		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
-		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
-		adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
-		adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
+		tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
+		/* 4 high bits of GORC */
+		hwstats->gorc += (tmp << 32);
+		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
+		tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
+		/* 4 high bits of GOTC */
+		hwstats->gotc += (tmp << 32);
+		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
+		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+		hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
 #ifdef IXGBE_FCOE
-		adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
-		adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
-		adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
-		adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
-		adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
-		adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+		hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+		hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+		hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 #endif /* IXGBE_FCOE */
 	} else {
-		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
-		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
-		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+		hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
 	}
 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
-	adapter->stats.bprc += bprc;
-	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+	hwstats->bprc += bprc;
+	hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
 	if (hw->mac.type == ixgbe_mac_82598EB)
-		adapter->stats.mprc -= bprc;
-	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
-	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
-	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
-	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
-	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
-	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
-	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
-	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+		hwstats->mprc -= bprc;
+	hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+	hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+	hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+	hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+	hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+	hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+	hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+	hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
-	adapter->stats.lxontxc += lxon;
+	hwstats->lxontxc += lxon;
 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
-	adapter->stats.lxofftxc += lxoff;
-	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
-	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
-	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+	hwstats->lxofftxc += lxoff;
+	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+	hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
 	/*
 	 * 82598 errata - tx of flow control packets is included in tx counters
 	 */
 	xon_off_tot = lxon + lxoff;
-	adapter->stats.gptc -= xon_off_tot;
-	adapter->stats.mptc -= xon_off_tot;
-	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
-	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
-	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
-	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
-	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
-	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
-	adapter->stats.ptc64 -= xon_off_tot;
-	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
-	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
-	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
-	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
-	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
-	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+	hwstats->gptc -= xon_off_tot;
+	hwstats->mptc -= xon_off_tot;
+	hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
+	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+	hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+	hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+	hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+	hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+	hwstats->ptc64 -= xon_off_tot;
+	hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+	hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+	hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+	hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+	hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
 
 	/* Fill out the OS statistics structure */
-	netdev->stats.multicast = adapter->stats.mprc;
+	netdev->stats.multicast = hwstats->mprc;
 
 	/* Rx Errors */
-	netdev->stats.rx_errors = adapter->stats.crcerrs +
-				       adapter->stats.rlec;
+	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
 	netdev->stats.rx_dropped = 0;
-	netdev->stats.rx_length_errors = adapter->stats.rlec;
-	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
+	netdev->stats.rx_length_errors = hwstats->rlec;
+	netdev->stats.rx_crc_errors = hwstats->crcerrs;
 	netdev->stats.rx_missed_errors = total_mpc;
 }
 
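The long rename above is mechanical: a local struct ixgbe_hw_stats pointer stands in for the repeated adapter->stats member so each counter update fits on one line. A minimal sketch of the same refactor, reduced to two counters:

    struct ixgbe_hw_stats *hwstats = &adapter->stats; /* alias, not a copy */

    hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
    hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);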
@@ -5532,8 +5596,8 @@ watchdog_short_circuit:
 static void ixgbe_multispeed_fiber_task(struct work_struct *work)
 {
 	struct ixgbe_adapter *adapter = container_of(work,
 						     struct ixgbe_adapter,
 						     multispeed_fiber_task);
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 autoneg;
 	bool negotiation;
@@ -5556,8 +5620,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work)
 static void ixgbe_sfp_config_module_task(struct work_struct *work)
 {
 	struct ixgbe_adapter *adapter = container_of(work,
 						     struct ixgbe_adapter,
 						     sfp_config_module_task);
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 err;
 
@@ -5590,15 +5654,15 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
 static void ixgbe_fdir_reinit_task(struct work_struct *work)
 {
 	struct ixgbe_adapter *adapter = container_of(work,
 						     struct ixgbe_adapter,
 						     fdir_reinit_task);
 	struct ixgbe_hw *hw = &adapter->hw;
 	int i;
 
 	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
 			set_bit(__IXGBE_FDIR_INIT_DONE,
 				&(adapter->tx_ring[i]->reinit_state));
 	} else {
 		e_err(probe, "failed to finish FDIR re-initialization, "
 		      "ignored adding FDIR ATR filters\n");
@@ -5616,8 +5680,8 @@ static DEFINE_MUTEX(ixgbe_watchdog_lock);
 static void ixgbe_watchdog_task(struct work_struct *work)
 {
 	struct ixgbe_adapter *adapter = container_of(work,
 						     struct ixgbe_adapter,
 						     watchdog_task);
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 link_speed;
@@ -5648,7 +5712,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 
 	if (link_up ||
 	    time_after(jiffies, (adapter->link_check_timeout +
 				 IXGBE_TRY_LINK_TIMEOUT))) {
 		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
 	}
@@ -5719,8 +5783,8 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 }
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
 		     u32 tx_flags, u8 *hdr_len)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;
@@ -5743,28 +5807,28 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		iph->tot_len = 0;
 		iph->check = 0;
 		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
 							 iph->daddr, 0,
 							 IPPROTO_TCP,
 							 0);
 	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check =
 		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 				     &ipv6_hdr(skb)->daddr,
 				     0, IPPROTO_TCP, 0);
 	}
 
 	i = tx_ring->next_to_use;
 
 	tx_buffer_info = &tx_ring->tx_buffer_info[i];
-	context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
 
 	/* VLAN MACLEN IPLEN */
 	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
 		vlan_macip_lens |=
 		    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
 	vlan_macip_lens |= ((skb_network_offset(skb)) <<
 			    IXGBE_ADVTXD_MACLEN_SHIFT);
 	*hdr_len += skb_network_offset(skb);
 	vlan_macip_lens |=
 	    (skb_transport_header(skb) - skb_network_header(skb));
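This hunk, and two later ones, switch the descriptor-lookup macros from taking the ring by value to taking a pointer, so callers drop the `*tx_ring` dereference. The macros presumably index the ring's descriptor array along these lines (a sketch, not the verbatim header):

    /* sketch: fetch the i-th advanced context descriptor of ring R */
    #define IXGBE_TX_CTXTDESC_ADV(R, i) \
            (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))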
@@ -5775,7 +5839,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
 			   IXGBE_ADVTXD_DTYP_CTXT);
 
 	if (skb->protocol == htons(ETH_P_IP))
 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -5803,9 +5867,53 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 	return false;
 }
 
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+{
+	u32 rtn = 0;
+	__be16 protocol;
+
+	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
+		protocol = ((const struct vlan_ethhdr *)skb->data)->
+				h_vlan_encapsulated_proto;
+	else
+		protocol = skb->protocol;
+
+	switch (protocol) {
+	case cpu_to_be16(ETH_P_IP):
+		rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
+		switch (ip_hdr(skb)->protocol) {
+		case IPPROTO_TCP:
+			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			break;
+		case IPPROTO_SCTP:
+			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			break;
+		}
+		break;
+	case cpu_to_be16(ETH_P_IPV6):
+		/* XXX what about other V6 headers?? */
+		switch (ipv6_hdr(skb)->nexthdr) {
+		case IPPROTO_TCP:
+			rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+			break;
+		case IPPROTO_SCTP:
+			rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+			break;
+		}
+		break;
+	default:
+		if (unlikely(net_ratelimit()))
+			e_warn(probe, "partial checksum but proto=%x!\n",
+			       skb->protocol);
+		break;
+	}
+
+	return rtn;
+}
+
 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring,
 			  struct sk_buff *skb, u32 tx_flags)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;
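The new ixgbe_psum() helper pulls the CHECKSUM_PARTIAL protocol dissection out of ixgbe_tx_csum(), so in the next hunk the old inline switch collapses to a single or-in of the returned TUCMD bits:

    if (skb->ip_summed == CHECKSUM_PARTIAL)
            type_tucmd_mlhl |= ixgbe_psum(adapter, skb);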
@@ -5816,63 +5924,25 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
 		i = tx_ring->next_to_use;
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+		context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
 
 		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
 			vlan_macip_lens |=
 			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
 		vlan_macip_lens |= (skb_network_offset(skb) <<
 				    IXGBE_ADVTXD_MACLEN_SHIFT);
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			vlan_macip_lens |= (skb_transport_header(skb) -
 					    skb_network_header(skb));
 
 		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
 		context_desc->seqnum_seed = 0;
 
 		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
 				    IXGBE_ADVTXD_DTYP_CTXT);
-
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			__be16 protocol;
-
-			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
-				const struct vlan_ethhdr *vhdr =
-					(const struct vlan_ethhdr *)skb->data;
-
-				protocol = vhdr->h_vlan_encapsulated_proto;
-			} else {
-				protocol = skb->protocol;
-			}
 
-			switch (protocol) {
-			case cpu_to_be16(ETH_P_IP):
-				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-					type_tucmd_mlhl |=
-						IXGBE_ADVTXD_TUCMD_L4T_TCP;
-				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
-					type_tucmd_mlhl |=
-						IXGBE_ADVTXD_TUCMD_L4T_SCTP;
-				break;
-			case cpu_to_be16(ETH_P_IPV6):
-				/* XXX what about other V6 headers?? */
-				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
-					type_tucmd_mlhl |=
-						IXGBE_ADVTXD_TUCMD_L4T_TCP;
-				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
-					type_tucmd_mlhl |=
-						IXGBE_ADVTXD_TUCMD_L4T_SCTP;
-				break;
-			default:
-				if (unlikely(net_ratelimit())) {
-					e_warn(probe, "partial checksum "
-					       "but proto=%x!\n",
-					       skb->protocol);
-				}
-				break;
-			}
-		}
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
 
 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
 		/* use index zero for tx checksum offload */
@@ -5893,9 +5963,9 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 }
 
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 			struct ixgbe_ring *tx_ring,
 			struct sk_buff *skb, u32 tx_flags,
 			unsigned int first)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	struct ixgbe_tx_buffer *tx_buffer_info;
@@ -5990,7 +6060,7 @@ dma_error:
 
 	/* clear timestamp and dma mappings for remaining portion of packet */
 	while (count--) {
-		if (i==0)
+		if (i == 0)
 			i += tx_ring->count;
 		i--;
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
@@ -6001,8 +6071,8 @@ dma_error:
 }
 
 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 			   struct ixgbe_ring *tx_ring,
 			   int tx_flags, int count, u32 paylen, u8 hdr_len)
 {
 	union ixgbe_adv_tx_desc *tx_desc = NULL;
 	struct ixgbe_tx_buffer *tx_buffer_info;
@@ -6021,17 +6091,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
 
 		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
 				 IXGBE_ADVTXD_POPTS_SHIFT;
 
 		/* use index 1 context for tso */
 		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
 		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
 			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
 					 IXGBE_ADVTXD_POPTS_SHIFT;
 
 	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
 		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
 				 IXGBE_ADVTXD_POPTS_SHIFT;
 
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 		olinfo_status |= IXGBE_ADVTXD_CC;
@@ -6045,10 +6115,10 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 	i = tx_ring->next_to_use;
 	while (count--) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
 		tx_desc->read.cmd_type_len =
 			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
 		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 		i++;
 		if (i == tx_ring->count)
@@ -6070,7 +6140,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 		      int queue, u32 tx_flags)
 {
 	struct ixgbe_atr_input atr_input;
 	struct tcphdr *th;
@@ -6098,7 +6168,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
 
 	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
 		   IXGBE_TX_FLAGS_VLAN_SHIFT;
 	src_ipv4_addr = iph->saddr;
 	dst_ipv4_addr = iph->daddr;
 	flex_bytes = eth->h_proto;
@@ -6117,7 +6187,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6117} 6187}
6118 6188
6119static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 6189static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6120 struct ixgbe_ring *tx_ring, int size) 6190 struct ixgbe_ring *tx_ring, int size)
6121{ 6191{
6122 netif_stop_subqueue(netdev, tx_ring->queue_index); 6192 netif_stop_subqueue(netdev, tx_ring->queue_index);
6123 /* Herbert's original patch had: 6193 /* Herbert's original patch had:
@@ -6137,7 +6207,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6137} 6207}
6138 6208
6139static int ixgbe_maybe_stop_tx(struct net_device *netdev, 6209static int ixgbe_maybe_stop_tx(struct net_device *netdev,
6140 struct ixgbe_ring *tx_ring, int size) 6210 struct ixgbe_ring *tx_ring, int size)
6141{ 6211{
6142 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 6212 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
6143 return 0; 6213 return 0;
@@ -6183,11 +6253,10 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 	return skb_tx_hash(dev, skb);
 }
 
-static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
-				    struct net_device *netdev)
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+				  struct ixgbe_adapter *adapter,
+				  struct ixgbe_ring *tx_ring)
 {
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_ring *tx_ring;
 	struct netdev_queue *txq;
 	unsigned int first;
 	unsigned int tx_flags = 0;
@@ -6211,8 +6280,6 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 		tx_flags |= IXGBE_TX_FLAGS_VLAN;
 	}
 
-	tx_ring = adapter->tx_ring[skb->queue_mapping];
-
 #ifdef IXGBE_FCOE
 	/* for FCoE with DCB, we force the priority to what
 	 * was specified by the switch */
@@ -6283,10 +6350,10 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 		if (tx_ring->atr_sample_rate) {
 			++tx_ring->atr_count;
 			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
 			    test_bit(__IXGBE_FDIR_INIT_DONE,
 				     &tx_ring->reinit_state)) {
 				ixgbe_atr(adapter, skb, tx_ring->queue_index,
 					  tx_flags);
 				tx_ring->atr_count = 0;
 			}
 		}
@@ -6294,7 +6361,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 		txq->tx_bytes += skb->len;
 		txq->tx_packets++;
 		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
 			       hdr_len);
 		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
 	} else {
@@ -6306,6 +6373,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_ring *tx_ring;
+
+	tx_ring = adapter->tx_ring[skb->queue_mapping];
+	return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
+}
+
 /**
  * ixgbe_set_mac - Change the Ethernet Address of the NIC
  * @netdev: network interface device structure
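Splitting the transmit path this way leaves ndo_start_xmit behavior unchanged while letting other call sites hand ixgbe_xmit_frame_ring() a specific ring; a hypothetical caller that already holds a ring pointer would look like this:

    /* hypothetical: transmit on an explicitly chosen ring */
    netdev_tx_t ret = ixgbe_xmit_frame_ring(skb, adapter->netdev, adapter,
                                            adapter->tx_ring[0]);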
@@ -6437,7 +6513,7 @@ static void ixgbe_netpoll(struct net_device *netdev)
 #endif
 
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
 	.ndo_start_xmit		= ixgbe_xmit_frame,
 	.ndo_select_queue	= ixgbe_select_queue,
@@ -6532,7 +6608,7 @@ err_novfs:
  * and a hardware reset occur.
  **/
 static int __devinit ixgbe_probe(struct pci_dev *pdev,
 				 const struct pci_device_id *ent)
 {
 	struct net_device *netdev;
 	struct ixgbe_adapter *adapter = NULL;
@@ -6577,7 +6653,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	}
 
 	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
 					   IORESOURCE_MEM), ixgbe_driver_name);
 	if (err) {
 		dev_err(&pdev->dev,
 			"pci_request_selected_regions failed 0x%x\n", err);
@@ -6617,7 +6693,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 
 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
 			      pci_resource_len(pdev, 0));
 	if (!hw->hw_addr) {
 		err = -EIO;
 		goto err_ioremap;
@@ -6661,7 +6737,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	 * which might start the timer
 	 */
 	init_timer(&adapter->sfp_timer);
-	adapter->sfp_timer.function = &ixgbe_sfp_timer;
+	adapter->sfp_timer.function = ixgbe_sfp_timer;
 	adapter->sfp_timer.data = (unsigned long) adapter;
 
 	INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
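Dropping the ampersand here, and in the watchdog hunk below, is purely cosmetic: a function name in this position decays to a pointer, so both spellings assign the same value. A minimal sketch, assuming any handler with the matching signature:

    void handler(unsigned long data);

    timer.function = handler; /* same pointer value as &handler */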
@@ -6671,7 +6747,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	/* a new SFP+ module arrival, called from GPI SDP2 context */
 	INIT_WORK(&adapter->sfp_config_module_task,
 		  ixgbe_sfp_config_module_task);
 
 	ii->get_invariants(hw);
 
@@ -6723,10 +6799,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	ixgbe_probe_vf(adapter, ii);
 
 	netdev->features = NETIF_F_SG |
 			   NETIF_F_IP_CSUM |
 			   NETIF_F_HW_VLAN_TX |
 			   NETIF_F_HW_VLAN_RX |
 			   NETIF_F_HW_VLAN_FILTER;
 
 	netdev->features |= NETIF_F_IPV6_CSUM;
 	netdev->features |= NETIF_F_TSO;
@@ -6766,8 +6842,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 		netdev->vlan_features |= NETIF_F_FCOE_MTU;
 	}
 #endif /* IXGBE_FCOE */
-	if (pci_using_dac)
+	if (pci_using_dac) {
 		netdev->features |= NETIF_F_HIGHDMA;
+		netdev->vlan_features |= NETIF_F_HIGHDMA;
+	}
 
 	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
 		netdev->features |= NETIF_F_LRO;
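The vlan_features addition matters because a VLAN device built on top of the NIC inherits its offload set from vlan_features, not from features; without mirroring NETIF_F_HIGHDMA there, VLANs on this adapter would have to bounce high-memory buffers. The pairing, assuming 64-bit DMA was negotiated:

    if (pci_using_dac) {
            netdev->features |= NETIF_F_HIGHDMA;
            netdev->vlan_features |= NETIF_F_HIGHDMA; /* keep VLANs in sync */
    }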
@@ -6793,7 +6871,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 		hw->mac.ops.disable_tx_laser(hw);
 
 	init_timer(&adapter->watchdog_timer);
-	adapter->watchdog_timer.function = &ixgbe_watchdog;
+	adapter->watchdog_timer.function = ixgbe_watchdog;
 	adapter->watchdog_timer.data = (unsigned long)adapter;
 
 	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
@@ -6806,7 +6884,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	switch (pdev->device) {
 	case IXGBE_DEV_ID_82599_KX4:
 		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
 				IXGBE_WUFC_MC | IXGBE_WUFC_BC);
 		break;
 	default:
 		adapter->wol = 0;
@@ -6819,13 +6897,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
 	/* print bus type/speed/width info */
 	e_dev_info("(PCI Express:%s:%s) %pM\n",
-		   ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
-		    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
-		   ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
-		    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
-		    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
-		    "Unknown"),
-		   netdev->dev_addr);
+		   (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
+		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
+		    "Unknown"),
+		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
+		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
+		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
+		    "Unknown"),
+		   netdev->dev_addr);
 	ixgbe_read_pba_num_generic(hw, &part_num);
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
 		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
@@ -6872,7 +6951,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 	INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
 
 	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
-		INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task);
+		INIT_WORK(&adapter->check_overtemp_task,
+			  ixgbe_check_overtemp_task);
 #ifdef CONFIG_IXGBE_DCA
 	if (dca_add_requester(&pdev->dev) == 0) {
 		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -6908,8 +6988,8 @@ err_eeprom:
 err_ioremap:
 	free_netdev(netdev);
 err_alloc_etherdev:
-	pci_release_selected_regions(pdev, pci_select_bars(pdev,
-				     IORESOURCE_MEM));
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
 	pci_disable_device(pdev);
@@ -6976,7 +7056,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 
 	iounmap(adapter->hw.hw_addr);
 	pci_release_selected_regions(pdev, pci_select_bars(pdev,
 				     IORESOURCE_MEM));
 
 	e_dev_info("complete\n");
 
@@ -6996,7 +7076,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
  * this device has been detected.
  */
 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 						pci_channel_state_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7102,8 +7182,7 @@ static struct pci_driver ixgbe_driver = {
 static int __init ixgbe_init_module(void)
 {
 	int ret;
-	pr_info("%s - version %s\n", ixgbe_driver_string,
-		ixgbe_driver_version);
+	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
 	pr_info("%s\n", ixgbe_copyright);
 
 #ifdef CONFIG_IXGBE_DCA
@@ -7132,12 +7211,12 @@ static void __exit ixgbe_exit_module(void)
 
 #ifdef CONFIG_IXGBE_DCA
 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
 			    void *p)
 {
 	int ret_val;
 
 	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
 					 __ixgbe_notify_dca);
 
 	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
 }
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 9587d975d66c..d3cc6ce7c973 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -871,6 +871,8 @@
 #define IXGBE_RDRXCTL_MVMEN		0x00000020
 #define IXGBE_RDRXCTL_DMAIDONE		0x00000008 /* DMA init cycle done */
 #define IXGBE_RDRXCTL_AGGDIS		0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCACKC		0x02000000 /* must set 1 when RSC enabled */
+#define IXGBE_RDRXCTL_FCOE_WRFIX	0x04000000 /* must set 1 when RSC enabled */
 
 /* RQTC Bit Masks and Shifts */
 #define IXGBE_RQTC_SHIFT_TC(_i)		((_i) * 4)
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 4680b069b84f..4cc817acfb62 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -330,10 +330,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
-	int i, err;
+	int i, err = 0;
 	u32 new_rx_count, new_tx_count;
-	bool need_tx_update = false;
-	bool need_rx_update = false;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
@@ -355,89 +353,96 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
355 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 353 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
356 msleep(1); 354 msleep(1);
357 355
358 if (new_tx_count != adapter->tx_ring_count) { 356 /*
359 tx_ring = kcalloc(adapter->num_tx_queues, 357 * If the adapter isn't up and running then just set the
360 sizeof(struct ixgbevf_ring), GFP_KERNEL); 358 * new parameters and scurry for the exits.
361 if (!tx_ring) { 359 */
362 err = -ENOMEM; 360 if (!netif_running(adapter->netdev)) {
363 goto err_setup; 361 for (i = 0; i < adapter->num_tx_queues; i++)
364 } 362 adapter->tx_ring[i].count = new_tx_count;
365 memcpy(tx_ring, adapter->tx_ring, 363 for (i = 0; i < adapter->num_rx_queues; i++)
366 adapter->num_tx_queues * sizeof(struct ixgbevf_ring)); 364 adapter->rx_ring[i].count = new_rx_count;
367 for (i = 0; i < adapter->num_tx_queues; i++) { 365 adapter->tx_ring_count = new_tx_count;
368 tx_ring[i].count = new_tx_count; 366 adapter->rx_ring_count = new_rx_count;
369 err = ixgbevf_setup_tx_resources(adapter, 367 goto clear_reset;
370 &tx_ring[i]);
371 if (err) {
372 while (i) {
373 i--;
374 ixgbevf_free_tx_resources(adapter,
375 &tx_ring[i]);
376 }
377 kfree(tx_ring);
378 goto err_setup;
379 }
380 tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
381 }
382 need_tx_update = true;
383 } 368 }
384 369
385 if (new_rx_count != adapter->rx_ring_count) { 370 tx_ring = kcalloc(adapter->num_tx_queues,
386 rx_ring = kcalloc(adapter->num_rx_queues, 371 sizeof(struct ixgbevf_ring), GFP_KERNEL);
387 sizeof(struct ixgbevf_ring), GFP_KERNEL); 372 if (!tx_ring) {
388 if ((!rx_ring) && (need_tx_update)) { 373 err = -ENOMEM;
389 err = -ENOMEM; 374 goto clear_reset;
390 goto err_rx_setup;
391 }
392 memcpy(rx_ring, adapter->rx_ring,
393 adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
394 for (i = 0; i < adapter->num_rx_queues; i++) {
395 rx_ring[i].count = new_rx_count;
396 err = ixgbevf_setup_rx_resources(adapter,
397 &rx_ring[i]);
398 if (err) {
399 while (i) {
400 i--;
401 ixgbevf_free_rx_resources(adapter,
402 &rx_ring[i]);
403 }
404 kfree(rx_ring);
405 goto err_rx_setup;
406 }
407 rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
408 }
409 need_rx_update = true;
410 } 375 }
411 376
412err_rx_setup: 377 rx_ring = kcalloc(adapter->num_rx_queues,
413 /* if rings need to be updated, here's the place to do it in one shot */ 378 sizeof(struct ixgbevf_ring), GFP_KERNEL);
414 if (need_tx_update || need_rx_update) { 379 if (!rx_ring) {
415 if (netif_running(netdev)) 380 err = -ENOMEM;
416 ixgbevf_down(adapter); 381 goto err_rx_setup;
417 } 382 }
418 383
419 /* tx */ 384 ixgbevf_down(adapter);
420 if (need_tx_update) { 385
421 kfree(adapter->tx_ring); 386 memcpy(tx_ring, adapter->tx_ring,
422 adapter->tx_ring = tx_ring; 387 adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
423 tx_ring = NULL; 388 for (i = 0; i < adapter->num_tx_queues; i++) {
424 adapter->tx_ring_count = new_tx_count; 389 tx_ring[i].count = new_tx_count;
390 err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
391 if (err) {
392 while (i) {
393 i--;
394 ixgbevf_free_tx_resources(adapter,
395 &tx_ring[i]);
396 }
397 goto err_tx_ring_setup;
398 }
399 tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
425 } 400 }
426 401
427 /* rx */ 402 memcpy(rx_ring, adapter->rx_ring,
428 if (need_rx_update) { 403 adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
429 kfree(adapter->rx_ring); 404 for (i = 0; i < adapter->num_rx_queues; i++) {
430 adapter->rx_ring = rx_ring; 405 rx_ring[i].count = new_rx_count;
431 rx_ring = NULL; 406 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
432 adapter->rx_ring_count = new_rx_count; 407 if (err) {
408 while (i) {
409 i--;
410 ixgbevf_free_rx_resources(adapter,
411 &rx_ring[i]);
412 }
413 goto err_rx_ring_setup;
414 }
415 rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
433 } 416 }
434 417
418 /*
419 * Only switch to new rings if all the prior allocations
420 * and ring setups have succeeded.
421 */
422 kfree(adapter->tx_ring);
423 adapter->tx_ring = tx_ring;
424 adapter->tx_ring_count = new_tx_count;
425
426 kfree(adapter->rx_ring);
427 adapter->rx_ring = rx_ring;
428 adapter->rx_ring_count = new_rx_count;
429
435 /* success! */ 430 /* success! */
436 err = 0; 431 ixgbevf_up(adapter);
437 if (netif_running(netdev)) 432
438 ixgbevf_up(adapter); 433 goto clear_reset;
434
435err_rx_ring_setup:
436	for (i = 0; i < adapter->num_tx_queues; i++)
437 ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
438
439err_tx_ring_setup:
440 kfree(rx_ring);
441
442err_rx_setup:
443 kfree(tx_ring);
439 444
440err_setup: 445clear_reset:
441 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 446 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
442 return err; 447 return err;
443} 448}
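
Editor's note: the rewritten error handling above follows the classic "allocate new resources first, swap only after everything succeeds" pattern, so a mid-resize failure leaves the adapter running on its old rings. A minimal sketch of that control flow, using hypothetical setup_ring()/free_ring() helpers rather than the actual ixgbevf API:

#include <linux/slab.h>

struct ring { unsigned int count; };

static int setup_ring(struct ring *r);		/* hypothetical */
static void free_ring(struct ring *r);		/* hypothetical */

static int resize_rings(struct ring **cur, unsigned int num,
			unsigned int new_count)
{
	struct ring *fresh;
	unsigned int i;
	int err;

	fresh = kcalloc(num, sizeof(*fresh), GFP_KERNEL);
	if (!fresh)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		fresh[i].count = new_count;
		err = setup_ring(&fresh[i]);
		if (err)
			goto undo;
	}

	/* Every allocation succeeded - only now retire the old rings. */
	kfree(*cur);
	*cur = fresh;
	return 0;

undo:
	while (i--)
		free_ring(&fresh[i]);
	kfree(fresh);
	return err;
}
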
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index f7015efbff05..da4033c6efa2 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -243,7 +243,6 @@ struct ixgbevf_adapter {
243 /* OS defined structs */ 243 /* OS defined structs */
244 struct net_device *netdev; 244 struct net_device *netdev;
245 struct pci_dev *pdev; 245 struct pci_dev *pdev;
246 struct net_device_stats net_stats;
247 246
248 /* structs defined in ixgbe_vf.h */ 247 /* structs defined in ixgbe_vf.h */
249 struct ixgbe_hw hw; 248 struct ixgbe_hw hw;
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 918c00359b0a..0866a1cf4d7b 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -308,10 +308,10 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
308 tx_ring->total_bytes += total_bytes; 308 tx_ring->total_bytes += total_bytes;
309 tx_ring->total_packets += total_packets; 309 tx_ring->total_packets += total_packets;
310 310
311 adapter->net_stats.tx_bytes += total_bytes; 311 netdev->stats.tx_bytes += total_bytes;
312 adapter->net_stats.tx_packets += total_packets; 312 netdev->stats.tx_packets += total_packets;
313 313
314 return (count < tx_ring->work_limit); 314 return count < tx_ring->work_limit;
315} 315}
316 316
317/** 317/**
@@ -356,7 +356,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
356static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, 356static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
357 u32 status_err, struct sk_buff *skb) 357 u32 status_err, struct sk_buff *skb)
358{ 358{
359 skb->ip_summed = CHECKSUM_NONE; 359 skb_checksum_none_assert(skb);
360 360
361 /* Rx csum disabled */ 361 /* Rx csum disabled */
362 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) 362 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
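
Editor's note: the skb_checksum_none_assert() conversions in this series rely on a freshly allocated skb already having ip_summed == CHECKSUM_NONE; the helper documents (and, in debug builds, verifies) that invariant instead of redundantly storing the value. As of this era it is defined in <linux/skbuff.h> roughly as:

static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}
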
@@ -639,8 +639,8 @@ next_desc:
639 639
640 rx_ring->total_packets += total_rx_packets; 640 rx_ring->total_packets += total_rx_packets;
641 rx_ring->total_bytes += total_rx_bytes; 641 rx_ring->total_bytes += total_rx_bytes;
642 adapter->net_stats.rx_bytes += total_rx_bytes; 642 adapter->netdev->stats.rx_bytes += total_rx_bytes;
643 adapter->net_stats.rx_packets += total_rx_packets; 643 adapter->netdev->stats.rx_packets += total_rx_packets;
644 644
645 return cleaned; 645 return cleaned;
646} 646}
@@ -2297,7 +2297,7 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2297 adapter->stats.vfmprc); 2297 adapter->stats.vfmprc);
2298 2298
2299 /* Fill out the OS statistics structure */ 2299 /* Fill out the OS statistics structure */
2300 adapter->net_stats.multicast = adapter->stats.vfmprc - 2300 adapter->netdev->stats.multicast = adapter->stats.vfmprc -
2301 adapter->stats.base_vfmprc; 2301 adapter->stats.base_vfmprc;
2302} 2302}
2303 2303
@@ -3181,21 +3181,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3181} 3181}
3182 3182
3183/** 3183/**
3184 * ixgbevf_get_stats - Get System Network Statistics
3185 * @netdev: network interface device structure
3186 *
3187 * Returns the address of the device statistics structure.
3188 * The statistics are actually updated from the timer callback.
3189 **/
3190static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
3191{
3192 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3193
3194 /* only return the current stats */
3195 return &adapter->net_stats;
3196}
3197
3198/**
3199 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 3184 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3200 * @netdev: network interface device structure 3185 * @netdev: network interface device structure
3201 * @p: pointer to an address structure 3186 * @p: pointer to an address structure
@@ -3272,7 +3257,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
3272 .ndo_open = &ixgbevf_open, 3257 .ndo_open = &ixgbevf_open,
3273 .ndo_stop = &ixgbevf_close, 3258 .ndo_stop = &ixgbevf_close,
3274 .ndo_start_xmit = &ixgbevf_xmit_frame, 3259 .ndo_start_xmit = &ixgbevf_xmit_frame,
3275 .ndo_get_stats = &ixgbevf_get_stats,
3276 .ndo_set_rx_mode = &ixgbevf_set_rx_mode, 3260 .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
3277 .ndo_set_multicast_list = &ixgbevf_set_rx_mode, 3261 .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
3278 .ndo_validate_addr = eth_validate_addr, 3262 .ndo_validate_addr = eth_validate_addr,
@@ -3426,7 +3410,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3426 } 3410 }
3427 3411
3428 init_timer(&adapter->watchdog_timer); 3412 init_timer(&adapter->watchdog_timer);
3429 adapter->watchdog_timer.function = &ixgbevf_watchdog; 3413 adapter->watchdog_timer.function = ixgbevf_watchdog;
3430 adapter->watchdog_timer.data = (unsigned long)adapter; 3414 adapter->watchdog_timer.data = (unsigned long)adapter;
3431 3415
3432 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3416 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
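
Editor's note: dropping the private net_stats copy and the .ndo_get_stats hook works because the core's dev_get_stats() falls back to the counters embedded in struct net_device when a driver supplies no stats callback. The driver-side pattern is simply to accumulate into netdev->stats, e.g.:

#include <linux/netdevice.h>

/* Sketch: with no ndo_get_stats hook, the stack reports the counters
 * accumulated directly in netdev->stats. */
static void example_count_rx(struct net_device *netdev,
			     u64 bytes, u64 packets)
{
	netdev->stats.rx_bytes += bytes;
	netdev->stats.rx_packets += packets;
}
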
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 94b750b8874f..61f9dc831424 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -124,8 +124,6 @@ struct ixgbe_hw {
124 void *back; 124 void *back;
125 125
126 u8 __iomem *hw_addr; 126 u8 __iomem *hw_addr;
127 u8 *flash_address;
128 unsigned long io_base;
129 127
130 struct ixgbe_mac_info mac; 128 struct ixgbe_mac_info mac;
131 struct ixgbe_mbx_info mbx; 129 struct ixgbe_mbx_info mbx;
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 99f24f5cac53..c04c096bc6a9 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -21,6 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
24#include <linux/module.h> 26#include <linux/module.h>
25#include <linux/kernel.h> 27#include <linux/kernel.h>
26#include <linux/pci.h> 28#include <linux/pci.h>
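
Editor's note: the pr_fmt() definition is what lets the bare pr_err()/pr_info() calls below replace the driver-private jeprintk() macro; every pr_*() in the file is automatically prefixed with the module name. The macro must be visible before <linux/kernel.h> is pulled in. A minimal sketch:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	pr_info("loaded\n");	/* logs as "<modname>: loaded" */
	return 0;
}
module_init(demo_init);
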
@@ -73,7 +75,7 @@ read_again:
73 } 75 }
74 76
75 if (i == 0) { 77 if (i == 0) {
76 jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg); 78 pr_err("phy(%d) read timeout : %d\n", phy, reg);
77 return 0; 79 return 0;
78 } 80 }
79 81
@@ -102,7 +104,7 @@ jme_mdio_write(struct net_device *netdev,
102 } 104 }
103 105
104 if (i == 0) 106 if (i == 0)
105 jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg); 107 pr_err("phy(%d) write timeout : %d\n", phy, reg);
106} 108}
107 109
108static inline void 110static inline void
@@ -227,7 +229,7 @@ jme_reload_eeprom(struct jme_adapter *jme)
227 } 229 }
228 230
229 if (i == 0) { 231 if (i == 0) {
230 jeprintk(jme->pdev, "eeprom reload timeout\n"); 232 pr_err("eeprom reload timeout\n");
231 return -EIO; 233 return -EIO;
232 } 234 }
233 } 235 }
@@ -397,8 +399,7 @@ jme_check_link(struct net_device *netdev, int testonly)
397 phylink = jread32(jme, JME_PHY_LINK); 399 phylink = jread32(jme, JME_PHY_LINK);
398 } 400 }
399 if (!cnt) 401 if (!cnt)
400 jeprintk(jme->pdev, 402 pr_err("Waiting speed resolve timeout\n");
401 "Waiting speed resolve timeout.\n");
402 403
403 strcat(linkmsg, "ANed: "); 404 strcat(linkmsg, "ANed: ");
404 } 405 }
@@ -480,13 +481,13 @@ jme_check_link(struct net_device *netdev, int testonly)
480 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ? 481 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
481 "MDI-X" : 482 "MDI-X" :
482 "MDI"); 483 "MDI");
483 netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg); 484 netif_info(jme, link, jme->dev, "Link is up at %s\n", linkmsg);
484 netif_carrier_on(netdev); 485 netif_carrier_on(netdev);
485 } else { 486 } else {
486 if (testonly) 487 if (testonly)
487 goto out; 488 goto out;
488 489
489 netif_info(jme, link, jme->dev, "Link is down.\n"); 490 netif_info(jme, link, jme->dev, "Link is down\n");
490 jme->phylink = 0; 491 jme->phylink = 0;
491 netif_carrier_off(netdev); 492 netif_carrier_off(netdev);
492 } 493 }
@@ -648,7 +649,7 @@ jme_disable_tx_engine(struct jme_adapter *jme)
648 } 649 }
649 650
650 if (!i) 651 if (!i)
651 jeprintk(jme->pdev, "Disable TX engine timeout.\n"); 652 pr_err("Disable TX engine timeout\n");
652} 653}
653 654
654static void 655static void
@@ -867,7 +868,7 @@ jme_disable_rx_engine(struct jme_adapter *jme)
867 } 868 }
868 869
869 if (!i) 870 if (!i)
870 jeprintk(jme->pdev, "Disable RX engine timeout.\n"); 871 pr_err("Disable RX engine timeout\n");
871 872
872} 873}
873 874
@@ -887,13 +888,13 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
887 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) 888 if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
888 == RXWBFLAG_UDPON)) { 889 == RXWBFLAG_UDPON)) {
889 if (flags & RXWBFLAG_IPV4) 890 if (flags & RXWBFLAG_IPV4)
890 netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n"); 891 netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
891 return false; 892 return false;
892 } 893 }
893 894
894 if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS)) 895 if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
895 == RXWBFLAG_IPV4)) { 896 == RXWBFLAG_IPV4)) {
896 netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n"); 897 netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error\n");
897 return false; 898 return false;
898 } 899 }
899 900
@@ -936,7 +937,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
936 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags))) 937 if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
937 skb->ip_summed = CHECKSUM_UNNECESSARY; 938 skb->ip_summed = CHECKSUM_UNNECESSARY;
938 else 939 else
939 skb->ip_summed = CHECKSUM_NONE; 940 skb_checksum_none_assert(skb);
940 941
941 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) { 942 if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
942 if (jme->vlgrp) { 943 if (jme->vlgrp) {
@@ -1185,9 +1186,9 @@ jme_link_change_tasklet(unsigned long arg)
1185 1186
1186 while (!atomic_dec_and_test(&jme->link_changing)) { 1187 while (!atomic_dec_and_test(&jme->link_changing)) {
1187 atomic_inc(&jme->link_changing); 1188 atomic_inc(&jme->link_changing);
1188 netif_info(jme, intr, jme->dev, "Get link change lock failed.\n"); 1189 netif_info(jme, intr, jme->dev, "Get link change lock failed\n");
1189 while (atomic_read(&jme->link_changing) != 1) 1190 while (atomic_read(&jme->link_changing) != 1)
1190 netif_info(jme, intr, jme->dev, "Waiting link change lock.\n"); 1191 netif_info(jme, intr, jme->dev, "Waiting link change lock\n");
1191 } 1192 }
1192 1193
1193 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu) 1194 if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
@@ -1221,15 +1222,13 @@ jme_link_change_tasklet(unsigned long arg)
1221 if (netif_carrier_ok(netdev)) { 1222 if (netif_carrier_ok(netdev)) {
1222 rc = jme_setup_rx_resources(jme); 1223 rc = jme_setup_rx_resources(jme);
1223 if (rc) { 1224 if (rc) {
1224 jeprintk(jme->pdev, "Allocating resources for RX error" 1225 pr_err("Allocating resources for RX error, Device STOPPED!\n");
1225 ", Device STOPPED!\n");
1226 goto out_enable_tasklet; 1226 goto out_enable_tasklet;
1227 } 1227 }
1228 1228
1229 rc = jme_setup_tx_resources(jme); 1229 rc = jme_setup_tx_resources(jme);
1230 if (rc) { 1230 if (rc) {
1231 jeprintk(jme->pdev, "Allocating resources for TX error" 1231 pr_err("Allocating resources for TX error, Device STOPPED!\n");
1232 ", Device STOPPED!\n");
1233 goto err_out_free_rx_resources; 1232 goto err_out_free_rx_resources;
1234 } 1233 }
1235 1234
@@ -1324,7 +1323,7 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
1324 smp_wmb(); 1323 smp_wmb();
1325 if (unlikely(netif_queue_stopped(jme->dev) && 1324 if (unlikely(netif_queue_stopped(jme->dev) &&
1326 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) { 1325 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
1327 netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n"); 1326 netif_info(jme, tx_done, jme->dev, "TX Queue Waked\n");
1328 netif_wake_queue(jme->dev); 1327 netif_wake_queue(jme->dev);
1329 } 1328 }
1330 1329
@@ -1339,7 +1338,7 @@ jme_tx_clean_tasklet(unsigned long arg)
1339 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi; 1338 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
1340 int i, j, cnt = 0, max, err, mask; 1339 int i, j, cnt = 0, max, err, mask;
1341 1340
1342 tx_dbg(jme, "Into txclean.\n"); 1341 tx_dbg(jme, "Into txclean\n");
1343 1342
1344 if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning))) 1343 if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
1345 goto out; 1344 goto out;
@@ -1361,7 +1360,7 @@ jme_tx_clean_tasklet(unsigned long arg)
1361 !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) { 1360 !(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {
1362 1361
1363 tx_dbg(jme, "txclean: %d+%d@%lu\n", 1362 tx_dbg(jme, "txclean: %d+%d@%lu\n",
1364 i, ctxbi->nr_desc, jiffies); 1363 i, ctxbi->nr_desc, jiffies);
1365 1364
1366 err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR; 1365 err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
1367 1366
@@ -1402,7 +1401,7 @@ jme_tx_clean_tasklet(unsigned long arg)
1402 ctxbi->nr_desc = 0; 1401 ctxbi->nr_desc = 0;
1403 } 1402 }
1404 1403
1405 tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies); 1404 tx_dbg(jme, "txclean: done %d@%lu\n", i, jiffies);
1406 atomic_set(&txring->next_to_clean, i); 1405 atomic_set(&txring->next_to_clean, i);
1407 atomic_add(cnt, &txring->nr_free); 1406 atomic_add(cnt, &txring->nr_free);
1408 1407
@@ -1548,10 +1547,10 @@ jme_request_irq(struct jme_adapter *jme)
1548 rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name, 1547 rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1549 netdev); 1548 netdev);
1550 if (rc) { 1549 if (rc) {
1551 jeprintk(jme->pdev, 1550 netdev_err(netdev,
1552 "Unable to request %s interrupt (return: %d)\n", 1551 "Unable to request %s interrupt (return: %d)\n",
1553 test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx", 1552 test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
1554 rc); 1553 rc);
1555 1554
1556 if (test_bit(JME_FLAG_MSI, &jme->flags)) { 1555 if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1557 pci_disable_msi(jme->pdev); 1556 pci_disable_msi(jme->pdev);
@@ -1834,7 +1833,7 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
1834 *flags |= TXFLAG_UDPCS; 1833 *flags |= TXFLAG_UDPCS;
1835 break; 1834 break;
1836 default: 1835 default:
1837 netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n"); 1836 netif_err(jme, tx_err, jme->dev, "Error upper layer protocol\n");
1838 break; 1837 break;
1839 } 1838 }
1840 } 1839 }
@@ -1909,12 +1908,12 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
1909 smp_wmb(); 1908 smp_wmb();
1910 if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) { 1909 if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
1911 netif_stop_queue(jme->dev); 1910 netif_stop_queue(jme->dev);
1912 netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n"); 1911 netif_info(jme, tx_queued, jme->dev, "TX Queue Paused\n");
1913 smp_wmb(); 1912 smp_wmb();
1914 if (atomic_read(&txring->nr_free) 1913 if (atomic_read(&txring->nr_free)
1915 >= (jme->tx_wake_threshold)) { 1914 >= (jme->tx_wake_threshold)) {
1916 netif_wake_queue(jme->dev); 1915 netif_wake_queue(jme->dev);
1917 netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n"); 1916 netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked\n");
1918 } 1917 }
1919 } 1918 }
1920 1919
@@ -1922,7 +1921,8 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
1922 (jiffies - txbi->start_xmit) >= TX_TIMEOUT && 1921 (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
1923 txbi->skb)) { 1922 txbi->skb)) {
1924 netif_stop_queue(jme->dev); 1923 netif_stop_queue(jme->dev);
1925 netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies); 1924 netif_info(jme, tx_queued, jme->dev,
1925 "TX Queue Stopped %d@%lu\n", idx, jiffies);
1926 } 1926 }
1927} 1927}
1928 1928
@@ -1945,7 +1945,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1945 1945
1946 if (unlikely(idx < 0)) { 1946 if (unlikely(idx < 0)) {
1947 netif_stop_queue(netdev); 1947 netif_stop_queue(netdev);
1948 netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n"); 1948 netif_err(jme, tx_err, jme->dev,
1949 "BUG! Tx ring full when queue awake!\n");
1949 1950
1950 return NETDEV_TX_BUSY; 1951 return NETDEV_TX_BUSY;
1951 } 1952 }
@@ -1957,9 +1958,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1957 TXCS_QUEUE0S | 1958 TXCS_QUEUE0S |
1958 TXCS_ENABLE); 1959 TXCS_ENABLE);
1959 1960
1960 tx_dbg(jme, "xmit: %d+%d@%lu\n", idx, 1961 tx_dbg(jme, "xmit: %d+%d@%lu\n",
1961 skb_shinfo(skb)->nr_frags + 2, 1962 idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
1962 jiffies);
1963 jme_stop_queue_if_full(jme); 1963 jme_stop_queue_if_full(jme);
1964 1964
1965 return NETDEV_TX_OK; 1965 return NETDEV_TX_OK;
@@ -2501,7 +2501,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2501 val = jread32(jme, JME_SMBCSR); 2501 val = jread32(jme, JME_SMBCSR);
2502 } 2502 }
2503 if (!to) { 2503 if (!to) {
2504 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2504 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2505 return 0xFF; 2505 return 0xFF;
2506 } 2506 }
2507 2507
@@ -2517,7 +2517,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
2517 val = jread32(jme, JME_SMBINTF); 2517 val = jread32(jme, JME_SMBINTF);
2518 } 2518 }
2519 if (!to) { 2519 if (!to) {
2520 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2520 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2521 return 0xFF; 2521 return 0xFF;
2522 } 2522 }
2523 2523
@@ -2537,7 +2537,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2537 val = jread32(jme, JME_SMBCSR); 2537 val = jread32(jme, JME_SMBCSR);
2538 } 2538 }
2539 if (!to) { 2539 if (!to) {
2540 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2540 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2541 return; 2541 return;
2542 } 2542 }
2543 2543
@@ -2554,7 +2554,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
2554 val = jread32(jme, JME_SMBINTF); 2554 val = jread32(jme, JME_SMBINTF);
2555 } 2555 }
2556 if (!to) { 2556 if (!to) {
2557 netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n"); 2557 netif_err(jme, hw, jme->dev, "SMB Bus Busy\n");
2558 return; 2558 return;
2559 } 2559 }
2560 2560
@@ -2699,26 +2699,26 @@ jme_init_one(struct pci_dev *pdev,
2699 */ 2699 */
2700 rc = pci_enable_device(pdev); 2700 rc = pci_enable_device(pdev);
2701 if (rc) { 2701 if (rc) {
2702 jeprintk(pdev, "Cannot enable PCI device.\n"); 2702 pr_err("Cannot enable PCI device\n");
2703 goto err_out; 2703 goto err_out;
2704 } 2704 }
2705 2705
2706 using_dac = jme_pci_dma64(pdev); 2706 using_dac = jme_pci_dma64(pdev);
2707 if (using_dac < 0) { 2707 if (using_dac < 0) {
2708 jeprintk(pdev, "Cannot set PCI DMA Mask.\n"); 2708 pr_err("Cannot set PCI DMA Mask\n");
2709 rc = -EIO; 2709 rc = -EIO;
2710 goto err_out_disable_pdev; 2710 goto err_out_disable_pdev;
2711 } 2711 }
2712 2712
2713 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 2713 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2714 jeprintk(pdev, "No PCI resource region found.\n"); 2714 pr_err("No PCI resource region found\n");
2715 rc = -ENOMEM; 2715 rc = -ENOMEM;
2716 goto err_out_disable_pdev; 2716 goto err_out_disable_pdev;
2717 } 2717 }
2718 2718
2719 rc = pci_request_regions(pdev, DRV_NAME); 2719 rc = pci_request_regions(pdev, DRV_NAME);
2720 if (rc) { 2720 if (rc) {
2721 jeprintk(pdev, "Cannot obtain PCI resource region.\n"); 2721 pr_err("Cannot obtain PCI resource region\n");
2722 goto err_out_disable_pdev; 2722 goto err_out_disable_pdev;
2723 } 2723 }
2724 2724
@@ -2729,7 +2729,7 @@ jme_init_one(struct pci_dev *pdev,
2729 */ 2729 */
2730 netdev = alloc_etherdev(sizeof(*jme)); 2730 netdev = alloc_etherdev(sizeof(*jme));
2731 if (!netdev) { 2731 if (!netdev) {
2732 jeprintk(pdev, "Cannot allocate netdev structure.\n"); 2732 pr_err("Cannot allocate netdev structure\n");
2733 rc = -ENOMEM; 2733 rc = -ENOMEM;
2734 goto err_out_release_regions; 2734 goto err_out_release_regions;
2735 } 2735 }
@@ -2767,7 +2767,7 @@ jme_init_one(struct pci_dev *pdev,
2767 jme->regs = ioremap(pci_resource_start(pdev, 0), 2767 jme->regs = ioremap(pci_resource_start(pdev, 0),
2768 pci_resource_len(pdev, 0)); 2768 pci_resource_len(pdev, 0));
2769 if (!(jme->regs)) { 2769 if (!(jme->regs)) {
2770 jeprintk(pdev, "Mapping PCI resource region error.\n"); 2770 pr_err("Mapping PCI resource region error\n");
2771 rc = -ENOMEM; 2771 rc = -ENOMEM;
2772 goto err_out_free_netdev; 2772 goto err_out_free_netdev;
2773 } 2773 }
@@ -2855,8 +2855,8 @@ jme_init_one(struct pci_dev *pdev,
2855 2855
2856 if (!jme->mii_if.phy_id) { 2856 if (!jme->mii_if.phy_id) {
2857 rc = -EIO; 2857 rc = -EIO;
2858 jeprintk(pdev, "Can not find phy_id.\n"); 2858 pr_err("Can not find phy_id\n");
2859 goto err_out_unmap; 2859 goto err_out_unmap;
2860 } 2860 }
2861 2861
2862 jme->reg_ghc |= GHC_LINK_POLL; 2862 jme->reg_ghc |= GHC_LINK_POLL;
@@ -2883,8 +2883,7 @@ jme_init_one(struct pci_dev *pdev,
2883 jme_reset_mac_processor(jme); 2883 jme_reset_mac_processor(jme);
2884 rc = jme_reload_eeprom(jme); 2884 rc = jme_reload_eeprom(jme);
2885 if (rc) { 2885 if (rc) {
2886 jeprintk(pdev, 2886 pr_err("Reload eeprom for reading MAC Address error\n");
2887 "Reload eeprom for reading MAC Address error.\n");
2888 goto err_out_unmap; 2887 goto err_out_unmap;
2889 } 2888 }
2890 jme_load_macaddr(netdev); 2889 jme_load_macaddr(netdev);
@@ -2900,7 +2899,7 @@ jme_init_one(struct pci_dev *pdev,
2900 */ 2899 */
2901 rc = register_netdev(netdev); 2900 rc = register_netdev(netdev);
2902 if (rc) { 2901 if (rc) {
2903 jeprintk(pdev, "Cannot register net device.\n"); 2902 pr_err("Cannot register net device\n");
2904 goto err_out_unmap; 2903 goto err_out_unmap;
2905 } 2904 }
2906 2905
@@ -3042,8 +3041,7 @@ static struct pci_driver jme_driver = {
3042static int __init 3041static int __init
3043jme_init_module(void) 3042jme_init_module(void)
3044{ 3043{
3045 printk(KERN_INFO PFX "JMicron JMC2XX ethernet " 3044 pr_info("JMicron JMC2XX ethernet driver version %s\n", DRV_VERSION);
3046 "driver version %s\n", DRV_VERSION);
3047 return pci_register_driver(&jme_driver); 3045 return pci_register_driver(&jme_driver);
3048} 3046}
3049 3047
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 07ad3a457185..1360f68861b8 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -41,9 +41,6 @@
41 NETIF_MSG_TX_ERR | \ 41 NETIF_MSG_TX_ERR | \
42 NETIF_MSG_HW) 42 NETIF_MSG_HW)
43 43
44#define jeprintk(pdev, fmt, args...) \
45 printk(KERN_ERR PFX fmt, ## args)
46
47#ifdef TX_DEBUG 44#ifdef TX_DEBUG
48#define tx_dbg(priv, fmt, args...) \ 45#define tx_dbg(priv, fmt, args...) \
49 printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args) 46 printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args)
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index bdf2149e5296..8762dcb84e8b 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -494,7 +494,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
494 lp->options |= options; 494 lp->options |= options;
495 mutex_unlock(&lp->indirect_mutex); 495 mutex_unlock(&lp->indirect_mutex);
496 496
497 return (0); 497 return 0;
498} 498}
499 499
500/* Initialize temac */ 500/* Initialize temac */
@@ -760,7 +760,7 @@ static void ll_temac_recv(struct net_device *ndev)
760 skb_put(skb, length); 760 skb_put(skb, length);
761 skb->dev = ndev; 761 skb->dev = ndev;
762 skb->protocol = eth_type_trans(skb, ndev); 762 skb->protocol = eth_type_trans(skb, ndev);
763 skb->ip_summed = CHECKSUM_NONE; 763 skb_checksum_none_assert(skb);
764 764
765 /* if we're doing rx csum offload, set it up */ 765 /* if we're doing rx csum offload, set it up */
766 if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) && 766 if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 9a0996795321..4b0e30b564e5 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -74,7 +74,6 @@ struct pcpu_lstats {
74static netdev_tx_t loopback_xmit(struct sk_buff *skb, 74static netdev_tx_t loopback_xmit(struct sk_buff *skb,
75 struct net_device *dev) 75 struct net_device *dev)
76{ 76{
77 struct pcpu_lstats __percpu *pcpu_lstats;
78 struct pcpu_lstats *lb_stats; 77 struct pcpu_lstats *lb_stats;
79 int len; 78 int len;
80 79
@@ -83,8 +82,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
83 skb->protocol = eth_type_trans(skb, dev); 82 skb->protocol = eth_type_trans(skb, dev);
84 83
85 /* it's OK to use per_cpu_ptr() because BHs are off */ 84 /* it's OK to use per_cpu_ptr() because BHs are off */
86 pcpu_lstats = (void __percpu __force *)dev->ml_priv; 85 lb_stats = this_cpu_ptr(dev->lstats);
87 lb_stats = this_cpu_ptr(pcpu_lstats);
88 86
89 len = skb->len; 87 len = skb->len;
90 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { 88 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
@@ -101,19 +99,17 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
101static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev, 99static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
102 struct rtnl_link_stats64 *stats) 100 struct rtnl_link_stats64 *stats)
103{ 101{
104 const struct pcpu_lstats __percpu *pcpu_lstats;
105 u64 bytes = 0; 102 u64 bytes = 0;
106 u64 packets = 0; 103 u64 packets = 0;
107 u64 drops = 0; 104 u64 drops = 0;
108 int i; 105 int i;
109 106
110 pcpu_lstats = (void __percpu __force *)dev->ml_priv;
111 for_each_possible_cpu(i) { 107 for_each_possible_cpu(i) {
112 const struct pcpu_lstats *lb_stats; 108 const struct pcpu_lstats *lb_stats;
113 u64 tbytes, tpackets; 109 u64 tbytes, tpackets;
114 unsigned int start; 110 unsigned int start;
115 111
116 lb_stats = per_cpu_ptr(pcpu_lstats, i); 112 lb_stats = per_cpu_ptr(dev->lstats, i);
117 do { 113 do {
118 start = u64_stats_fetch_begin(&lb_stats->syncp); 114 start = u64_stats_fetch_begin(&lb_stats->syncp);
119 tbytes = lb_stats->bytes; 115 tbytes = lb_stats->bytes;
@@ -147,22 +143,16 @@ static const struct ethtool_ops loopback_ethtool_ops = {
147 143
148static int loopback_dev_init(struct net_device *dev) 144static int loopback_dev_init(struct net_device *dev)
149{ 145{
150 struct pcpu_lstats __percpu *lstats; 146 dev->lstats = alloc_percpu(struct pcpu_lstats);
151 147 if (!dev->lstats)
152 lstats = alloc_percpu(struct pcpu_lstats);
153 if (!lstats)
154 return -ENOMEM; 148 return -ENOMEM;
155 149
156 dev->ml_priv = (void __force *)lstats;
157 return 0; 150 return 0;
158} 151}
159 152
160static void loopback_dev_free(struct net_device *dev) 153static void loopback_dev_free(struct net_device *dev)
161{ 154{
162 struct pcpu_lstats __percpu *lstats = 155 free_percpu(dev->lstats);
163 (void __percpu __force *)dev->ml_priv;
164
165 free_percpu(lstats);
166 free_netdev(dev); 156 free_netdev(dev);
167} 157}
168 158
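
Editor's note: the loopback conversion stores its per-CPU statistics in the dedicated dev->lstats field instead of force-casting through ml_priv, which removes the __percpu annotation games. The underlying pattern is alloc_percpu() at init, this_cpu_ptr() on the hot path, per_cpu_ptr() plus the u64_stats sequence counter when aggregating, and free_percpu() on teardown. The writer side, sketched under the same struct layout:

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct pcpu_lstats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void count_packet(struct pcpu_lstats __percpu *stats, unsigned int len)
{
	struct pcpu_lstats *s = this_cpu_ptr(stats);	/* BHs assumed off */

	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}
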
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 3df046a58b1d..3698824744cb 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -460,7 +460,7 @@ init_rx_bufs(struct net_device *dev, int num) {
460 } 460 }
461 lp->rbd_tail->next = rfd->rbd; 461 lp->rbd_tail->next = rfd->rbd;
462#endif 462#endif
463 return (i); 463 return i;
464} 464}
465 465
466static inline void 466static inline void
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 3832fa4961dd..f84f5e6ededb 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -562,19 +562,19 @@ static int __init mac8390_initdev(struct net_device *dev,
562 562
563 case ACCESS_16: 563 case ACCESS_16:
564 /* 16 bit card, register map is reversed */ 564 /* 16 bit card, register map is reversed */
565 ei_status.reset_8390 = &mac8390_no_reset; 565 ei_status.reset_8390 = mac8390_no_reset;
566 ei_status.block_input = &slow_sane_block_input; 566 ei_status.block_input = slow_sane_block_input;
567 ei_status.block_output = &slow_sane_block_output; 567 ei_status.block_output = slow_sane_block_output;
568 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 568 ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
569 ei_status.reg_offset = back4_offsets; 569 ei_status.reg_offset = back4_offsets;
570 break; 570 break;
571 571
572 case ACCESS_32: 572 case ACCESS_32:
573 /* 32 bit card, register map is reversed */ 573 /* 32 bit card, register map is reversed */
574 ei_status.reset_8390 = &mac8390_no_reset; 574 ei_status.reset_8390 = mac8390_no_reset;
575 ei_status.block_input = &sane_block_input; 575 ei_status.block_input = sane_block_input;
576 ei_status.block_output = &sane_block_output; 576 ei_status.block_output = sane_block_output;
577 ei_status.get_8390_hdr = &sane_get_8390_hdr; 577 ei_status.get_8390_hdr = sane_get_8390_hdr;
578 ei_status.reg_offset = back4_offsets; 578 ei_status.reg_offset = back4_offsets;
579 access_bitmode = 1; 579 access_bitmode = 1;
580 break; 580 break;
@@ -586,19 +586,19 @@ static int __init mac8390_initdev(struct net_device *dev,
586 * but overwrite system memory when run at 32 bit. 586 * but overwrite system memory when run at 32 bit.
587 * so we run them all at 16 bit. 587 * so we run them all at 16 bit.
588 */ 588 */
589 ei_status.reset_8390 = &mac8390_no_reset; 589 ei_status.reset_8390 = mac8390_no_reset;
590 ei_status.block_input = &slow_sane_block_input; 590 ei_status.block_input = slow_sane_block_input;
591 ei_status.block_output = &slow_sane_block_output; 591 ei_status.block_output = slow_sane_block_output;
592 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 592 ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
593 ei_status.reg_offset = back4_offsets; 593 ei_status.reg_offset = back4_offsets;
594 break; 594 break;
595 595
596 case MAC8390_CABLETRON: 596 case MAC8390_CABLETRON:
597 /* 16 bit card, register map is short forward */ 597 /* 16 bit card, register map is short forward */
598 ei_status.reset_8390 = &mac8390_no_reset; 598 ei_status.reset_8390 = mac8390_no_reset;
599 ei_status.block_input = &slow_sane_block_input; 599 ei_status.block_input = slow_sane_block_input;
600 ei_status.block_output = &slow_sane_block_output; 600 ei_status.block_output = slow_sane_block_output;
601 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 601 ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
602 ei_status.reg_offset = fwrd2_offsets; 602 ei_status.reg_offset = fwrd2_offsets;
603 break; 603 break;
604 604
@@ -606,19 +606,19 @@ static int __init mac8390_initdev(struct net_device *dev,
606 case MAC8390_KINETICS: 606 case MAC8390_KINETICS:
607 /* 16 bit memory, register map is forward */ 607 /* 16 bit memory, register map is forward */
608 /* dayna and similar */ 608 /* dayna and similar */
609 ei_status.reset_8390 = &mac8390_no_reset; 609 ei_status.reset_8390 = mac8390_no_reset;
610 ei_status.block_input = &dayna_block_input; 610 ei_status.block_input = dayna_block_input;
611 ei_status.block_output = &dayna_block_output; 611 ei_status.block_output = dayna_block_output;
612 ei_status.get_8390_hdr = &dayna_get_8390_hdr; 612 ei_status.get_8390_hdr = dayna_get_8390_hdr;
613 ei_status.reg_offset = fwrd4_offsets; 613 ei_status.reg_offset = fwrd4_offsets;
614 break; 614 break;
615 615
616 case MAC8390_INTERLAN: 616 case MAC8390_INTERLAN:
617 /* 16 bit memory, register map is forward */ 617 /* 16 bit memory, register map is forward */
618 ei_status.reset_8390 = &interlan_reset; 618 ei_status.reset_8390 = interlan_reset;
619 ei_status.block_input = &slow_sane_block_input; 619 ei_status.block_input = slow_sane_block_input;
620 ei_status.block_output = &slow_sane_block_output; 620 ei_status.block_output = slow_sane_block_output;
621 ei_status.get_8390_hdr = &slow_sane_get_8390_hdr; 621 ei_status.get_8390_hdr = slow_sane_get_8390_hdr;
622 ei_status.reg_offset = fwrd4_offsets; 622 ei_status.reg_offset = fwrd4_offsets;
623 break; 623 break;
624 624
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index ff2f158ab0b9..4297f6e8c4bc 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -407,7 +407,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
407 } 407 }
408 408
409 skb_reserve(skb, RX_OFFSET); 409 skb_reserve(skb, RX_OFFSET);
410 skb->ip_summed = CHECKSUM_NONE; 410 skb_checksum_none_assert(skb);
411 skb_put(skb, len); 411 skb_put(skb, len);
412 412
413 for (frag = first_frag; ; frag = NEXT_RX(frag)) { 413 for (frag = first_frag; ; frag = NEXT_RX(frag)) {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0ef0eb0db945..0fc9dc7f20db 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -788,6 +788,10 @@ static int macvlan_device_event(struct notifier_block *unused,
788 } 788 }
789 break; 789 break;
790 case NETDEV_UNREGISTER: 790 case NETDEV_UNREGISTER:
791 /* twiddle thumbs on netns device moves */
792 if (dev->reg_state != NETREG_UNREGISTERING)
793 break;
794
791 list_for_each_entry_safe(vlan, next, &port->vlans, list) 795 list_for_each_entry_safe(vlan, next, &port->vlans, list)
792 vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL); 796 vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
793 break; 797 break;
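
Editor's note: context for the reg_state check above: NETDEV_UNREGISTER is also raised while a device is merely being moved to another network namespace, and in that case reg_state is not NETREG_UNREGISTERING, so macvlan must not tear its vlans down. A sketch of a notifier making the same distinction (notifiers of this era receive the net_device pointer directly):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int demo_device_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* pre-netdev_notifier_info convention */

	if (event == NETDEV_UNREGISTER &&
	    dev->reg_state == NETREG_UNREGISTERING)
		pr_info("%s: real unregister, not a netns move\n", dev->name);

	return NOTIFY_DONE;
}
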
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3b1c54a9c6ef..42567279843e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -84,26 +84,45 @@ static const struct proto_ops macvtap_socket_ops;
84static DEFINE_SPINLOCK(macvtap_lock); 84static DEFINE_SPINLOCK(macvtap_lock);
85 85
86/* 86/*
87 * Choose the next free queue, for now there is only one 87 * get_slot: return a [unused/occupied] slot in vlan->taps[]:
88 * - if 'q' is NULL, return the first empty slot;
89 * - otherwise, return the slot this pointer occupies.
88 */ 90 */
91static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
92{
93 int i;
94
95 for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
96 if (rcu_dereference(vlan->taps[i]) == q)
97 return i;
98 }
99
100 /* Should never happen */
101 BUG_ON(1);
102}
103
89static int macvtap_set_queue(struct net_device *dev, struct file *file, 104static int macvtap_set_queue(struct net_device *dev, struct file *file,
90 struct macvtap_queue *q) 105 struct macvtap_queue *q)
91{ 106{
92 struct macvlan_dev *vlan = netdev_priv(dev); 107 struct macvlan_dev *vlan = netdev_priv(dev);
108 int index;
93 int err = -EBUSY; 109 int err = -EBUSY;
94 110
95 spin_lock(&macvtap_lock); 111 spin_lock(&macvtap_lock);
96 if (rcu_dereference(vlan->tap)) 112 if (vlan->numvtaps == MAX_MACVTAP_QUEUES)
97 goto out; 113 goto out;
98 114
99 err = 0; 115 err = 0;
116 index = get_slot(vlan, NULL);
100 rcu_assign_pointer(q->vlan, vlan); 117 rcu_assign_pointer(q->vlan, vlan);
101 rcu_assign_pointer(vlan->tap, q); 118 rcu_assign_pointer(vlan->taps[index], q);
102 sock_hold(&q->sk); 119 sock_hold(&q->sk);
103 120
104 q->file = file; 121 q->file = file;
105 file->private_data = q; 122 file->private_data = q;
106 123
124 vlan->numvtaps++;
125
107out: 126out:
108 spin_unlock(&macvtap_lock); 127 spin_unlock(&macvtap_lock);
109 return err; 128 return err;
@@ -124,9 +143,12 @@ static void macvtap_put_queue(struct macvtap_queue *q)
124 spin_lock(&macvtap_lock); 143 spin_lock(&macvtap_lock);
125 vlan = rcu_dereference(q->vlan); 144 vlan = rcu_dereference(q->vlan);
126 if (vlan) { 145 if (vlan) {
127 rcu_assign_pointer(vlan->tap, NULL); 146 int index = get_slot(vlan, q);
147
148 rcu_assign_pointer(vlan->taps[index], NULL);
128 rcu_assign_pointer(q->vlan, NULL); 149 rcu_assign_pointer(q->vlan, NULL);
129 sock_put(&q->sk); 150 sock_put(&q->sk);
151 --vlan->numvtaps;
130 } 152 }
131 153
132 spin_unlock(&macvtap_lock); 154 spin_unlock(&macvtap_lock);
@@ -136,39 +158,82 @@ static void macvtap_put_queue(struct macvtap_queue *q)
136} 158}
137 159
138/* 160/*
139 * Since we only support one queue, just dereference the pointer. 161 * Select a queue based on the rxq of the device on which this packet
162 * arrived. If the incoming device is not mq, calculate a flow hash
163 * to select a queue. If all fails, find the first available queue.
164 * Cache vlan->numvtaps since it can become zero during the execution
165 * of this function.
140 */ 166 */
141static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, 167static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
142 struct sk_buff *skb) 168 struct sk_buff *skb)
143{ 169{
144 struct macvlan_dev *vlan = netdev_priv(dev); 170 struct macvlan_dev *vlan = netdev_priv(dev);
171 struct macvtap_queue *tap = NULL;
172 int numvtaps = vlan->numvtaps;
173 __u32 rxq;
174
175 if (!numvtaps)
176 goto out;
177
178 if (likely(skb_rx_queue_recorded(skb))) {
179 rxq = skb_get_rx_queue(skb);
180
181 while (unlikely(rxq >= numvtaps))
182 rxq -= numvtaps;
183
184 tap = rcu_dereference(vlan->taps[rxq]);
185 if (tap)
186 goto out;
187 }
188
189 /* Check if we can use flow to select a queue */
190 rxq = skb_get_rxhash(skb);
191 if (rxq) {
192 tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
193 if (tap)
194 goto out;
195 }
145 196
146 return rcu_dereference(vlan->tap); 197 /* Everything failed - find first available queue */
198 for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
199 tap = rcu_dereference(vlan->taps[rxq]);
200 if (tap)
201 break;
202 }
203
204out:
205 return tap;
147} 206}
148 207
149/* 208/*
150 * The net_device is going away, give up the reference 209 * The net_device is going away, give up the reference
151 * that it holds on the queue (all the queues one day) 210 * that it holds on all queues and safely set the pointer
152 * and safely set the pointer from the queues to NULL. 211 * from the queues to NULL.
153 */ 212 */
154static void macvtap_del_queues(struct net_device *dev) 213static void macvtap_del_queues(struct net_device *dev)
155{ 214{
156 struct macvlan_dev *vlan = netdev_priv(dev); 215 struct macvlan_dev *vlan = netdev_priv(dev);
157 struct macvtap_queue *q; 216 struct macvtap_queue *q, *qlist[MAX_MACVTAP_QUEUES];
217 int i, j = 0;
158 218
219 /* macvtap_put_queue can free some slots, so go through all slots */
159 spin_lock(&macvtap_lock); 220 spin_lock(&macvtap_lock);
160 q = rcu_dereference(vlan->tap); 221 for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
161 if (!q) { 222 q = rcu_dereference(vlan->taps[i]);
162 spin_unlock(&macvtap_lock); 223 if (q) {
163 return; 224 qlist[j++] = q;
225 rcu_assign_pointer(vlan->taps[i], NULL);
226 rcu_assign_pointer(q->vlan, NULL);
227 vlan->numvtaps--;
228 }
164 } 229 }
165 230 BUG_ON(vlan->numvtaps != 0);
166 rcu_assign_pointer(vlan->tap, NULL);
167 rcu_assign_pointer(q->vlan, NULL);
168 spin_unlock(&macvtap_lock); 231 spin_unlock(&macvtap_lock);
169 232
170 synchronize_rcu(); 233 synchronize_rcu();
171 sock_put(&q->sk); 234
235 for (--j; j >= 0; j--)
236 sock_put(&qlist[j]->sk);
172} 237}
173 238
174/* 239/*
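
Editor's note: the new macvtap_get_queue() tries three strategies in order: the RX queue recorded on the skb, then a flow hash, then a linear scan for any occupied slot; numvtaps is sampled once since it can drop to zero mid-call. A condensed sketch of that selection logic, assuming the taps[]/numvtaps fields this patch introduces:

static struct macvtap_queue *pick_tap(struct macvlan_dev *vlan,
				      struct sk_buff *skb)
{
	unsigned int n = vlan->numvtaps;	/* sample once */
	struct macvtap_queue *tap = NULL;
	u32 rxq;

	if (!n)
		return NULL;

	if (skb_rx_queue_recorded(skb)) {
		tap = rcu_dereference(vlan->taps[skb_get_rx_queue(skb) % n]);
		if (tap)
			return tap;
	}

	rxq = skb_get_rxhash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % n]);
		if (tap)
			return tap;
	}

	/* Last resort: first occupied slot, if any. */
	for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
		tap = rcu_dereference(vlan->taps[rxq]);
		if (tap)
			break;
	}
	return tap;
}
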
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 42e3294671d7..60135aa55802 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -461,7 +461,7 @@ static int meth_tx_full(struct net_device *dev)
461{ 461{
462 struct meth_private *priv = netdev_priv(dev); 462 struct meth_private *priv = netdev_priv(dev);
463 463
464 return (priv->tx_count >= TX_RING_ENTRIES - 1); 464 return priv->tx_count >= TX_RING_ENTRIES - 1;
465} 465}
466 466
467static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) 467static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 1fd068e1d930..d1aa45a15854 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -6,4 +6,4 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o 6obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7 7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \ 8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o 9 en_resources.o en_netdev.o en_selftest.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8c8515619b8e..8f4bf1f07c11 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -74,7 +74,7 @@ void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
74 74
75u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align) 75u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
76{ 76{
77 u32 obj, i; 77 u32 obj;
78 78
79 if (likely(cnt == 1 && align == 1)) 79 if (likely(cnt == 1 && align == 1))
80 return mlx4_bitmap_alloc(bitmap); 80 return mlx4_bitmap_alloc(bitmap);
@@ -91,8 +91,7 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
91 } 91 }
92 92
93 if (obj < bitmap->max) { 93 if (obj < bitmap->max) {
94 for (i = 0; i < cnt; i++) 94 bitmap_set(bitmap->table, obj, cnt);
95 set_bit(obj + i, bitmap->table);
96 if (obj == bitmap->last) { 95 if (obj == bitmap->last) {
97 bitmap->last = (obj + cnt); 96 bitmap->last = (obj + cnt);
98 if (bitmap->last >= bitmap->max) 97 if (bitmap->last >= bitmap->max)
@@ -109,13 +108,10 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
109 108
110void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt) 109void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
111{ 110{
112 u32 i;
113
114 obj &= bitmap->max + bitmap->reserved_top - 1; 111 obj &= bitmap->max + bitmap->reserved_top - 1;
115 112
116 spin_lock(&bitmap->lock); 113 spin_lock(&bitmap->lock);
117 for (i = 0; i < cnt; i++) 114 bitmap_clear(bitmap->table, obj, cnt);
118 clear_bit(obj + i, bitmap->table);
119 bitmap->last = min(bitmap->last, obj); 115 bitmap->last = min(bitmap->last, obj);
120 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top) 116 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
121 & bitmap->mask; 117 & bitmap->mask;
@@ -125,8 +121,6 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
125int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, 121int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
126 u32 reserved_bot, u32 reserved_top) 122 u32 reserved_bot, u32 reserved_top)
127{ 123{
128 int i;
129
130 /* num must be a power of 2 */ 124 /* num must be a power of 2 */
131 if (num != roundup_pow_of_two(num)) 125 if (num != roundup_pow_of_two(num))
132 return -EINVAL; 126 return -EINVAL;
@@ -142,8 +136,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
142 if (!bitmap->table) 136 if (!bitmap->table)
143 return -ENOMEM; 137 return -ENOMEM;
144 138
145 for (i = 0; i < reserved_bot; ++i) 139 bitmap_set(bitmap->table, 0, reserved_bot);
146 set_bit(i, bitmap->table);
147 140
148 return 0; 141 return 0;
149} 142}
@@ -188,7 +181,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
188 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; 181 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
189 buf->npages = buf->nbufs; 182 buf->npages = buf->nbufs;
190 buf->page_shift = PAGE_SHIFT; 183 buf->page_shift = PAGE_SHIFT;
191 buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list, 184 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
192 GFP_KERNEL); 185 GFP_KERNEL);
193 if (!buf->page_list) 186 if (!buf->page_list)
194 return -ENOMEM; 187 return -ENOMEM;
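
Editor's note: bitmap_set() and bitmap_clear() from <linux/bitmap.h> act on a whole run of bits at once, which is both shorter and cheaper than the per-bit set_bit()/clear_bit() loops they replace here (word-at-a-time fills, no per-bit atomic operations). They are non-atomic, so the caller must serialize; mlx4 already holds bitmap->lock around both call sites. Minimal usage sketch:

#include <linux/bitmap.h>

#define DEMO_BITS 128
static DECLARE_BITMAP(demo, DEMO_BITS);

static void demo_reserve(unsigned int start, unsigned int cnt)
{
	bitmap_set(demo, start, cnt);	/* was: for (i...) set_bit(start + i, demo) */
}

static void demo_release(unsigned int start, unsigned int cnt)
{
	bitmap_clear(demo, start, cnt);
}
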
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index b275238fe70d..056152b3ff58 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -39,21 +39,6 @@
39#include "en_port.h" 39#include "en_port.h"
40 40
41 41
42static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
43{
44 int i;
45
46 priv->port_stats.lro_aggregated = 0;
47 priv->port_stats.lro_flushed = 0;
48 priv->port_stats.lro_no_desc = 0;
49
50 for (i = 0; i < priv->rx_ring_num; i++) {
51 priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
52 priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
53 priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
54 }
55}
56
57static void 42static void
58mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) 43mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
59{ 44{
@@ -112,7 +97,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
112 "tx_heartbeat_errors", "tx_window_errors", 97 "tx_heartbeat_errors", "tx_window_errors",
113 98
114 /* port statistics */ 99 /* port statistics */
115 "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets", 100 "tso_packets",
116 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed", 101 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
117 "rx_csum_good", "rx_csum_none", "tx_chksum_offload", 102 "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
118 103
@@ -125,6 +110,14 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
125#define NUM_MAIN_STATS 21 110#define NUM_MAIN_STATS 21
126#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS) 111#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
127 112
113static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
114	"Interrupt Test",
115 "Link Test",
116 "Speed Test",
117 "Register Test",
118 "Loopback Test",
119};
120
128static u32 mlx4_en_get_msglevel(struct net_device *dev) 121static u32 mlx4_en_get_msglevel(struct net_device *dev)
129{ 122{
130 return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable; 123 return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
@@ -146,10 +139,15 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
146{ 139{
147 struct mlx4_en_priv *priv = netdev_priv(dev); 140 struct mlx4_en_priv *priv = netdev_priv(dev);
148 141
149 if (sset != ETH_SS_STATS) 142 switch (sset) {
143 case ETH_SS_STATS:
144 return NUM_ALL_STATS +
145 (priv->tx_ring_num + priv->rx_ring_num) * 2;
146 case ETH_SS_TEST:
147 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.loopback_support) * 2;
148 default:
150 return -EOPNOTSUPP; 149 return -EOPNOTSUPP;
151 150 }
152 return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
153} 151}
154 152
155static void mlx4_en_get_ethtool_stats(struct net_device *dev, 153static void mlx4_en_get_ethtool_stats(struct net_device *dev,
@@ -161,8 +159,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
161 159
162 spin_lock_bh(&priv->stats_lock); 160 spin_lock_bh(&priv->stats_lock);
163 161
164 mlx4_en_update_lro_stats(priv);
165
166 for (i = 0; i < NUM_MAIN_STATS; i++) 162 for (i = 0; i < NUM_MAIN_STATS; i++)
167 data[index++] = ((unsigned long *) &priv->stats)[i]; 163 data[index++] = ((unsigned long *) &priv->stats)[i];
168 for (i = 0; i < NUM_PORT_STATS; i++) 164 for (i = 0; i < NUM_PORT_STATS; i++)
@@ -181,6 +177,12 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
181 177
182} 178}
183 179
180static void mlx4_en_self_test(struct net_device *dev,
181 struct ethtool_test *etest, u64 *buf)
182{
183 mlx4_en_ex_selftest(dev, &etest->flags, buf);
184}
185
184static void mlx4_en_get_strings(struct net_device *dev, 186static void mlx4_en_get_strings(struct net_device *dev,
185 uint32_t stringset, uint8_t *data) 187 uint32_t stringset, uint8_t *data)
186{ 188{
@@ -188,44 +190,76 @@ static void mlx4_en_get_strings(struct net_device *dev,
188 int index = 0; 190 int index = 0;
189 int i; 191 int i;
190 192
191 if (stringset != ETH_SS_STATS) 193 switch (stringset) {
192 return; 194 case ETH_SS_TEST:
193 195 for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
194 /* Add main counters */ 196 strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
195 for (i = 0; i < NUM_MAIN_STATS; i++) 197 if (priv->mdev->dev->caps.loopback_support)
196 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]); 198 for (; i < MLX4_EN_NUM_SELF_TEST; i++)
197 for (i = 0; i < NUM_PORT_STATS; i++) 199 strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
198 strcpy(data + (index++) * ETH_GSTRING_LEN, 200 break;
201
202 case ETH_SS_STATS:
203 /* Add main counters */
204 for (i = 0; i < NUM_MAIN_STATS; i++)
205 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
197	for (i = 0; i < NUM_PORT_STATS; i++)	206	for (i = 0; i < NUM_PORT_STATS; i++)
207 strcpy(data + (index++) * ETH_GSTRING_LEN,
199 main_strings[i + NUM_MAIN_STATS]); 208 main_strings[i + NUM_MAIN_STATS]);
200 for (i = 0; i < priv->tx_ring_num; i++) { 209 for (i = 0; i < priv->tx_ring_num; i++) {
201 sprintf(data + (index++) * ETH_GSTRING_LEN, 210 sprintf(data + (index++) * ETH_GSTRING_LEN,
202 "tx%d_packets", i); 211 "tx%d_packets", i);
203 sprintf(data + (index++) * ETH_GSTRING_LEN, 212 sprintf(data + (index++) * ETH_GSTRING_LEN,
204 "tx%d_bytes", i); 213 "tx%d_bytes", i);
205 } 214 }
206 for (i = 0; i < priv->rx_ring_num; i++) { 215 for (i = 0; i < priv->rx_ring_num; i++) {
207 sprintf(data + (index++) * ETH_GSTRING_LEN, 216 sprintf(data + (index++) * ETH_GSTRING_LEN,
208 "rx%d_packets", i); 217 "rx%d_packets", i);
209 sprintf(data + (index++) * ETH_GSTRING_LEN, 218 sprintf(data + (index++) * ETH_GSTRING_LEN,
210 "rx%d_bytes", i); 219 "rx%d_bytes", i);
211 } 220 }
212	for (i = 0; i < NUM_PKT_STATS; i++)	221	for (i = 0; i < NUM_PKT_STATS; i++)
213 strcpy(data + (index++) * ETH_GSTRING_LEN, 222 strcpy(data + (index++) * ETH_GSTRING_LEN,
214 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]); 223 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
224 break;
225 }
215} 226}
216 227
217static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 228static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
218{ 229{
230 struct mlx4_en_priv *priv = netdev_priv(dev);
231 int trans_type;
232
219 cmd->autoneg = AUTONEG_DISABLE; 233 cmd->autoneg = AUTONEG_DISABLE;
220 cmd->supported = SUPPORTED_10000baseT_Full; 234 cmd->supported = SUPPORTED_10000baseT_Full;
221 cmd->advertising = ADVERTISED_1000baseT_Full; 235 cmd->advertising = ADVERTISED_10000baseT_Full;
236
237 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
238 return -ENOMEM;
239
240 trans_type = priv->port_state.transciver;
222 if (netif_carrier_ok(dev)) { 241 if (netif_carrier_ok(dev)) {
223 cmd->speed = SPEED_10000; 242 cmd->speed = priv->port_state.link_speed;
224 cmd->duplex = DUPLEX_FULL; 243 cmd->duplex = DUPLEX_FULL;
225 } else { 244 } else {
226 cmd->speed = -1; 245 cmd->speed = -1;
227 cmd->duplex = -1; 246 cmd->duplex = -1;
228 } 247 }
248
249 if (trans_type > 0 && trans_type <= 0xC) {
250 cmd->port = PORT_FIBRE;
251 cmd->transceiver = XCVR_EXTERNAL;
252 cmd->supported |= SUPPORTED_FIBRE;
253 cmd->advertising |= ADVERTISED_FIBRE;
254 } else if (trans_type == 0x80 || trans_type == 0) {
255 cmd->port = PORT_TP;
256 cmd->transceiver = XCVR_INTERNAL;
257 cmd->supported |= SUPPORTED_TP;
258 cmd->advertising |= ADVERTISED_TP;
259 } else {
260 cmd->port = -1;
261 cmd->transceiver = -1;
262 }
229 return 0; 263 return 0;
230} 264}
231 265
@@ -343,8 +377,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
343 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); 377 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
344 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); 378 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
345 379
346 if (rx_size == priv->prof->rx_ring_size && 380 if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
347 tx_size == priv->prof->tx_ring_size) 381 priv->rx_ring[0].size) &&
382 tx_size == priv->tx_ring[0].size)
348 return 0; 383 return 0;
349 384
350 mutex_lock(&mdev->state_lock); 385 mutex_lock(&mdev->state_lock);
@@ -378,49 +413,13 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
378 struct ethtool_ringparam *param) 413 struct ethtool_ringparam *param)
379{ 414{
380 struct mlx4_en_priv *priv = netdev_priv(dev); 415 struct mlx4_en_priv *priv = netdev_priv(dev);
381 struct mlx4_en_dev *mdev = priv->mdev;
382 416
383 memset(param, 0, sizeof(*param)); 417 memset(param, 0, sizeof(*param));
384 param->rx_max_pending = MLX4_EN_MAX_RX_SIZE; 418 param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
385 param->tx_max_pending = MLX4_EN_MAX_TX_SIZE; 419 param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
386 param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size; 420 param->rx_pending = priv->port_up ?
387 param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size; 421 priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
388} 422 param->tx_pending = priv->tx_ring[0].size;
389
390static int mlx4_ethtool_op_set_flags(struct net_device *dev, u32 data)
391{
392 struct mlx4_en_priv *priv = netdev_priv(dev);
393 struct mlx4_en_dev *mdev = priv->mdev;
394 int rc = 0;
395 int changed = 0;
396
397 if (data & ~ETH_FLAG_LRO)
398 return -EOPNOTSUPP;
399
400 if (data & ETH_FLAG_LRO) {
401 if (mdev->profile.num_lro == 0)
402 return -EOPNOTSUPP;
403 if (!(dev->features & NETIF_F_LRO))
404 changed = 1;
405 } else if (dev->features & NETIF_F_LRO) {
406 changed = 1;
407 }
408
409 if (changed) {
410 if (netif_running(dev)) {
411 mutex_lock(&mdev->state_lock);
412 mlx4_en_stop_port(dev);
413 }
414 dev->features ^= NETIF_F_LRO;
415 if (netif_running(dev)) {
416 rc = mlx4_en_start_port(dev);
417 if (rc)
418 en_err(priv, "Failed to restart port\n");
419 mutex_unlock(&mdev->state_lock);
420 }
421 }
422
423 return rc;
424} 423}
425 424
426const struct ethtool_ops mlx4_en_ethtool_ops = { 425const struct ethtool_ops mlx4_en_ethtool_ops = {
@@ -441,6 +440,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
441 .get_strings = mlx4_en_get_strings, 440 .get_strings = mlx4_en_get_strings,
442 .get_sset_count = mlx4_en_get_sset_count, 441 .get_sset_count = mlx4_en_get_sset_count,
443 .get_ethtool_stats = mlx4_en_get_ethtool_stats, 442 .get_ethtool_stats = mlx4_en_get_ethtool_stats,
443 .self_test = mlx4_en_self_test,
444 .get_wol = mlx4_en_get_wol, 444 .get_wol = mlx4_en_get_wol,
445 .get_msglevel = mlx4_en_get_msglevel, 445 .get_msglevel = mlx4_en_get_msglevel,
446 .set_msglevel = mlx4_en_set_msglevel, 446 .set_msglevel = mlx4_en_set_msglevel,
@@ -451,7 +451,6 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
451 .get_ringparam = mlx4_en_get_ringparam, 451 .get_ringparam = mlx4_en_get_ringparam,
452 .set_ringparam = mlx4_en_set_ringparam, 452 .set_ringparam = mlx4_en_set_ringparam,
453 .get_flags = ethtool_op_get_flags, 453 .get_flags = ethtool_op_get_flags,
454 .set_flags = mlx4_ethtool_op_set_flags,
455}; 454};
456 455
457 456
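
Editor's note: wiring a self-test into ethtool, as this patch does, takes three pieces: a string table exposed via get_strings(ETH_SS_TEST), a matching count from get_sset_count(), and a .self_test callback that fills one u64 result per test and sets ETH_TEST_FL_FAILED on any failure. A skeleton with hypothetical names (demo_*; run_register_test() is a stub, not the mlx4 code):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const char demo_test_names[][ETH_GSTRING_LEN] = {
	"Link Test",
	"Register Test",
};
#define DEMO_NUM_TESTS ARRAY_SIZE(demo_test_names)

static u64 run_register_test(struct net_device *dev);	/* hypothetical */

static int demo_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return DEMO_NUM_TESTS;
	default:
		return -EOPNOTSUPP;
	}
}

static void demo_self_test(struct net_device *dev,
			   struct ethtool_test *etest, u64 *buf)
{
	buf[0] = netif_carrier_ok(dev) ? 0 : 1;	/* 0 means pass */
	buf[1] = run_register_test(dev);
	if (buf[0] || buf[1])
		etest->flags |= ETH_TEST_FL_FAILED;
}
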
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 97934f1ec53a..143906417048 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -63,15 +63,12 @@ static const char mlx4_en_version[] =
63 */ 63 */
64 64
65 65
66/* Use a XOR rathern than Toeplitz hash function for RSS */ 66/* Enable RSS TCP traffic */
67MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS"); 67MLX4_EN_PARM_INT(tcp_rss, 1,
68	68		"Enable RSS for incoming TCP traffic or disabled (0)");
69/* RSS hash type mask - default to <saddr, daddr, sport, dport> */ 69/* Enable RSS UDP traffic */
70MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask"); 70MLX4_EN_PARM_INT(udp_rss, 1,
71	71		"Enable RSS for incoming UDP traffic or disabled (0)");
72/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
73MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
74 "Number of LRO sessions per ring or disabled (0)");
75 72
76/* Priority pausing */ 73/* Priority pausing */
77MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." 74MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
@@ -107,9 +104,12 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
107 struct mlx4_en_profile *params = &mdev->profile; 104 struct mlx4_en_profile *params = &mdev->profile;
108 int i; 105 int i;
109 106
110 params->rss_xor = (rss_xor != 0); 107 params->tcp_rss = tcp_rss;
111 params->rss_mask = rss_mask & 0x1f; 108 params->udp_rss = udp_rss;
112 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS); 109 if (params->udp_rss && !mdev->dev->caps.udp_rss) {
110 mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
111 params->udp_rss = 0;
112 }
113 for (i = 1; i <= MLX4_MAX_PORTS; i++) { 113 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
114 params->prof[i].rx_pause = 1; 114 params->prof[i].rx_pause = 1;
115 params->prof[i].rx_ppp = pfcrx; 115 params->prof[i].rx_ppp = pfcrx;
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index a0d8a26f5a02..411bda581c04 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -109,7 +109,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
109 mutex_unlock(&mdev->state_lock); 109 mutex_unlock(&mdev->state_lock);
110} 110}
111 111
112static u64 mlx4_en_mac_to_u64(u8 *addr) 112u64 mlx4_en_mac_to_u64(u8 *addr)
113{ 113{
114 u64 mac = 0; 114 u64 mac = 0;
115 int i; 115 int i;
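The hunk only shows mlx4_en_mac_to_u64() losing its static qualifier so that en_tx.c can call it; the body falls outside the diff context. A minimal sketch of what such a helper does, assuming the usual big-endian fold of the six MAC bytes (the loop below is an assumption, not taken from this diff):

    u64 mlx4_en_mac_to_u64(u8 *addr)
    {
            u64 mac = 0;
            int i;

            /* addr[0] ends up in the most significant populated byte */
            for (i = 0; i < ETH_ALEN; i++) {
                    mac <<= 8;
                    mac |= addr[i];
            }
            return mac;
    }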
@@ -513,6 +513,10 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
513 513
514 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 514 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
515 } 515 }
516 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
517 queue_work(mdev->workqueue, &priv->mac_task);
518 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
519 }
516 mutex_unlock(&mdev->state_lock); 520 mutex_unlock(&mdev->state_lock);
517} 521}
518 522
@@ -528,10 +532,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
528 * report to system log */ 532 * report to system log */
529 if (priv->last_link_state != linkstate) { 533 if (priv->last_link_state != linkstate) {
530 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 534 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
531 en_dbg(LINK, priv, "Link Down\n"); 535 en_info(priv, "Link Down\n");
532 netif_carrier_off(priv->dev); 536 netif_carrier_off(priv->dev);
533 } else { 537 } else {
534 en_dbg(LINK, priv, "Link Up\n"); 538 en_info(priv, "Link Up\n");
535 netif_carrier_on(priv->dev); 539 netif_carrier_on(priv->dev);
536 } 540 }
537 } 541 }
@@ -653,6 +657,7 @@ int mlx4_en_start_port(struct net_device *dev)
653 en_err(priv, "Failed setting port mac\n"); 657 en_err(priv, "Failed setting port mac\n");
654 goto tx_err; 658 goto tx_err;
655 } 659 }
660 mdev->mac_removed[priv->port] = 0;
656 661
657 /* Init port */ 662 /* Init port */
658 en_dbg(HW, priv, "Initializing port\n"); 663 en_dbg(HW, priv, "Initializing port\n");
@@ -704,12 +709,12 @@ void mlx4_en_stop_port(struct net_device *dev)
704 netif_tx_stop_all_queues(dev); 709 netif_tx_stop_all_queues(dev);
705 netif_tx_unlock_bh(dev); 710 netif_tx_unlock_bh(dev);
706 711
707 /* close port*/ 712 /* Set port as not active */
708 priv->port_up = false; 713 priv->port_up = false;
709 mlx4_CLOSE_PORT(mdev->dev, priv->port);
710 714
711 /* Unregister Mac address for the port */ 715 /* Unregister Mac address for the port */
712 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); 716 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
717 mdev->mac_removed[priv->port] = 1;
713 718
714 /* Free TX Rings */ 719 /* Free TX Rings */
715 for (i = 0; i < priv->tx_ring_num; i++) { 720 for (i = 0; i < priv->tx_ring_num; i++) {
@@ -731,6 +736,9 @@ void mlx4_en_stop_port(struct net_device *dev)
731 msleep(1); 736 msleep(1);
732 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]); 737 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
733 } 738 }
739
740 /* close port */
741 mlx4_CLOSE_PORT(mdev->dev, priv->port);
734} 742}
735 743
736static void mlx4_en_restart(struct work_struct *work) 744static void mlx4_en_restart(struct work_struct *work)
@@ -1023,9 +1031,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1023 1031
1024 /* Set default MAC */ 1032 /* Set default MAC */
1025 dev->addr_len = ETH_ALEN; 1033 dev->addr_len = ETH_ALEN;
1026 for (i = 0; i < ETH_ALEN; i++) 1034 for (i = 0; i < ETH_ALEN; i++) {
1027 dev->dev_addr[ETH_ALEN - 1 - i] = 1035 dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
1028 (u8) (priv->mac >> (8 * i)); 1036 dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
1037 }
1029 1038
1030 /* 1039 /*
1031 * Set driver features 1040 * Set driver features
@@ -1038,8 +1047,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1038 dev->features |= NETIF_F_HW_VLAN_TX | 1047 dev->features |= NETIF_F_HW_VLAN_TX |
1039 NETIF_F_HW_VLAN_RX | 1048 NETIF_F_HW_VLAN_RX |
1040 NETIF_F_HW_VLAN_FILTER; 1049 NETIF_F_HW_VLAN_FILTER;
1041 if (mdev->profile.num_lro) 1050 dev->features |= NETIF_F_GRO;
1042 dev->features |= NETIF_F_LRO;
1043 if (mdev->LSO_support) { 1051 if (mdev->LSO_support) {
1044 dev->features |= NETIF_F_TSO; 1052 dev->features |= NETIF_F_TSO;
1045 dev->features |= NETIF_F_TSO6; 1053 dev->features |= NETIF_F_TSO6;
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index a29abe845d2e..aa3ef2aee5bf 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -142,6 +142,38 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
142 return err; 142 return err;
143} 143}
144 144
145int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
146{
147 struct mlx4_en_query_port_context *qport_context;
148 struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
149 struct mlx4_en_port_state *state = &priv->port_state;
150 struct mlx4_cmd_mailbox *mailbox;
151 int err;
152
153 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
154 if (IS_ERR(mailbox))
155 return PTR_ERR(mailbox);
156 memset(mailbox->buf, 0, sizeof(*qport_context));
157 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
158 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
159 if (err)
160 goto out;
161 qport_context = mailbox->buf;
162
163 /* This command is always accessed from Ethtool context
164 * already synchronized, no need for locking */
165 state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
166 if ((qport_context->link_speed & MLX4_EN_SPEED_MASK) ==
167 MLX4_EN_1G_SPEED)
168 state->link_speed = 1000;
169 else
170 state->link_speed = 10000;
171 state->transciver = qport_context->transceiver;
172
173out:
174 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
175 return err;
176}
145 177
146int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) 178int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
147{ 179{
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index e6477f12beb5..f6511aa2b7df 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -84,6 +84,20 @@ enum {
84 MLX4_MCAST_ENABLE = 2, 84 MLX4_MCAST_ENABLE = 2,
85}; 85};
86 86
87struct mlx4_en_query_port_context {
88 u8 link_up;
89#define MLX4_EN_LINK_UP_MASK 0x80
90 u8 reserved;
91 __be16 mtu;
92 u8 reserved2;
93 u8 link_speed;
94#define MLX4_EN_SPEED_MASK 0x3
95#define MLX4_EN_1G_SPEED 0x2
96 u16 reserved3[5];
97 __be64 mac;
98 u8 transceiver;
99};
100
87 101
88struct mlx4_en_stat_out_mbox { 102struct mlx4_en_stat_out_mbox {
89 /* Received frames with a length of 64 octets */ 103 /* Received frames with a length of 64 octets */
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 8e2fcb7103c3..570f2508fb30 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -42,18 +42,6 @@
42#include "mlx4_en.h" 42#include "mlx4_en.h"
43 43
44 44
45static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
46 void **ip_hdr, void **tcpudp_hdr,
47 u64 *hdr_flags, void *priv)
48{
49 *mac_hdr = page_address(frags->page) + frags->page_offset;
50 *ip_hdr = *mac_hdr + ETH_HLEN;
51 *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
52 *hdr_flags = LRO_IPV4 | LRO_TCP;
53
54 return 0;
55}
56
57static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv, 45static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
58 struct mlx4_en_rx_desc *rx_desc, 46 struct mlx4_en_rx_desc *rx_desc,
59 struct skb_frag_struct *skb_frags, 47 struct skb_frag_struct *skb_frags,
@@ -251,7 +239,6 @@ reduce_rings:
251 ring->prod--; 239 ring->prod--;
252 mlx4_en_free_rx_desc(priv, ring, ring->actual_size); 240 mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
253 } 241 }
254 ring->size_mask = ring->actual_size - 1;
255 } 242 }
256 243
257 return 0; 244 return 0;
@@ -313,28 +300,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
313 } 300 }
314 ring->buf = ring->wqres.buf.direct.buf; 301 ring->buf = ring->wqres.buf.direct.buf;
315 302
316 /* Configure lro mngr */
317 memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
318 ring->lro.dev = priv->dev;
319 ring->lro.features = LRO_F_NAPI;
320 ring->lro.frag_align_pad = NET_IP_ALIGN;
321 ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
322 ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
323 ring->lro.max_desc = mdev->profile.num_lro;
324 ring->lro.max_aggr = MAX_SKB_FRAGS;
325 ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
326 sizeof(struct net_lro_desc),
327 GFP_KERNEL);
328 if (!ring->lro.lro_arr) {
329 en_err(priv, "Failed to allocate lro array\n");
330 goto err_map;
331 }
332 ring->lro.get_frag_header = mlx4_en_get_frag_header;
333
334 return 0; 303 return 0;
335 304
336err_map:
337 mlx4_en_unmap_buffer(&ring->wqres.buf);
338err_hwq: 305err_hwq:
339 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); 306 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
340err_ring: 307err_ring:
@@ -389,6 +356,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
389 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { 356 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
390 ring = &priv->rx_ring[ring_ind]; 357 ring = &priv->rx_ring[ring_ind];
391 358
359 ring->size_mask = ring->actual_size - 1;
392 mlx4_en_update_rx_prod_db(ring); 360 mlx4_en_update_rx_prod_db(ring);
393 } 361 }
394 362
@@ -412,7 +380,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
412{ 380{
413 struct mlx4_en_dev *mdev = priv->mdev; 381 struct mlx4_en_dev *mdev = priv->mdev;
414 382
415 kfree(ring->lro.lro_arr);
416 mlx4_en_unmap_buffer(&ring->wqres.buf); 383 mlx4_en_unmap_buffer(&ring->wqres.buf);
417 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE); 384 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
418 vfree(ring->rx_info); 385 vfree(ring->rx_info);
@@ -459,7 +426,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
459 goto fail; 426 goto fail;
460 427
461 /* Unmap buffer */ 428 /* Unmap buffer */
462 pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, 429 pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size,
463 PCI_DMA_FROMDEVICE); 430 PCI_DMA_FROMDEVICE);
464 } 431 }
465 /* Adjust size of last fragment to match actual length */ 432 /* Adjust size of last fragment to match actual length */
@@ -541,6 +508,21 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
541 return skb; 508 return skb;
542} 509}
543 510
511static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
512{
513 int i;
514 int offset = ETH_HLEN;
515
516 for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
517 if (*(skb->data + offset) != (unsigned char) (i & 0xff))
518 goto out_loopback;
519 }
520 /* Loopback found */
521 priv->loopback_ok = 1;
522
523out_loopback:
524 dev_kfree_skb_any(skb);
525}
544 526
545int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) 527int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
546{ 528{
@@ -548,7 +530,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
548 struct mlx4_cqe *cqe; 530 struct mlx4_cqe *cqe;
549 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring]; 531 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
550 struct skb_frag_struct *skb_frags; 532 struct skb_frag_struct *skb_frags;
551 struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
552 struct mlx4_en_rx_desc *rx_desc; 533 struct mlx4_en_rx_desc *rx_desc;
553 struct sk_buff *skb; 534 struct sk_buff *skb;
554 int index; 535 int index;
@@ -608,37 +589,35 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
608 * - TCP/IP (v4) 589 * - TCP/IP (v4)
609 * - without IP options 590 * - without IP options
610 * - not an IP fragment */ 591 * - not an IP fragment */
611 if (mlx4_en_can_lro(cqe->status) && 592 if (dev->features & NETIF_F_GRO) {
612 dev->features & NETIF_F_LRO) { 593 struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
594 if (!gro_skb)
595 goto next;
613 596
614 nr = mlx4_en_complete_rx_desc( 597 nr = mlx4_en_complete_rx_desc(
615 priv, rx_desc, 598 priv, rx_desc,
616 skb_frags, lro_frags, 599 skb_frags, skb_shinfo(gro_skb)->frags,
617 ring->page_alloc, length); 600 ring->page_alloc, length);
618 if (!nr) 601 if (!nr)
619 goto next; 602 goto next;
620 603
604 skb_shinfo(gro_skb)->nr_frags = nr;
605 gro_skb->len = length;
606 gro_skb->data_len = length;
607 gro_skb->truesize += length;
608 gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
609
621 if (priv->vlgrp && (cqe->vlan_my_qpn & 610 if (priv->vlgrp && (cqe->vlan_my_qpn &
622 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) { 611 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)))
623 lro_vlan_hwaccel_receive_frags( 612 vlan_gro_frags(&cq->napi, priv->vlgrp, be16_to_cpu(cqe->sl_vid));
624 &ring->lro, lro_frags, 613 else
625 length, length, 614 napi_gro_frags(&cq->napi);
626 priv->vlgrp,
627 be16_to_cpu(cqe->sl_vid),
628 NULL, 0);
629 } else
630 lro_receive_frags(&ring->lro,
631 lro_frags,
632 length,
633 length,
634 NULL, 0);
635 615
636 goto next; 616 goto next;
637 } 617 }
638 618
639 /* LRO not possible, complete processing here */ 619 /* LRO not possible, complete processing here */
640 ip_summed = CHECKSUM_UNNECESSARY; 620 ip_summed = CHECKSUM_UNNECESSARY;
641 INC_PERF_COUNTER(priv->pstats.lro_misses);
642 } else { 621 } else {
643 ip_summed = CHECKSUM_NONE; 622 ip_summed = CHECKSUM_NONE;
644 priv->port_stats.rx_chksum_none++; 623 priv->port_stats.rx_chksum_none++;
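The LRO-to-GRO conversion above follows the standard napi_gro_frags() contract: napi_get_frags() hands back an skb owned by the NAPI context, the driver attaches its page fragments and fixes up the length fields, and napi_gro_frags() then consumes the skb. A condensed sketch of the pattern, using the names from the hunk:

    struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
    if (!gro_skb)
            goto next;                      /* allocation failure: drop */
    /* ... fill skb_shinfo(gro_skb)->frags[], nr_frags, len, data_len ... */
    gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
    napi_gro_frags(&cq->napi);              /* skb now belongs to GRO */

The driver must not touch gro_skb after napi_gro_frags() returns; GRO either merges it into an existing flow or delivers it up the stack.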
@@ -655,6 +634,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
655 goto next; 634 goto next;
656 } 635 }
657 636
637 if (unlikely(priv->validate_loopback)) {
638 validate_loopback(priv, skb);
639 goto next;
640 }
641
658 skb->ip_summed = ip_summed; 642 skb->ip_summed = ip_summed;
659 skb->protocol = eth_type_trans(skb, dev); 643 skb->protocol = eth_type_trans(skb, dev);
660 skb_record_rx_queue(skb, cq->ring); 644 skb_record_rx_queue(skb, cq->ring);
@@ -674,14 +658,10 @@ next:
674 if (++polled == budget) { 658 if (++polled == budget) {
675 /* We are here because we reached the NAPI budget - 659 /* We are here because we reached the NAPI budget -
676 * flush only pending LRO sessions */ 660 * flush only pending LRO sessions */
677 lro_flush_all(&ring->lro);
678 goto out; 661 goto out;
679 } 662 }
680 } 663 }
681 664
682 /* If CQ is empty flush all LRO sessions unconditionally */
683 lro_flush_all(&ring->lro);
684
685out: 665out:
686 AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); 666 AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
687 mlx4_cq_set_ci(&cq->mcq); 667 mlx4_cq_set_ci(&cq->mcq);
@@ -816,7 +796,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
816 qp->event = mlx4_en_sqp_event; 796 qp->event = mlx4_en_sqp_event;
817 797
818 memset(context, 0, sizeof *context); 798 memset(context, 0, sizeof *context);
819 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 0, 0, 799 mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
820 qpn, ring->cqn, context); 800 qpn, ring->cqn, context);
821 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); 801 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
822 802
@@ -839,8 +819,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
839 struct mlx4_qp_context context; 819 struct mlx4_qp_context context;
840 struct mlx4_en_rss_context *rss_context; 820 struct mlx4_en_rss_context *rss_context;
841 void *ptr; 821 void *ptr;
842 int rss_xor = mdev->profile.rss_xor; 822 u8 rss_mask = 0x3f;
843 u8 rss_mask = mdev->profile.rss_mask;
844 int i, qpn; 823 int i, qpn;
845 int err = 0; 824 int err = 0;
846 int good_qps = 0; 825 int good_qps = 0;
@@ -886,9 +865,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
886 rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 | 865 rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
887 (rss_map->base_qpn)); 866 (rss_map->base_qpn));
888 rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn); 867 rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
889 rss_context->hash_fn = rss_xor & 0x3; 868 rss_context->flags = rss_mask;
890 rss_context->flags = rss_mask << 2;
891 869
870 if (priv->mdev->profile.udp_rss)
871 rss_context->base_qpn_udp = rss_context->default_qpn;
892 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context, 872 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
893 &rss_map->indir_qp, &rss_map->indir_state); 873 &rss_map->indir_qp, &rss_map->indir_state);
894 if (err) 874 if (err)
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
new file mode 100644
index 000000000000..9c91a92da705
--- /dev/null
+++ b/drivers/net/mlx4/en_selftest.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/ethtool.h>
36#include <linux/netdevice.h>
37#include <linux/delay.h>
38#include <linux/mlx4/driver.h>
39
40#include "mlx4_en.h"
41
42
43static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
44{
45 return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
46 MLX4_CMD_TIME_CLASS_A);
47}
48
49static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
50{
51 struct sk_buff *skb;
52 struct ethhdr *ethh;
53 unsigned char *packet;
54 unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
55 unsigned int i;
56 int err;
57
58
59 /* build the pkt before xmit */
60 skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
61 if (!skb) {
62 en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
63 return -ENOMEM;
64 }
65 skb_reserve(skb, NET_IP_ALIGN);
66
67 ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
68 packet = (unsigned char *)skb_put(skb, packet_size);
69 memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
70 memset(ethh->h_source, 0, ETH_ALEN);
71 ethh->h_proto = htons(ETH_P_ARP);
72 skb_set_mac_header(skb, 0);
73 for (i = 0; i < packet_size; ++i) /* fill our packet */
74 packet[i] = (unsigned char)(i & 0xff);
75
76 /* xmit the pkt */
77 err = mlx4_en_xmit(skb, priv->dev);
78 return err;
79}
80
81static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
82{
83 u32 loopback_ok = 0;
84 int i;
85
86
87 priv->loopback_ok = 0;
88 priv->validate_loopback = 1;
89
90 /* xmit */
91 if (mlx4_en_test_loopback_xmit(priv)) {
92 en_err(priv, "Transmitting loopback packet failed\n");
93 goto mlx4_en_test_loopback_exit;
94 }
95
96 /* polling for result */
97 for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
98 msleep(MLX4_EN_LOOPBACK_TIMEOUT);
99 if (priv->loopback_ok) {
100 loopback_ok = 1;
101 break;
102 }
103 }
104 if (!loopback_ok)
105 en_err(priv, "Loopback packet didn't arrive\n");
106
107mlx4_en_test_loopback_exit:
108
109 priv->validate_loopback = 0;
110 return !loopback_ok;
111}
112
113
114static int mlx4_en_test_link(struct mlx4_en_priv *priv)
115{
116 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
117 return -ENOMEM;
118 if (priv->port_state.link_state == 1)
119 return 0;
120 else
121 return 1;
122}
123
124static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
125{
126
127 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
128 return -ENOMEM;
129
130 /* The device currently only supports 10G speed */
131 if (priv->port_state.link_speed != SPEED_10000)
132 return priv->port_state.link_speed;
133 return 0;
134}
135
136
137void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
138{
139 struct mlx4_en_priv *priv = netdev_priv(dev);
140 struct mlx4_en_dev *mdev = priv->mdev;
141 struct mlx4_en_tx_ring *tx_ring;
142 int i, carrier_ok;
143
144 memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
145
146 if (*flags & ETH_TEST_FL_OFFLINE) {
147 /* disable the interface */
148 carrier_ok = netif_carrier_ok(dev);
149
150 netif_carrier_off(dev);
151retry_tx:
152 /* Wait until all tx queues are empty;
153 * there should not be any additional incoming traffic
154 * since we turned the carrier off */
155 msleep(200);
156 for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
157 tx_ring = &priv->tx_ring[i];
158 if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
159 goto retry_tx;
160 }
161
162 if (priv->mdev->dev->caps.loopback_support) {
163 buf[3] = mlx4_en_test_registers(priv);
164 buf[4] = mlx4_en_test_loopback(priv);
165 }
166
167 if (carrier_ok)
168 netif_carrier_on(dev);
169
170 }
171 buf[0] = mlx4_test_interrupts(mdev->dev);
172 buf[1] = mlx4_en_test_link(priv);
173 buf[2] = mlx4_en_test_speed(priv);
174
175 for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
176 if (buf[i])
177 *flags |= ETH_TEST_FL_FAILED;
178 }
179}
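mlx4_en_ex_selftest() fills five u64 result slots: buf[0] interrupts, buf[1] link, buf[2] speed, and, for offline runs on hardware with loopback support, buf[3] registers and buf[4] loopback. The loopback poll above gives the packet at most MLX4_EN_LOOPBACK_RETRIES * MLX4_EN_LOOPBACK_TIMEOUT = 5 * 100 ms = 500 ms to arrive. A hedged sketch of the matching ethtool string table the driver would expose from en_ethtool.c (hypothetical labels, not part of this diff; only the order relative to buf[] matters):

    /* Hypothetical names; order must match the buf[] indices above. */
    static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
            "Interrupt Test",       /* buf[0] */
            "Link Test",            /* buf[1] */
            "Speed Test",           /* buf[2] */
            "Register Test",        /* buf[3] */
            "Loopback Test",        /* buf[4] */
    };

From userspace the offline variant is reached with "ethtool -t ethX offline".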
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 580968f304eb..98dd620042a8 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -38,6 +38,7 @@
38#include <linux/skbuff.h> 38#include <linux/skbuff.h>
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
41#include <linux/tcp.h>
41 42
42#include "mlx4_en.h" 43#include "mlx4_en.h"
43 44
@@ -600,6 +601,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
600 struct mlx4_wqe_data_seg *data; 601 struct mlx4_wqe_data_seg *data;
601 struct skb_frag_struct *frag; 602 struct skb_frag_struct *frag;
602 struct mlx4_en_tx_info *tx_info; 603 struct mlx4_en_tx_info *tx_info;
604 struct ethhdr *ethh;
605 u64 mac;
606 u32 mac_l, mac_h;
603 int tx_ind = 0; 607 int tx_ind = 0;
604 int nr_txbb; 608 int nr_txbb;
605 int desc_size; 609 int desc_size;
@@ -612,6 +616,9 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
612 int lso_header_size; 616 int lso_header_size;
613 void *fragptr; 617 void *fragptr;
614 618
619 if (!priv->port_up)
620 goto tx_drop;
621
615 real_size = get_real_size(skb, dev, &lso_header_size); 622 real_size = get_real_size(skb, dev, &lso_header_size);
616 if (unlikely(!real_size)) 623 if (unlikely(!real_size))
617 goto tx_drop; 624 goto tx_drop;
@@ -676,6 +683,19 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
676 priv->port_stats.tx_chksum_offload++; 683 priv->port_stats.tx_chksum_offload++;
677 } 684 }
678 685
686 if (unlikely(priv->validate_loopback)) {
687 /* Copy dst mac address to wqe */
688 skb_reset_mac_header(skb);
689 ethh = eth_hdr(skb);
690 if (ethh && ethh->h_dest) {
691 mac = mlx4_en_mac_to_u64(ethh->h_dest);
692 mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
693 mac_l = (u32) (mac & 0xffffffff);
694 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
695 tx_desc->ctrl.imm = cpu_to_be32(mac_l);
696 }
697 }
698
679 /* Handle LSO (TSO) packets */ 699 /* Handle LSO (TSO) packets */
680 if (lso_header_size) { 700 if (lso_header_size) {
681 /* Mark opcode as LSO */ 701 /* Mark opcode as LSO */
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 6d7b2bf210ce..552d0fce6f67 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -699,3 +699,47 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
699 699
700 kfree(priv->eq_table.uar_map); 700 kfree(priv->eq_table.uar_map);
701} 701}
702
703/* A test that verifies that we can accept interrupts on all
704 * the irq vectors of the device.
705 * Interrupts are checked using the NOP command.
706 */
707int mlx4_test_interrupts(struct mlx4_dev *dev)
708{
709 struct mlx4_priv *priv = mlx4_priv(dev);
710 int i;
711 int err;
712
713 err = mlx4_NOP(dev);
714 /* When not in MSI-X, there is only one IRQ to check */
715 if (!(dev->flags & MLX4_FLAG_MSI_X))
716 return err;
717
718 /* Loop over all completion vectors: for each vector, check
719 * whether it works by mapping command completions to that vector
720 * and performing a NOP command.
721 */
722 for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
723 /* Temporarily use polling for command completions */
724 mlx4_cmd_use_polling(dev);
725
726 /* Map the new eq to handle all asynchronous events */
727 err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
728 priv->eq_table.eq[i].eqn);
729 if (err) {
730 mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
731 mlx4_cmd_use_events(dev);
732 break;
733 }
734
735 /* Go back to using events */
736 mlx4_cmd_use_events(dev);
737 err = mlx4_NOP(dev);
738 }
739
740 /* Return to default */
741 mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
742 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
743 return err;
744}
745EXPORT_SYMBOL(mlx4_test_interrupts);
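mlx4_test_interrupts() uses mlx4_NOP() as its probe command. A sketch of what that wrapper plausibly looks like (an assumption; its definition is not part of this diff):

    int mlx4_NOP(struct mlx4_dev *dev)
    {
            /* Input modifier 0x1f: finish as soon as possible;
             * a 100 ms timeout is ample for a no-op. */
            return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
    }

The test thus exercises each completion vector end to end: remap, issue a NOP, and verify that its completion interrupt actually arrives on that vector.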
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 04f42ae1eda0..b716e1a1b298 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -141,6 +141,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
141 struct mlx4_cmd_mailbox *mailbox; 141 struct mlx4_cmd_mailbox *mailbox;
142 u32 *outbox; 142 u32 *outbox;
143 u8 field; 143 u8 field;
144 u32 field32;
144 u16 size; 145 u16 size;
145 u16 stat_rate; 146 u16 stat_rate;
146 int err; 147 int err;
@@ -178,6 +179,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
178#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b 179#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
179#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c 180#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
180#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 181#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
182#define QUERY_DEV_CAP_UDP_RSS_OFFSET 0x42
183#define QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET 0x43
181#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 184#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
182#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 185#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
183#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 186#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -268,6 +271,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
268 dev_cap->max_msg_sz = 1 << (field & 0x1f); 271 dev_cap->max_msg_sz = 1 << (field & 0x1f);
269 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); 272 MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
270 dev_cap->stat_rate_support = stat_rate; 273 dev_cap->stat_rate_support = stat_rate;
274 MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
275 dev_cap->udp_rss = field & 0x1;
276 MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
277 dev_cap->loopback_support = field & 0x1;
271 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 278 MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
272 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 279 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
273 dev_cap->reserved_uars = field >> 4; 280 dev_cap->reserved_uars = field >> 4;
@@ -365,6 +372,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
365#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a 372#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
366#define QUERY_PORT_MAX_VL_OFFSET 0x0b 373#define QUERY_PORT_MAX_VL_OFFSET 0x0b
367#define QUERY_PORT_MAC_OFFSET 0x10 374#define QUERY_PORT_MAC_OFFSET 0x10
375#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
376#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
377#define QUERY_PORT_TRANS_CODE_OFFSET 0x20
368 378
369 for (i = 1; i <= dev_cap->num_ports; ++i) { 379 for (i = 1; i <= dev_cap->num_ports; ++i) {
370 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, 380 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
@@ -388,6 +398,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
388 dev_cap->log_max_vlans[i] = field >> 4; 398 dev_cap->log_max_vlans[i] = field >> 4;
389 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET); 399 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
390 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET); 400 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
401 MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
402 dev_cap->trans_type[i] = field32 >> 24;
403 dev_cap->vendor_oui[i] = field32 & 0xffffff;
404 MLX4_GET(dev_cap->wavelength[i], outbox, QUERY_PORT_WAVELENGTH_OFFSET);
405 MLX4_GET(dev_cap->trans_code[i], outbox, QUERY_PORT_TRANS_CODE_OFFSET);
391 } 406 }
392 } 407 }
393 408
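The new capability and port fields are extracted from the firmware mailbox with MLX4_GET, which selects a byte-swap by destination size. A sketch of the helper as defined earlier in fw.c (assumed shape, outside this diff):

    #define MLX4_GET(dest, source, offset)                          \
            do {                                                    \
                    void *__p = (char *)(source) + (offset);        \
                    switch (sizeof(dest)) {                         \
                    case 1: (dest) = *(u8 *)__p;        break;      \
                    case 2: (dest) = be16_to_cpup(__p); break;      \
                    case 4: (dest) = be32_to_cpup(__p); break;      \
                    case 8: (dest) = be64_to_cpup(__p); break;      \
                    }                                               \
            } while (0)

This is why udp_rss and loopback_support land as plain host-endian ints even though the mailbox contents are big-endian.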
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 526d7f30c041..65cc72eb899d 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -73,7 +73,13 @@ struct mlx4_dev_cap {
73 int max_pkeys[MLX4_MAX_PORTS + 1]; 73 int max_pkeys[MLX4_MAX_PORTS + 1];
74 u64 def_mac[MLX4_MAX_PORTS + 1]; 74 u64 def_mac[MLX4_MAX_PORTS + 1];
75 u16 eth_mtu[MLX4_MAX_PORTS + 1]; 75 u16 eth_mtu[MLX4_MAX_PORTS + 1];
76 int trans_type[MLX4_MAX_PORTS + 1];
77 int vendor_oui[MLX4_MAX_PORTS + 1];
78 u16 wavelength[MLX4_MAX_PORTS + 1];
79 u64 trans_code[MLX4_MAX_PORTS + 1];
76 u16 stat_rate_support; 80 u16 stat_rate_support;
81 int udp_rss;
82 int loopback_support;
77 u32 flags; 83 u32 flags;
78 int reserved_uars; 84 int reserved_uars;
79 int uar_size; 85 int uar_size;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 5102ab1ac561..569fa3df381f 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -184,6 +184,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
184 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; 184 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
185 dev->caps.def_mac[i] = dev_cap->def_mac[i]; 185 dev->caps.def_mac[i] = dev_cap->def_mac[i];
186 dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; 186 dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
187 dev->caps.trans_type[i] = dev_cap->trans_type[i];
188 dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
189 dev->caps.wavelength[i] = dev_cap->wavelength[i];
190 dev->caps.trans_code[i] = dev_cap->trans_code[i];
187 } 191 }
188 192
189 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 193 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
@@ -221,6 +225,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
221 dev->caps.bmme_flags = dev_cap->bmme_flags; 225 dev->caps.bmme_flags = dev_cap->bmme_flags;
222 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 226 dev->caps.reserved_lkey = dev_cap->reserved_lkey;
223 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 227 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
228 dev->caps.udp_rss = dev_cap->udp_rss;
229 dev->caps.loopback_support = dev_cap->loopback_support;
224 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 230 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
225 231
226 dev->caps.log_num_macs = log_num_mac; 232 dev->caps.log_num_macs = log_num_mac;
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 449210994ee9..1fc16ab7ad2f 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -38,19 +38,19 @@
38#include <linux/list.h> 38#include <linux/list.h>
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/netdevice.h> 40#include <linux/netdevice.h>
41#include <linux/inet_lro.h>
42 41
43#include <linux/mlx4/device.h> 42#include <linux/mlx4/device.h>
44#include <linux/mlx4/qp.h> 43#include <linux/mlx4/qp.h>
45#include <linux/mlx4/cq.h> 44#include <linux/mlx4/cq.h>
46#include <linux/mlx4/srq.h> 45#include <linux/mlx4/srq.h>
47#include <linux/mlx4/doorbell.h> 46#include <linux/mlx4/doorbell.h>
47#include <linux/mlx4/cmd.h>
48 48
49#include "en_port.h" 49#include "en_port.h"
50 50
51#define DRV_NAME "mlx4_en" 51#define DRV_NAME "mlx4_en"
52#define DRV_VERSION "1.4.1.1" 52#define DRV_VERSION "1.5.1.6"
53#define DRV_RELDATE "June 2009" 53#define DRV_RELDATE "August 2010"
54 54
55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) 55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
56 56
@@ -61,7 +61,6 @@
61 61
62#define MLX4_EN_PAGE_SHIFT 12 62#define MLX4_EN_PAGE_SHIFT 12
63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) 63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
64#define MAX_TX_RINGS 16
65#define MAX_RX_RINGS 16 64#define MAX_RX_RINGS 16
66#define TXBB_SIZE 64 65#define TXBB_SIZE 64
67#define HEADROOM (2048 / TXBB_SIZE + 1) 66#define HEADROOM (2048 / TXBB_SIZE + 1)
@@ -107,6 +106,7 @@ enum {
107#define MLX4_EN_SMALL_PKT_SIZE 64 106#define MLX4_EN_SMALL_PKT_SIZE 64
108#define MLX4_EN_NUM_TX_RINGS 8 107#define MLX4_EN_NUM_TX_RINGS 8
109#define MLX4_EN_NUM_PPP_RINGS 8 108#define MLX4_EN_NUM_PPP_RINGS 8
109#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
110#define MLX4_EN_DEF_TX_RING_SIZE 512 110#define MLX4_EN_DEF_TX_RING_SIZE 512
111#define MLX4_EN_DEF_RX_RING_SIZE 1024 111#define MLX4_EN_DEF_RX_RING_SIZE 1024
112 112
@@ -139,10 +139,14 @@ enum {
139 139
140#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) 140#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
141#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) 141#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
142#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
142 143
143#define MLX4_EN_MIN_MTU 46 144#define MLX4_EN_MIN_MTU 46
144#define ETH_BCAST 0xffffffffffffULL 145#define ETH_BCAST 0xffffffffffffULL
145 146
147#define MLX4_EN_LOOPBACK_RETRIES 5
148#define MLX4_EN_LOOPBACK_TIMEOUT 100
149
146#ifdef MLX4_EN_PERF_STAT 150#ifdef MLX4_EN_PERF_STAT
147/* Number of samples to 'average' */ 151/* Number of samples to 'average' */
148#define AVG_SIZE 128 152#define AVG_SIZE 128
@@ -249,7 +253,6 @@ struct mlx4_en_rx_desc {
249struct mlx4_en_rx_ring { 253struct mlx4_en_rx_ring {
250 struct mlx4_hwq_resources wqres; 254 struct mlx4_hwq_resources wqres;
251 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS]; 255 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
252 struct net_lro_mgr lro;
253 u32 size ; /* number of Rx descs*/ 256 u32 size ; /* number of Rx descs*/
254 u32 actual_size; 257 u32 actual_size;
255 u32 size_mask; 258 u32 size_mask;
@@ -313,7 +316,8 @@ struct mlx4_en_port_profile {
313 316
314struct mlx4_en_profile { 317struct mlx4_en_profile {
315 int rss_xor; 318 int rss_xor;
316 int num_lro; 319 int tcp_rss;
320 int udp_rss;
317 u8 rss_mask; 321 u8 rss_mask;
318 u32 active_ports; 322 u32 active_ports;
319 u32 small_pkt_int; 323 u32 small_pkt_int;
@@ -337,6 +341,7 @@ struct mlx4_en_dev {
337 struct mlx4_mr mr; 341 struct mlx4_mr mr;
338 u32 priv_pdn; 342 u32 priv_pdn;
339 spinlock_t uar_lock; 343 spinlock_t uar_lock;
344 u8 mac_removed[MLX4_MAX_PORTS + 1];
340}; 345};
341 346
342 347
@@ -355,6 +360,13 @@ struct mlx4_en_rss_context {
355 u8 hash_fn; 360 u8 hash_fn;
356 u8 flags; 361 u8 flags;
357 __be32 rss_key[10]; 362 __be32 rss_key[10];
363 __be32 base_qpn_udp;
364};
365
366struct mlx4_en_port_state {
367 int link_state;
368 int link_speed;
369 int transciver;
358}; 370};
359 371
360struct mlx4_en_pkt_stats { 372struct mlx4_en_pkt_stats {
@@ -365,9 +377,6 @@ struct mlx4_en_pkt_stats {
365}; 377};
366 378
367struct mlx4_en_port_stats { 379struct mlx4_en_port_stats {
368 unsigned long lro_aggregated;
369 unsigned long lro_flushed;
370 unsigned long lro_no_desc;
371 unsigned long tso_packets; 380 unsigned long tso_packets;
372 unsigned long queue_stopped; 381 unsigned long queue_stopped;
373 unsigned long wake_queue; 382 unsigned long wake_queue;
@@ -376,7 +385,7 @@ struct mlx4_en_port_stats {
376 unsigned long rx_chksum_good; 385 unsigned long rx_chksum_good;
377 unsigned long rx_chksum_none; 386 unsigned long rx_chksum_none;
378 unsigned long tx_chksum_offload; 387 unsigned long tx_chksum_offload;
379#define NUM_PORT_STATS 11 388#define NUM_PORT_STATS 8
380}; 389};
381 390
382struct mlx4_en_perf_stats { 391struct mlx4_en_perf_stats {
@@ -405,6 +414,7 @@ struct mlx4_en_priv {
405 struct vlan_group *vlgrp; 414 struct vlan_group *vlgrp;
406 struct net_device_stats stats; 415 struct net_device_stats stats;
407 struct net_device_stats ret_stats; 416 struct net_device_stats ret_stats;
417 struct mlx4_en_port_state port_state;
408 spinlock_t stats_lock; 418 spinlock_t stats_lock;
409 419
410 unsigned long last_moder_packets; 420 unsigned long last_moder_packets;
@@ -423,6 +433,8 @@ struct mlx4_en_priv {
423 u16 sample_interval; 433 u16 sample_interval;
424 u16 adaptive_rx_coal; 434 u16 adaptive_rx_coal;
425 u32 msg_enable; 435 u32 msg_enable;
436 u32 loopback_ok;
437 u32 validate_loopback;
426 438
427 struct mlx4_hwq_resources res; 439 struct mlx4_hwq_resources res;
428 int link_state; 440 int link_state;
@@ -531,6 +543,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
531 u8 promisc); 543 u8 promisc);
532 544
533int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); 545int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
546int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
547
548#define MLX4_EN_NUM_SELF_TEST 5
549void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
550u64 mlx4_en_mac_to_u64(u8 *addr);
534 551
535/* 552/*
536 * Globals 553 * Globals
@@ -555,6 +572,8 @@ do { \
555 en_print(KERN_WARNING, priv, format, ##arg) 572 en_print(KERN_WARNING, priv, format, ##arg)
556#define en_err(priv, format, arg...) \ 573#define en_err(priv, format, arg...) \
557 en_print(KERN_ERR, priv, format, ##arg) 574 en_print(KERN_ERR, priv, format, ##arg)
575#define en_info(priv, format, arg...) \
576 en_print(KERN_INFO, priv, format, ## arg)
558 577
559#define mlx4_err(mdev, format, arg...) \ 578#define mlx4_err(mdev, format, arg...) \
560 pr_err("%s %s: " format, DRV_NAME, \ 579 pr_err("%s %s: " format, DRV_NAME, \
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index 5caf0115fa5b..e749f82865fe 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -85,7 +85,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
85 struct mlx4_resource tmp; 85 struct mlx4_resource tmp;
86 int i, j; 86 int i, j;
87 87
88 profile = kzalloc(MLX4_RES_NUM * sizeof *profile, GFP_KERNEL); 88 profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
89 if (!profile) 89 if (!profile)
90 return -ENOMEM; 90 return -ENOMEM;
91 91
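The kzalloc-to-kcalloc conversions here and in the myri10ge/niu hunks below are not purely cosmetic: kcalloc() checks the count * size multiplication for overflow and returns NULL instead of allocating a wrapped-around, too-small buffer. Minimal before/after:

    /* old: n * sizeof(*p) can silently wrap for a huge n */
    p = kzalloc(n * sizeof(*p), GFP_KERNEL);

    /* new: kcalloc() fails cleanly if n * sizeof(*p) overflows */
    p = kcalloc(n, sizeof(*p), GFP_KERNEL);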
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index fb2c0927d3cc..4f3a3c0d6d08 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1555,12 +1555,12 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1555 * valid since MSI-X irqs are not shared */ 1555 * valid since MSI-X irqs are not shared */
1556 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { 1556 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
1557 napi_schedule(&ss->napi); 1557 napi_schedule(&ss->napi);
1558 return (IRQ_HANDLED); 1558 return IRQ_HANDLED;
1559 } 1559 }
1560 1560
1561 /* make sure it is our IRQ, and that the DMA has finished */ 1561 /* make sure it is our IRQ, and that the DMA has finished */
1562 if (unlikely(!stats->valid)) 1562 if (unlikely(!stats->valid))
1563 return (IRQ_NONE); 1563 return IRQ_NONE;
1564 1564
1565 /* low bit indicates receives are present, so schedule 1565 /* low bit indicates receives are present, so schedule
1566 * napi poll handler */ 1566 * napi poll handler */
@@ -1599,7 +1599,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1599 myri10ge_check_statblock(mgp); 1599 myri10ge_check_statblock(mgp);
1600 1600
1601 put_be32(htonl(3), ss->irq_claim + 1); 1601 put_be32(htonl(3), ss->irq_claim + 1);
1602 return (IRQ_HANDLED); 1602 return IRQ_HANDLED;
1603} 1603}
1604 1604
1605static int 1605static int
@@ -3753,8 +3753,8 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3753 * slices. We give up on MSI-X if we can only get a single 3753 * slices. We give up on MSI-X if we can only get a single
3754 * vector. */ 3754 * vector. */
3755 3755
3756 mgp->msix_vectors = kzalloc(mgp->num_slices * 3756 mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
3757 sizeof(*mgp->msix_vectors), GFP_KERNEL); 3757 GFP_KERNEL);
3758 if (mgp->msix_vectors == NULL) 3758 if (mgp->msix_vectors == NULL)
3759 goto disable_msix; 3759 goto disable_msix;
3760 for (i = 0; i < mgp->num_slices; i++) { 3760 for (i = 0; i < mgp->num_slices; i++) {
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 617f898ba5f0..4846e131a04e 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -735,7 +735,7 @@ static int myri_header(struct sk_buff *skb, struct net_device *dev,
735 int i; 735 int i;
736 for (i = 0; i < dev->addr_len; i++) 736 for (i = 0; i < dev->addr_len; i++)
737 eth->h_dest[i] = 0; 737 eth->h_dest[i] = 0;
738 return(dev->hard_header_len); 738 return dev->hard_header_len;
739 } 739 }
740 740
741 if (daddr) { 741 if (daddr) {
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index a6033d48b5cc..2fd39630b1e5 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1570,7 +1570,7 @@ static int netdev_open(struct net_device *dev)
1570 init_timer(&np->timer); 1570 init_timer(&np->timer);
1571 np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ); 1571 np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
1572 np->timer.data = (unsigned long)dev; 1572 np->timer.data = (unsigned long)dev;
1573 np->timer.function = &netdev_timer; /* timer handler */ 1573 np->timer.function = netdev_timer; /* timer handler */
1574 add_timer(&np->timer); 1574 add_timer(&np->timer);
1575 1575
1576 return 0; 1576 return 0;
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index b075a35b85d4..a2d805aa75cd 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -346,7 +346,7 @@ static u32 netxen_decode_crb_addr(u32 addr)
346 if (pci_base == NETXEN_ADDR_ERROR) 346 if (pci_base == NETXEN_ADDR_ERROR)
347 return pci_base; 347 return pci_base;
348 else 348 else
349 return (pci_base + offset); 349 return pci_base + offset;
350} 350}
351 351
352#define NETXEN_MAX_ROM_WAIT_USEC 100 352#define NETXEN_MAX_ROM_WAIT_USEC 100
@@ -1789,7 +1789,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1789 done = (sw_consumer == hw_consumer); 1789 done = (sw_consumer == hw_consumer);
1790 spin_unlock(&adapter->tx_clean_lock); 1790 spin_unlock(&adapter->tx_clean_lock);
1791 1791
1792 return (done); 1792 return done;
1793} 1793}
1794 1794
1795void 1795void
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 73d314592230..2c6ceeb592b3 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -177,7 +177,7 @@ netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
177 177
178 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); 178 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
179 179
180 return (recv_ctx->sds_rings == NULL); 180 return recv_ctx->sds_rings == NULL;
181} 181}
182 182
183static void 183static void
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index fe6983af6918..4cd92421708d 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -283,7 +283,7 @@ static int niu_enable_interrupts(struct niu *np, int on)
283 283
284static u32 phy_encode(u32 type, int port) 284static u32 phy_encode(u32 type, int port)
285{ 285{
286 return (type << (port * 2)); 286 return type << (port * 2);
287} 287}
288 288
289static u32 phy_decode(u32 val, int port) 289static u32 phy_decode(u32 val, int port)
@@ -3043,8 +3043,7 @@ static int tcam_flush_all(struct niu *np)
3043 3043
3044static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) 3044static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
3045{ 3045{
3046 return ((u64)index | (num_entries == 1 ? 3046 return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
3047 HASH_TBL_ADDR_AUTOINC : 0));
3048} 3047}
3049 3048
3050#if 0 3049#if 0
@@ -3276,7 +3275,7 @@ static u16 tcam_get_index(struct niu *np, u16 idx)
3276 /* One entry reserved for IP fragment rule */ 3275 /* One entry reserved for IP fragment rule */
3277 if (idx >= (np->clas.tcam_sz - 1)) 3276 if (idx >= (np->clas.tcam_sz - 1))
3278 idx = 0; 3277 idx = 0;
3279 return (np->clas.tcam_top + ((idx+1) * np->parent->num_ports)); 3278 return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
3280} 3279}
3281 3280
3282static u16 tcam_get_size(struct niu *np) 3281static u16 tcam_get_size(struct niu *np)
@@ -3313,7 +3312,7 @@ static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
3313 a >>= PAGE_SHIFT; 3312 a >>= PAGE_SHIFT;
3314 a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); 3313 a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
3315 3314
3316 return (a & (MAX_RBR_RING_SIZE - 1)); 3315 return a & (MAX_RBR_RING_SIZE - 1);
3317} 3316}
3318 3317
3319static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, 3318static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
@@ -3484,7 +3483,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3484 RCR_ENTRY_ERROR))) 3483 RCR_ENTRY_ERROR)))
3485 skb->ip_summed = CHECKSUM_UNNECESSARY; 3484 skb->ip_summed = CHECKSUM_UNNECESSARY;
3486 else 3485 else
3487 skb->ip_summed = CHECKSUM_NONE; 3486 skb_checksum_none_assert(skb);
3488 } else if (!(val & RCR_ENTRY_MULTI)) 3487 } else if (!(val & RCR_ENTRY_MULTI))
3489 append_size = len - skb->len; 3488 append_size = len - skb->len;
3490 3489
@@ -4504,7 +4503,7 @@ static int niu_alloc_channels(struct niu *np)
4504 4503
4505 np->dev->real_num_tx_queues = np->num_tx_rings; 4504 np->dev->real_num_tx_queues = np->num_tx_rings;
4506 4505
4507 np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info), 4506 np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
4508 GFP_KERNEL); 4507 GFP_KERNEL);
4509 err = -ENOMEM; 4508 err = -ENOMEM;
4510 if (!np->rx_rings) 4509 if (!np->rx_rings)
@@ -4538,7 +4537,7 @@ static int niu_alloc_channels(struct niu *np)
4538 return err; 4537 return err;
4539 } 4538 }
4540 4539
4541 np->tx_rings = kzalloc(np->num_tx_rings * sizeof(struct tx_ring_info), 4540 np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info),
4542 GFP_KERNEL); 4541 GFP_KERNEL);
4543 err = -ENOMEM; 4542 err = -ENOMEM;
4544 if (!np->tx_rings) 4543 if (!np->tx_rings)
@@ -7462,10 +7461,12 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
7462 if (fsp->flow_type == IP_USER_FLOW) { 7461 if (fsp->flow_type == IP_USER_FLOW) {
7463 int i; 7462 int i;
7464 int add_usr_cls = 0; 7463 int add_usr_cls = 0;
7465 int ipv6 = 0;
7466 struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; 7464 struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
7467 struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; 7465 struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
7468 7466
7467 if (uspec->ip_ver != ETH_RX_NFC_IP4)
7468 return -EINVAL;
7469
7469 niu_lock_parent(np, flags); 7470 niu_lock_parent(np, flags);
7470 7471
7471 for (i = 0; i < NIU_L3_PROG_CLS; i++) { 7472 for (i = 0; i < NIU_L3_PROG_CLS; i++) {
@@ -7494,9 +7495,7 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
7494 default: 7495 default:
7495 break; 7496 break;
7496 } 7497 }
7497 if (uspec->ip_ver == ETH_RX_NFC_IP6) 7498 ret = tcam_user_ip_class_set(np, class, 0,
7498 ipv6 = 1;
7499 ret = tcam_user_ip_class_set(np, class, ipv6,
7500 uspec->proto, 7499 uspec->proto,
7501 uspec->tos, 7500 uspec->tos,
7502 umask->tos); 7501 umask->tos);
@@ -7553,16 +7552,7 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
7553 ret = -EINVAL; 7552 ret = -EINVAL;
7554 goto out; 7553 goto out;
7555 case IP_USER_FLOW: 7554 case IP_USER_FLOW:
7556 if (fsp->h_u.usr_ip4_spec.ip_ver == ETH_RX_NFC_IP4) { 7555 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
7557 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table,
7558 class);
7559 } else {
7560 /* Not yet implemented */
7561 netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n",
7562 parent->index, __func__);
7563 ret = -EINVAL;
7564 goto out;
7565 }
7566 break; 7556 break;
7567 default: 7557 default:
7568 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", 7558 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
@@ -7805,11 +7795,11 @@ static int niu_get_sset_count(struct net_device *dev, int stringset)
7805 if (stringset != ETH_SS_STATS) 7795 if (stringset != ETH_SS_STATS)
7806 return -EINVAL; 7796 return -EINVAL;
7807 7797
7808 return ((np->flags & NIU_FLAGS_XMAC ? 7798 return (np->flags & NIU_FLAGS_XMAC ?
7809 NUM_XMAC_STAT_KEYS : 7799 NUM_XMAC_STAT_KEYS :
7810 NUM_BMAC_STAT_KEYS) + 7800 NUM_BMAC_STAT_KEYS) +
7811 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + 7801 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
7812 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS)); 7802 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
7813} 7803}
7814 7804
7815static void niu_get_ethtool_stats(struct net_device *dev, 7805static void niu_get_ethtool_stats(struct net_device *dev,
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 5a3488f76b38..3bbd0aab17e8 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -923,7 +923,7 @@ static void rx_irq(struct net_device *ndev)
923 if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) { 923 if ((extsts & 0x002a0000) && !(extsts & 0x00540000)) {
924 skb->ip_summed = CHECKSUM_UNNECESSARY; 924 skb->ip_summed = CHECKSUM_UNNECESSARY;
925 } else { 925 } else {
926 skb->ip_summed = CHECKSUM_NONE; 926 skb_checksum_none_assert(skb);
927 } 927 }
928 skb->protocol = eth_type_trans(skb, ndev); 928 skb->protocol = eth_type_trans(skb, ndev);
929#ifdef NS83820_VLAN_ACCEL_SUPPORT 929#ifdef NS83820_VLAN_ACCEL_SUPPORT
@@ -1246,7 +1246,6 @@ static int ns83820_get_settings(struct net_device *ndev,
1246{ 1246{
1247 struct ns83820 *dev = PRIV(ndev); 1247 struct ns83820 *dev = PRIV(ndev);
1248 u32 cfg, tanar, tbicr; 1248 u32 cfg, tanar, tbicr;
1249 int have_optical = 0;
1250 int fullduplex = 0; 1249 int fullduplex = 0;
1251 1250
1252 /* 1251 /*
@@ -1267,25 +1266,25 @@ static int ns83820_get_settings(struct net_device *ndev,
1267 tanar = readl(dev->base + TANAR); 1266 tanar = readl(dev->base + TANAR);
1268 tbicr = readl(dev->base + TBICR); 1267 tbicr = readl(dev->base + TBICR);
1269 1268
1270 if (dev->CFG_cache & CFG_TBI_EN) { 1269 fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
1271 /* we have an optical interface */
1272 have_optical = 1;
1273 fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
1274
1275 } else {
1276 /* We have copper */
1277 fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
1278 }
1279 1270
1280 cmd->supported = SUPPORTED_Autoneg; 1271 cmd->supported = SUPPORTED_Autoneg;
1281 1272
1282 /* we have optical interface */
1283 if (dev->CFG_cache & CFG_TBI_EN) { 1273 if (dev->CFG_cache & CFG_TBI_EN) {
1274 /* we have optical interface */
1284 cmd->supported |= SUPPORTED_1000baseT_Half | 1275 cmd->supported |= SUPPORTED_1000baseT_Half |
1285 SUPPORTED_1000baseT_Full | 1276 SUPPORTED_1000baseT_Full |
1286 SUPPORTED_FIBRE; 1277 SUPPORTED_FIBRE;
1287 cmd->port = PORT_FIBRE; 1278 cmd->port = PORT_FIBRE;
1288 } /* TODO: else copper related support */ 1279 } else {
1280 /* we have copper */
1281 cmd->supported |= SUPPORTED_10baseT_Half |
1282 SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
1283 SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half |
1284 SUPPORTED_1000baseT_Full |
1285 SUPPORTED_MII;
1286 cmd->port = PORT_MII;
1287 }
1289 1288
1290 cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF; 1289 cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
1291 switch (cfg / CFG_SPDSTS0 & 3) { 1290 switch (cfg / CFG_SPDSTS0 & 3) {
@@ -1299,7 +1298,8 @@ static int ns83820_get_settings(struct net_device *ndev,
1299 cmd->speed = SPEED_10; 1298 cmd->speed = SPEED_10;
1300 break; 1299 break;
1301 } 1300 }
1302 cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE) ? 1: 0; 1301 cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE)
1302 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1303 return 0; 1303 return 0;
1304} 1304}
1305 1305
@@ -1405,6 +1405,13 @@ static const struct ethtool_ops ops = {
1405 .get_link = ns83820_get_link 1405 .get_link = ns83820_get_link
1406}; 1406};
1407 1407
1408static inline void ns83820_disable_interrupts(struct ns83820 *dev)
1409{
1410 writel(0, dev->base + IMR);
1411 writel(0, dev->base + IER);
1412 readl(dev->base + IER);
1413}
1414
1408/* this function is called in irq context from the ISR */ 1415/* this function is called in irq context from the ISR */
1409static void ns83820_mib_isr(struct ns83820 *dev) 1416static void ns83820_mib_isr(struct ns83820 *dev)
1410{ 1417{
@@ -1557,10 +1564,7 @@ static int ns83820_stop(struct net_device *ndev)
1557 /* FIXME: protect against interrupt handler? */ 1564 /* FIXME: protect against interrupt handler? */
1558 del_timer_sync(&dev->tx_watchdog); 1565 del_timer_sync(&dev->tx_watchdog);
1559 1566
1560 /* disable interrupts */ 1567 ns83820_disable_interrupts(dev);
1561 writel(0, dev->base + IMR);
1562 writel(0, dev->base + IER);
1563 readl(dev->base + IER);
1564 1568
1565 dev->rx_info.up = 0; 1569 dev->rx_info.up = 0;
1566 synchronize_irq(dev->pci_dev->irq); 1570 synchronize_irq(dev->pci_dev->irq);
@@ -2023,10 +2027,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
2023 dev->tx_descs, (long)dev->tx_phy_descs, 2027 dev->tx_descs, (long)dev->tx_phy_descs,
2024 dev->rx_info.descs, (long)dev->rx_info.phy_descs); 2028 dev->rx_info.descs, (long)dev->rx_info.phy_descs);
2025 2029
2026 /* disable interrupts */ 2030 ns83820_disable_interrupts(dev);
2027 writel(0, dev->base + IMR);
2028 writel(0, dev->base + IER);
2029 readl(dev->base + IER);
2030 2031
2031 dev->IMR_cache = 0; 2032 dev->IMR_cache = 0;
2032 2033
@@ -2250,9 +2251,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
2250 return 0; 2251 return 0;
2251 2252
2252out_cleanup: 2253out_cleanup:
2253 writel(0, dev->base + IMR); /* paranoia */ 2254 ns83820_disable_interrupts(dev); /* paranoia */
2254 writel(0, dev->base + IER);
2255 readl(dev->base + IER);
2256out_free_irq: 2255out_free_irq:
2257 rtnl_unlock(); 2256 rtnl_unlock();
2258 free_irq(pci_dev->irq, ndev); 2257 free_irq(pci_dev->irq, ndev);
@@ -2277,9 +2276,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
2277 if (!ndev) /* paranoia */ 2276 if (!ndev) /* paranoia */
2278 return; 2277 return;
2279 2278
2280 writel(0, dev->base + IMR); /* paranoia */ 2279 ns83820_disable_interrupts(dev); /* paranoia */
2281 writel(0, dev->base + IER);
2282 readl(dev->base + IER);
2283 2280
2284 unregister_netdev(ndev); 2281 unregister_netdev(ndev);
2285 free_irq(dev->pci_dev->irq, ndev); 2282 free_irq(dev->pci_dev->irq, ndev);
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 8ab6ae0a6107..828e97cacdbf 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -808,7 +808,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
808 skb->csum = (macrx & XCT_MACRX_CSUM_M) >> 808 skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
809 XCT_MACRX_CSUM_S; 809 XCT_MACRX_CSUM_S;
810 } else 810 } else
811 skb->ip_summed = CHECKSUM_NONE; 811 skb_checksum_none_assert(skb);
812 812
813 packets++; 813 packets++;
814 tot_bytes += len; 814 tot_bytes += len;
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index fefa79e34b95..4825959a0efe 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -90,21 +90,6 @@ pasemi_mac_ethtool_set_settings(struct net_device *netdev,
 	return phy_ethtool_sset(phydev, cmd);
 }
 
-static void
-pasemi_mac_ethtool_get_drvinfo(struct net_device *netdev,
-			       struct ethtool_drvinfo *drvinfo)
-{
-	struct pasemi_mac *mac;
-	mac = netdev_priv(netdev);
-
-	/* clear and fill out info */
-	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
-	strncpy(drvinfo->driver, "pasemi_mac", 12);
-	strcpy(drvinfo->version, "N/A");
-	strcpy(drvinfo->fw_version, "N/A");
-	strncpy(drvinfo->bus_info, pci_name(mac->pdev), 32);
-}
-
 static u32
 pasemi_mac_ethtool_get_msglevel(struct net_device *netdev)
 {
@@ -164,7 +149,6 @@ static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset,
 const struct ethtool_ops pasemi_mac_ethtool_ops = {
 	.get_settings = pasemi_mac_ethtool_get_settings,
 	.set_settings = pasemi_mac_ethtool_set_settings,
-	.get_drvinfo = pasemi_mac_ethtool_get_drvinfo,
 	.get_msglevel = pasemi_mac_ethtool_get_msglevel,
 	.set_msglevel = pasemi_mac_ethtool_set_msglevel,
 	.get_link = ethtool_op_get_link,
diff --git a/drivers/net/pch_gbe/Makefile b/drivers/net/pch_gbe/Makefile
new file mode 100644
index 000000000000..31288d4ad248
--- /dev/null
+++ b/drivers/net/pch_gbe/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_PCH_GBE) += pch_gbe.o
2
3pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o
4pch_gbe-y += pch_gbe_api.o pch_gbe_main.o
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
new file mode 100644
index 000000000000..b925ab359fc3
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -0,0 +1,659 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#ifndef _PCH_GBE_H_
22#define _PCH_GBE_H_
23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#include <linux/mii.h>
27#include <linux/delay.h>
28#include <linux/pci.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/ethtool.h>
32#include <linux/vmalloc.h>
33#include <net/ip.h>
34
35/**
36 * struct pch_gbe_regs_mac_adr - Structure holding values of MAC address registers
37 * @high: Denotes the first to fourth bytes of the MAC address
38 * @low: Denotes the fifth and sixth bytes of the MAC address
39 */
40struct pch_gbe_regs_mac_adr {
41 u32 high;
42 u32 low;
43};
44/**
45 * struct pch_gbe_regs - Structure holding values of MAC registers
46 */
47struct pch_gbe_regs {
48 u32 INT_ST;
49 u32 INT_EN;
50 u32 MODE;
51 u32 RESET;
52 u32 TCPIP_ACC;
53 u32 EX_LIST;
54 u32 INT_ST_HOLD;
55 u32 PHY_INT_CTRL;
56 u32 MAC_RX_EN;
57 u32 RX_FCTRL;
58 u32 PAUSE_REQ;
59 u32 RX_MODE;
60 u32 TX_MODE;
61 u32 RX_FIFO_ST;
62 u32 TX_FIFO_ST;
63 u32 TX_FID;
64 u32 TX_RESULT;
65 u32 PAUSE_PKT1;
66 u32 PAUSE_PKT2;
67 u32 PAUSE_PKT3;
68 u32 PAUSE_PKT4;
69 u32 PAUSE_PKT5;
70 u32 reserve[2];
71 struct pch_gbe_regs_mac_adr mac_adr[16];
72 u32 ADDR_MASK;
73 u32 MIIM;
74 u32 reserve2;
75 u32 RGMII_ST;
76 u32 RGMII_CTRL;
77 u32 reserve3[3];
78 u32 DMA_CTRL;
79 u32 reserve4[3];
80 u32 RX_DSC_BASE;
81 u32 RX_DSC_SIZE;
82 u32 RX_DSC_HW_P;
83 u32 RX_DSC_HW_P_HLD;
84 u32 RX_DSC_SW_P;
85 u32 reserve5[3];
86 u32 TX_DSC_BASE;
87 u32 TX_DSC_SIZE;
88 u32 TX_DSC_HW_P;
89 u32 TX_DSC_HW_P_HLD;
90 u32 TX_DSC_SW_P;
91 u32 reserve6[3];
92 u32 RX_DMA_ST;
93 u32 TX_DMA_ST;
94 u32 reserve7[2];
95 u32 WOL_ST;
96 u32 WOL_CTRL;
97 u32 WOL_ADDR_MASK;
98};
99
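/* Sketch, not part of this header: the offsets below follow purely from
 * the C layout above (24 leading u32 fields = 0x60, plus 16 eight-byte
 * mac_adr entries puts ADDR_MASK at 0xE0); checking the layout against
 * the datasheet at compile time would look like:
 *
 *	static inline void pch_gbe_check_reg_layout(void)
 *	{
 *		BUILD_BUG_ON(offsetof(struct pch_gbe_regs, ADDR_MASK) != 0xe0);
 *		BUILD_BUG_ON(offsetof(struct pch_gbe_regs, DMA_CTRL) != 0x100);
 *		BUILD_BUG_ON(offsetof(struct pch_gbe_regs, RX_DSC_BASE) != 0x110);
 *	}
 */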
100/* Interrupt Status */
101/* Interrupt Status Hold */
102/* Interrupt Enable */
103#define PCH_GBE_INT_RX_DMA_CMPLT 0x00000001 /* Receive DMA Transfer Complete */
104#define PCH_GBE_INT_RX_VALID 0x00000002 /* MAC Normal Receive Complete */
105#define PCH_GBE_INT_RX_FRAME_ERR 0x00000004 /* Receive frame error */
106#define PCH_GBE_INT_RX_FIFO_ERR 0x00000008 /* Receive FIFO Overflow */
107#define PCH_GBE_INT_RX_DMA_ERR 0x00000010 /* Receive DMA Transfer Error */
108#define PCH_GBE_INT_RX_DSC_EMP 0x00000020 /* Receive Descriptor Empty */
109#define PCH_GBE_INT_TX_CMPLT 0x00000100 /* MAC Transmission Complete */
110#define PCH_GBE_INT_TX_DMA_CMPLT 0x00000200 /* DMA Transfer Complete */
111#define PCH_GBE_INT_TX_FIFO_ERR 0x00000400 /* Transmission FIFO underflow. */
112#define PCH_GBE_INT_TX_DMA_ERR 0x00000800 /* Transmission DMA Error */
113#define PCH_GBE_INT_PAUSE_CMPLT 0x00001000 /* Pause Transmission complete */
114#define PCH_GBE_INT_MIIM_CMPLT 0x00010000 /* MIIM I/F Read completion */
115#define PCH_GBE_INT_PHY_INT 0x00100000 /* Interruption from PHY */
116#define PCH_GBE_INT_WOL_DET 0x01000000 /* Wake On LAN Event detection. */
117#define PCH_GBE_INT_TCPIP_ERR 0x10000000 /* TCP/IP Accelerator Error */
118
119/* Mode */
120#define PCH_GBE_MODE_MII_ETHER 0x00000000 /* GIGA Ethernet Mode [MII] */
121#define PCH_GBE_MODE_GMII_ETHER 0x80000000 /* GIGA Ethernet Mode [GMII] */
122#define PCH_GBE_MODE_HALF_DUPLEX 0x00000000 /* Duplex Mode [half duplex] */
123#define PCH_GBE_MODE_FULL_DUPLEX 0x40000000 /* Duplex Mode [full duplex] */
124#define PCH_GBE_MODE_FR_BST 0x04000000 /* Frame bursting is done */
125
126/* Reset */
127#define PCH_GBE_ALL_RST 0x80000000 /* All reset */
128#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */
129#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */
130
131/* TCP/IP Accelerator Control */
132#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
133#define PCH_GBE_RX_TCPIPACC_OFF 0x00000004 /* RX TCP/IP ACC Disabled */
134#define PCH_GBE_TX_TCPIPACC_EN 0x00000002 /* TX TCP/IP ACC Enable */
135#define PCH_GBE_RX_TCPIPACC_EN 0x00000001 /* RX TCP/IP ACC Enable */
136
137/* MAC RX Enable */
138#define PCH_GBE_MRE_MAC_RX_EN 0x00000001 /* MAC Receive Enable */
139
140/* RX Flow Control */
141#define PCH_GBE_FL_CTRL_EN 0x80000000 /* Pause packet is enabled */
142
143/* Pause Packet Request */
144#define PCH_GBE_PS_PKT_RQ 0x80000000 /* Pause packet Request */
145
146/* RX Mode */
147#define PCH_GBE_ADD_FIL_EN 0x80000000 /* Address Filtering Enable */
148/* Multicast Filtering Enable */
149#define PCH_GBE_MLT_FIL_EN 0x40000000
150/* Receive Almost Empty Threshold */
151#define PCH_GBE_RH_ALM_EMP_4 0x00000000 /* 4 words */
152#define PCH_GBE_RH_ALM_EMP_8 0x00004000 /* 8 words */
153#define PCH_GBE_RH_ALM_EMP_16 0x00008000 /* 16 words */
154#define PCH_GBE_RH_ALM_EMP_32 0x0000C000 /* 32 words */
155/* Receive Almost Full Threshold */
156#define PCH_GBE_RH_ALM_FULL_4 0x00000000 /* 4 words */
157#define PCH_GBE_RH_ALM_FULL_8 0x00001000 /* 8 words */
158#define PCH_GBE_RH_ALM_FULL_16 0x00002000 /* 16 words */
159#define PCH_GBE_RH_ALM_FULL_32 0x00003000 /* 32 words */
160/* RX FIFO Read Trigger Threshold */
161#define PCH_GBE_RH_RD_TRG_4 0x00000000 /* 4 words */
162#define PCH_GBE_RH_RD_TRG_8 0x00000200 /* 8 words */
163#define PCH_GBE_RH_RD_TRG_16 0x00000400 /* 16 words */
164#define PCH_GBE_RH_RD_TRG_32 0x00000600 /* 32 words */
165#define PCH_GBE_RH_RD_TRG_64 0x00000800 /* 64 words */
166#define PCH_GBE_RH_RD_TRG_128 0x00000A00 /* 128 words */
167#define PCH_GBE_RH_RD_TRG_256 0x00000C00 /* 256 words */
168#define PCH_GBE_RH_RD_TRG_512 0x00000E00 /* 512 words */
169
170/* Receive Descriptor bit definitions */
171#define PCH_GBE_RXD_ACC_STAT_BCAST 0x00000400
172#define PCH_GBE_RXD_ACC_STAT_MCAST 0x00000200
173#define PCH_GBE_RXD_ACC_STAT_UCAST 0x00000100
174#define PCH_GBE_RXD_ACC_STAT_TCPIPOK 0x000000C0
175#define PCH_GBE_RXD_ACC_STAT_IPOK 0x00000080
176#define PCH_GBE_RXD_ACC_STAT_TCPOK 0x00000040
177#define PCH_GBE_RXD_ACC_STAT_IP6ERR 0x00000020
178#define PCH_GBE_RXD_ACC_STAT_OFLIST 0x00000010
179#define PCH_GBE_RXD_ACC_STAT_TYPEIP 0x00000008
180#define PCH_GBE_RXD_ACC_STAT_MACL 0x00000004
181#define PCH_GBE_RXD_ACC_STAT_PPPOE 0x00000002
182#define PCH_GBE_RXD_ACC_STAT_VTAGT 0x00000001
183#define PCH_GBE_RXD_GMAC_STAT_PAUSE 0x0200
184#define PCH_GBE_RXD_GMAC_STAT_MARBR 0x0100
185#define PCH_GBE_RXD_GMAC_STAT_MARMLT 0x0080
186#define PCH_GBE_RXD_GMAC_STAT_MARIND 0x0040
187#define PCH_GBE_RXD_GMAC_STAT_MARNOTMT 0x0020
188#define PCH_GBE_RXD_GMAC_STAT_TLONG 0x0010
189#define PCH_GBE_RXD_GMAC_STAT_TSHRT 0x0008
190#define PCH_GBE_RXD_GMAC_STAT_NOTOCTAL 0x0004
191#define PCH_GBE_RXD_GMAC_STAT_NBLERR 0x0002
192#define PCH_GBE_RXD_GMAC_STAT_CRCERR 0x0001
193
194/* Transmit Descriptor bit definitions */
195#define PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF 0x0008
196#define PCH_GBE_TXD_CTRL_ITAG 0x0004
197#define PCH_GBE_TXD_CTRL_ICRC 0x0002
198#define PCH_GBE_TXD_CTRL_APAD 0x0001
199#define PCH_GBE_TXD_WORDS_SHIFT 2
200#define PCH_GBE_TXD_GMAC_STAT_CMPLT 0x2000
201#define PCH_GBE_TXD_GMAC_STAT_ABT 0x1000
202#define PCH_GBE_TXD_GMAC_STAT_EXCOL 0x0800
203#define PCH_GBE_TXD_GMAC_STAT_SNGCOL 0x0400
204#define PCH_GBE_TXD_GMAC_STAT_MLTCOL 0x0200
205#define PCH_GBE_TXD_GMAC_STAT_CRSER 0x0100
206#define PCH_GBE_TXD_GMAC_STAT_TLNG 0x0080
207#define PCH_GBE_TXD_GMAC_STAT_TSHRT 0x0040
208#define PCH_GBE_TXD_GMAC_STAT_LTCOL 0x0020
209#define PCH_GBE_TXD_GMAC_STAT_TFUNDFLW 0x0010
210#define PCH_GBE_TXD_GMAC_STAT_RTYCNT_MASK 0x000F
211
212/* TX Mode */
213#define PCH_GBE_TM_NO_RTRY 0x80000000 /* No Retransmission */
214#define PCH_GBE_TM_LONG_PKT 0x40000000 /* Long Packet TX Enable */
215#define PCH_GBE_TM_ST_AND_FD 0x20000000 /* Store and Forward */
216#define PCH_GBE_TM_SHORT_PKT 0x10000000 /* Short Packet TX Enable */
217#define PCH_GBE_TM_LTCOL_RETX 0x08000000 /* Retransmission at Late Collision */
218/* Frame Start Threshold */
219#define PCH_GBE_TM_TH_TX_STRT_4 0x00000000 /* 4 words */
220#define PCH_GBE_TM_TH_TX_STRT_8 0x00004000 /* 8 words */
221#define PCH_GBE_TM_TH_TX_STRT_16 0x00008000 /* 16 words */
222#define PCH_GBE_TM_TH_TX_STRT_32 0x0000C000 /* 32 words */
223/* Transmit Almost Empty Threshold */
224#define PCH_GBE_TM_TH_ALM_EMP_4 0x00000000 /* 4 words */
225#define PCH_GBE_TM_TH_ALM_EMP_8 0x00000800 /* 8 words */
226#define PCH_GBE_TM_TH_ALM_EMP_16 0x00001000 /* 16 words */
227#define PCH_GBE_TM_TH_ALM_EMP_32 0x00001800 /* 32 words */
228#define PCH_GBE_TM_TH_ALM_EMP_64 0x00002000 /* 64 words */
229#define PCH_GBE_TM_TH_ALM_EMP_128 0x00002800 /* 128 words */
230#define PCH_GBE_TM_TH_ALM_EMP_256 0x00003000 /* 256 words */
231#define PCH_GBE_TM_TH_ALM_EMP_512 0x00003800 /* 512 words */
232/* Transmit Almost Full Threshold */
233#define PCH_GBE_TM_TH_ALM_FULL_4 0x00000000 /* 4 words */
234#define PCH_GBE_TM_TH_ALM_FULL_8 0x00000200 /* 8 words */
235#define PCH_GBE_TM_TH_ALM_FULL_16 0x00000400 /* 16 words */
236#define PCH_GBE_TM_TH_ALM_FULL_32 0x00000600 /* 32 words */
237
238/* RX FIFO Status */
239#define PCH_GBE_RF_ALM_FULL 0x80000000 /* RX FIFO is almost full. */
240#define PCH_GBE_RF_ALM_EMP 0x40000000 /* RX FIFO is almost empty. */
241#define PCH_GBE_RF_RD_TRG 0x20000000 /* Become more than RH_RD_TRG. */
242#define PCH_GBE_RF_STRWD 0x1FFE0000 /* The word count of RX FIFO. */
243#define PCH_GBE_RF_RCVING 0x00010000 /* Stored in RX FIFO. */
244
245/* MAC Address Mask */
246#define PCH_GBE_BUSY 0x80000000
247
248/* MIIM */
249#define PCH_GBE_MIIM_OPER_WRITE 0x04000000
250#define PCH_GBE_MIIM_OPER_READ 0x00000000
251#define PCH_GBE_MIIM_OPER_READY 0x04000000
252#define PCH_GBE_MIIM_PHY_ADDR_SHIFT 21
253#define PCH_GBE_MIIM_REG_ADDR_SHIFT 16
254
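/* Sketch, not part of this header: given the shifts above, a MIIM read
 * command for, say, PHY address 1 / register MII_BMSR would presumably
 * be composed as (assuming data travels in the low 16 bits):
 *
 *	u32 cmd = PCH_GBE_MIIM_OPER_READ
 *		| (1 << PCH_GBE_MIIM_PHY_ADDR_SHIFT)
 *		| (MII_BMSR << PCH_GBE_MIIM_REG_ADDR_SHIFT);
 */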
255/* RGMII Status */
256#define PCH_GBE_LINK_UP 0x80000008
257#define PCH_GBE_RXC_SPEED_MSK 0x00000006
258#define PCH_GBE_RXC_SPEED_2_5M 0x00000000 /* 2.5MHz */
259#define PCH_GBE_RXC_SPEED_25M 0x00000002 /* 25MHz */
260#define PCH_GBE_RXC_SPEED_125M 0x00000004 /* 125MHz */
261#define PCH_GBE_DUPLEX_FULL 0x00000001
262
263/* RGMII Control */
264#define PCH_GBE_CRS_SEL 0x00000010
265#define PCH_GBE_RGMII_RATE_125M 0x00000000
266#define PCH_GBE_RGMII_RATE_25M 0x00000008
267#define PCH_GBE_RGMII_RATE_2_5M 0x0000000C
268#define PCH_GBE_RGMII_MODE_GMII 0x00000000
269#define PCH_GBE_RGMII_MODE_RGMII 0x00000002
270#define PCH_GBE_CHIP_TYPE_EXTERNAL 0x00000000
271#define PCH_GBE_CHIP_TYPE_INTERNAL 0x00000001
272
273/* DMA Control */
274#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
275#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
276
277/* Wake On LAN Status */
278#define PCH_GBE_WLS_BR 0x00000008 /* Broadcast Address */
279#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
280
281/* The Frame registered in Address Recognizer */
282#define PCH_GBE_WLS_IND 0x00000002
283#define PCH_GBE_WLS_MP 0x00000001 /* Magic packet Address */
284
285/* Wake On LAN Control */
286#define PCH_GBE_WLC_WOL_MODE 0x00010000
287#define PCH_GBE_WLC_IGN_TLONG 0x00000100
288#define PCH_GBE_WLC_IGN_TSHRT 0x00000080
289#define PCH_GBE_WLC_IGN_OCTER 0x00000040
290#define PCH_GBE_WLC_IGN_NBLER 0x00000020
291#define PCH_GBE_WLC_IGN_CRCER 0x00000010
292#define PCH_GBE_WLC_BR 0x00000008
293#define PCH_GBE_WLC_MLT 0x00000004
294#define PCH_GBE_WLC_IND 0x00000002
295#define PCH_GBE_WLC_MP 0x00000001
296
297/* Wake On LAN Address Mask */
298#define PCH_GBE_WLA_BUSY 0x80000000
299
300
301
302/* TX/RX descriptor defines */
303#define PCH_GBE_MAX_TXD 4096
304#define PCH_GBE_DEFAULT_TXD 256
305#define PCH_GBE_MIN_TXD 8
306#define PCH_GBE_MAX_RXD 4096
307#define PCH_GBE_DEFAULT_RXD 256
308#define PCH_GBE_MIN_RXD 8
309
310/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
311#define PCH_GBE_TX_DESC_MULTIPLE 8
312#define PCH_GBE_RX_DESC_MULTIPLE 8
313
314/* Read/Write operation is done through MII Management IF */
315#define PCH_GBE_HAL_MIIM_READ ((u32)0x00000000)
316#define PCH_GBE_HAL_MIIM_WRITE ((u32)0x04000000)
317
318/* flow control values */
319#define PCH_GBE_FC_NONE 0
320#define PCH_GBE_FC_RX_PAUSE 1
321#define PCH_GBE_FC_TX_PAUSE 2
322#define PCH_GBE_FC_FULL 3
323#define PCH_GBE_FC_DEFAULT PCH_GBE_FC_FULL
324
325
326struct pch_gbe_hw;
327/**
328 * struct pch_gbe_functions - HAL API function pointers
329 * @get_bus_info: for pch_gbe_hal_get_bus_info
330 * @init_hw: for pch_gbe_hal_init_hw
331 * @read_phy_reg: for pch_gbe_hal_read_phy_reg
332 * @write_phy_reg: for pch_gbe_hal_write_phy_reg
333 * @reset_phy: for pch_gbe_hal_phy_hw_reset
334 * @sw_reset_phy: for pch_gbe_hal_phy_sw_reset
335 * @power_up_phy: for pch_gbe_hal_power_up_phy
336 * @power_down_phy: for pch_gbe_hal_power_down_phy
337 * @read_mac_addr: for pch_gbe_hal_read_mac_addr
338 */
339struct pch_gbe_functions {
340 void (*get_bus_info) (struct pch_gbe_hw *);
341 s32 (*init_hw) (struct pch_gbe_hw *);
342 s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *);
343 s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16);
344 void (*reset_phy) (struct pch_gbe_hw *);
345 void (*sw_reset_phy) (struct pch_gbe_hw *);
346 void (*power_up_phy) (struct pch_gbe_hw *hw);
347 void (*power_down_phy) (struct pch_gbe_hw *hw);
348 s32 (*read_mac_addr) (struct pch_gbe_hw *);
349};
350
351/**
352 * struct pch_gbe_mac_info - MAC information
353 * @addr: Store the MAC address (6 bytes)
354 * @fc: Mode of flow control
355 * @fc_autoneg: Auto negotiation enable for flow control setting
356 * @tx_fc_enable: Enable flag of Transmit flow control
357 * @max_frame_size: Max transmit frame size
358 * @min_frame_size: Min transmit frame size
359 * @autoneg: Auto negotiation enable
360 * @link_speed: Link speed
361 * @link_duplex: Link duplex
362 */
363struct pch_gbe_mac_info {
364 u8 addr[6];
365 u8 fc;
366 u8 fc_autoneg;
367 u8 tx_fc_enable;
368 u32 max_frame_size;
369 u32 min_frame_size;
370 u8 autoneg;
371 u16 link_speed;
372 u16 link_duplex;
373};
374
375/**
376 * struct pch_gbe_phy_info - PHY information
377 * @addr: PHY address
378 * @id: PHY's identifier
379 * @revision: PHY's revision
380 * @reset_delay_us: HW reset delay time[us]
381 * @autoneg_advertised: Autoneg advertised
382 */
383struct pch_gbe_phy_info {
384 u32 addr;
385 u32 id;
386 u32 revision;
387 u32 reset_delay_us;
388 u16 autoneg_advertised;
389};
390
391/*!
392 * @ingroup Gigabit Ether driver Layer
393 * @struct pch_gbe_bus_info
394 * @brief Bus information
395 */
396struct pch_gbe_bus_info {
397 u8 type;
398 u8 speed;
399 u8 width;
400};
401
402/*!
403 * @ingroup Gigabit Ether driver Layer
404 * @struct pch_gbe_hw
405 * @brief Hardware information
406 */
407struct pch_gbe_hw {
408 void *back;
409
410 struct pch_gbe_regs __iomem *reg;
411 spinlock_t miim_lock;
412
413 const struct pch_gbe_functions *func;
414 struct pch_gbe_mac_info mac;
415 struct pch_gbe_phy_info phy;
416 struct pch_gbe_bus_info bus;
417};
418
419/**
420 * struct pch_gbe_rx_desc - Receive Descriptor
421 * @buffer_addr: RX Frame Buffer Address
422 * @tcp_ip_status: TCP/IP Accelerator Status
423 * @rx_words_eob: RX word count and Byte position
424 * @gbec_status: GMAC Status
425 * @dma_status: DMA Status
426 * @reserved1: Reserved
427 * @reserved2: Reserved
428 */
429struct pch_gbe_rx_desc {
430 u32 buffer_addr;
431 u32 tcp_ip_status;
432 u16 rx_words_eob;
433 u16 gbec_status;
434 u8 dma_status;
435 u8 reserved1;
436 u16 reserved2;
437};
438
439/**
440 * struct pch_gbe_tx_desc - Transmit Descriptor
441 * @buffer_addr: TX Frame Buffer Address
442 * @length: Data buffer length
443 * @reserved1: Reserved
444 * @tx_words_eob: TX word count and Byte position
445 * @tx_frame_ctrl: TX Frame Control
446 * @dma_status: DMA Status
447 * @reserved2: Reserved
448 * @gbec_status: GMAC Status
449 */
450struct pch_gbe_tx_desc {
451 u32 buffer_addr;
452 u16 length;
453 u16 reserved1;
454 u16 tx_words_eob;
455 u16 tx_frame_ctrl;
456 u8 dma_status;
457 u8 reserved2;
458 u16 gbec_status;
459};
460
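/* Sketch, not part of this header: both descriptor layouts above pack to
 * exactly 16 bytes with no implicit padding, which the DMA engine layout
 * presumably relies on. A compile-time guard would be:
 *
 *	BUILD_BUG_ON(sizeof(struct pch_gbe_rx_desc) != 16);
 *	BUILD_BUG_ON(sizeof(struct pch_gbe_tx_desc) != 16);
 */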
461
462/**
463 * struct pch_gbe_buffer - Buffer information
464 * @skb: pointer to a socket buffer
465 * @dma: DMA address
466 * @time_stamp: time stamp
467 * @length: data size
468 */
469struct pch_gbe_buffer {
470 struct sk_buff *skb;
471 dma_addr_t dma;
472 unsigned long time_stamp;
473 u16 length;
474 bool mapped;
475};
476
477/**
478 * struct pch_gbe_tx_ring - tx ring information
479 * @tx_lock: spinlock structs
480 * @desc: pointer to the descriptor ring memory
481 * @dma: physical address of the descriptor ring
482 * @size: length of descriptor ring in bytes
483 * @count: number of descriptors in the ring
484 * @next_to_use: next descriptor to associate a buffer with
485 * @next_to_clean: next descriptor to check for DD status bit
486 * @buffer_info: array of buffer information structs
487 */
488struct pch_gbe_tx_ring {
489 spinlock_t tx_lock;
490 struct pch_gbe_tx_desc *desc;
491 dma_addr_t dma;
492 unsigned int size;
493 unsigned int count;
494 unsigned int next_to_use;
495 unsigned int next_to_clean;
496 struct pch_gbe_buffer *buffer_info;
497};
498
499/**
500 * struct pch_gbe_rx_ring - rx ring information
501 * @desc: pointer to the descriptor ring memory
502 * @dma: physical address of the descriptor ring
503 * @size: length of descriptor ring in bytes
504 * @count: number of descriptors in the ring
505 * @next_to_use: next descriptor to associate a buffer with
506 * @next_to_clean: next descriptor to check for DD status bit
507 * @buffer_info: array of buffer information structs
508 */
509struct pch_gbe_rx_ring {
510 struct pch_gbe_rx_desc *desc;
511 dma_addr_t dma;
512 unsigned int size;
513 unsigned int count;
514 unsigned int next_to_use;
515 unsigned int next_to_clean;
516 struct pch_gbe_buffer *buffer_info;
517};
518
519/**
520 * struct pch_gbe_hw_stats - Statistics counters collected by the MAC
521 * @rx_packets: total packets received
522 * @tx_packets: total packets transmitted
523 * @rx_bytes: total bytes received
524 * @tx_bytes: total bytes transmitted
525 * @rx_errors: bad packets received
526 * @tx_errors: packet transmit problems
527 * @rx_dropped: no space in Linux buffers
528 * @tx_dropped: no space available in Linux
529 * @multicast: multicast packets received
530 * @collisions: collisions
531 * @rx_crc_errors: received packet with crc error
532 * @rx_frame_errors: received frame alignment error
533 * @rx_alloc_buff_failed: allocate failure of a receive buffer
534 * @tx_length_errors: transmit length error
535 * @tx_aborted_errors: transmit aborted error
536 * @tx_carrier_errors: transmit carrier error
537 * @tx_timeout_count: Number of transmit timeouts
538 * @tx_restart_count: Number of transmit restarts
539 * @intr_rx_dsc_empty_count: Interrupt count of receive descriptor empty
540 * @intr_rx_frame_err_count: Interrupt count of receive frame error
541 * @intr_rx_fifo_err_count: Interrupt count of receive FIFO error
542 * @intr_rx_dma_err_count: Interrupt count of receive DMA error
543 * @intr_tx_fifo_err_count: Interrupt count of transmit FIFO error
544 * @intr_tx_dma_err_count: Interrupt count of transmit DMA error
545 * @intr_tcpip_err_count: Interrupt count of TCP/IP Accelerator
546 */
547struct pch_gbe_hw_stats {
548 u32 rx_packets;
549 u32 tx_packets;
550 u32 rx_bytes;
551 u32 tx_bytes;
552 u32 rx_errors;
553 u32 tx_errors;
554 u32 rx_dropped;
555 u32 tx_dropped;
556 u32 multicast;
557 u32 collisions;
558 u32 rx_crc_errors;
559 u32 rx_frame_errors;
560 u32 rx_alloc_buff_failed;
561 u32 tx_length_errors;
562 u32 tx_aborted_errors;
563 u32 tx_carrier_errors;
564 u32 tx_timeout_count;
565 u32 tx_restart_count;
566 u32 intr_rx_dsc_empty_count;
567 u32 intr_rx_frame_err_count;
568 u32 intr_rx_fifo_err_count;
569 u32 intr_rx_dma_err_count;
570 u32 intr_tx_fifo_err_count;
571 u32 intr_tx_dma_err_count;
572 u32 intr_tcpip_err_count;
573};
574
575/**
576 * struct pch_gbe_adapter - board specific private data structure
577 * @stats_lock: Spinlock structure for status
578 * @tx_queue_lock: Spinlock structure for transmit
579 * @ethtool_lock: Spinlock structure for ethtool
580 * @irq_sem: Semaphore for interrupt
581 * @netdev: Pointer of network device structure
582 * @pdev: Pointer of pci device structure
583 * @polling_netdev: Pointer of polling network device structure
584 * @napi: NAPI structure
585 * @hw: Pointer of hardware structure
586 * @stats: Hardware status
587 * @reset_task: Reset task
588 * @mii: MII information structure
589 * @watchdog_timer: Watchdog timer list
590 * @wake_up_evt: Wake up event
591 * @config_space: Configuration space
592 * @msg_enable: Driver message level
593 * @led_status: LED status
594 * @tx_ring: Pointer of Tx descriptor ring structure
595 * @rx_ring: Pointer of Rx descriptor ring structure
596 * @rx_buffer_len: Receive buffer length
597 * @tx_queue_len: Transmit queue length
598 * @rx_csum: Receive TCP/IP checksum enable/disable
599 * @tx_csum: Transmit TCP/IP checksum enable/disable
600 * @have_msi: PCI MSI mode flag
601 */
602
603struct pch_gbe_adapter {
604 spinlock_t stats_lock;
605 spinlock_t tx_queue_lock;
606 spinlock_t ethtool_lock;
607 atomic_t irq_sem;
608 struct net_device *netdev;
609 struct pci_dev *pdev;
610 struct net_device *polling_netdev;
611 struct napi_struct napi;
612 struct pch_gbe_hw hw;
613 struct pch_gbe_hw_stats stats;
614 struct work_struct reset_task;
615 struct mii_if_info mii;
616 struct timer_list watchdog_timer;
617 u32 wake_up_evt;
618 u32 *config_space;
619 unsigned long led_status;
620 struct pch_gbe_tx_ring *tx_ring;
621 struct pch_gbe_rx_ring *rx_ring;
622 unsigned long rx_buffer_len;
623 unsigned long tx_queue_len;
624 bool rx_csum;
625 bool tx_csum;
626 bool have_msi;
627};
628
629extern const char pch_driver_version[];
630
631/* pch_gbe_main.c */
632extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
633extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
634extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
635extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
636extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
637 struct pch_gbe_tx_ring *txdr);
638extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
639 struct pch_gbe_rx_ring *rxdr);
640extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
641 struct pch_gbe_tx_ring *tx_ring);
642extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
643 struct pch_gbe_rx_ring *rx_ring);
644extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
645extern int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
646extern void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
647 int data);
648/* pch_gbe_param.c */
649extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
650
651/* pch_gbe_ethtool.c */
652extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
653
654/* pch_gbe_mac.c */
655extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
656extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
657extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
658 u32 addr, u32 dir, u32 reg, u16 data);
659#endif /* _PCH_GBE_H_ */
diff --git a/drivers/net/pch_gbe/pch_gbe_api.c b/drivers/net/pch_gbe/pch_gbe_api.c
new file mode 100644
index 000000000000..db53d2a943e0
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_api.c
@@ -0,0 +1,245 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20#include "pch_gbe.h"
21#include "pch_gbe_phy.h"
22
23/* bus type values */
24#define pch_gbe_bus_type_unknown 0
25#define pch_gbe_bus_type_pci 1
26#define pch_gbe_bus_type_pcix 2
27#define pch_gbe_bus_type_pci_express 3
28#define pch_gbe_bus_type_reserved 4
29
30/* bus speed values */
31#define pch_gbe_bus_speed_unknown 0
32#define pch_gbe_bus_speed_33 1
33#define pch_gbe_bus_speed_66 2
34#define pch_gbe_bus_speed_100 3
35#define pch_gbe_bus_speed_120 4
36#define pch_gbe_bus_speed_133 5
37#define pch_gbe_bus_speed_2500 6
38#define pch_gbe_bus_speed_reserved 7
39
40/* bus width values */
41#define pch_gbe_bus_width_unknown 0
42#define pch_gbe_bus_width_pcie_x1 1
43#define pch_gbe_bus_width_pcie_x2 2
44#define pch_gbe_bus_width_pcie_x4 4
45#define pch_gbe_bus_width_32 5
46#define pch_gbe_bus_width_64 6
47#define pch_gbe_bus_width_reserved 7
48
49/**
50 * pch_gbe_plat_get_bus_info - Obtain bus information for adapter
51 * @hw: Pointer to the HW structure
52 */
53static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
54{
55 hw->bus.type = pch_gbe_bus_type_pci_express;
56 hw->bus.speed = pch_gbe_bus_speed_2500;
57 hw->bus.width = pch_gbe_bus_width_pcie_x1;
58}
59
60/**
61 * pch_gbe_plat_init_hw - Initialize hardware
62 * @hw: Pointer to the HW structure
63 * Returns
64 * 0: Successfully
65 * Negative value: Failed (e.g. -EBUSY)
66 */
67static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw)
68{
69 s32 ret_val;
70
71 ret_val = pch_gbe_phy_get_id(hw);
72 if (ret_val) {
73 pr_err("pch_gbe_phy_get_id error\n");
74 return ret_val;
75 }
76 pch_gbe_phy_init_setting(hw);
77 /* Setup Mac interface option RGMII */
78#ifdef PCH_GBE_MAC_IFOP_RGMII
79 pch_gbe_phy_set_rgmii(hw);
80#endif
81 return ret_val;
82}
83
84static const struct pch_gbe_functions pch_gbe_ops = {
85 .get_bus_info = pch_gbe_plat_get_bus_info,
86 .init_hw = pch_gbe_plat_init_hw,
87 .read_phy_reg = pch_gbe_phy_read_reg_miic,
88 .write_phy_reg = pch_gbe_phy_write_reg_miic,
89 .reset_phy = pch_gbe_phy_hw_reset,
90 .sw_reset_phy = pch_gbe_phy_sw_reset,
91 .power_up_phy = pch_gbe_phy_power_up,
92 .power_down_phy = pch_gbe_phy_power_down,
93 .read_mac_addr = pch_gbe_mac_read_mac_addr
94};
95
96/**
97 * pch_gbe_plat_init_function_pointers - Init func ptrs
98 * @hw: Pointer to the HW structure
99 */
100void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
101{
102 /* Set PHY parameter */
103 hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
104 /* Set function pointers */
105 hw->func = &pch_gbe_ops;
106}
107
108/**
109 * pch_gbe_hal_setup_init_funcs - Initializes function pointers
110 * @hw: Pointer to the HW structure
111 * Returns
112 * 0: Successfully
113 * ENOSYS: Function is not registered
114 */
115inline s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
116{
117 if (!hw->reg) {
118 pr_err("ERROR: Registers not mapped\n");
119 return -ENOSYS;
120 }
121 pch_gbe_plat_init_function_pointers(hw);
122 return 0;
123}
124
125/**
126 * pch_gbe_hal_get_bus_info - Obtain bus information for adapter
127 * @hw: Pointer to the HW structure
128 */
129inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
130{
131 if (!hw->func->get_bus_info)
132 pr_err("ERROR: configuration\n");
133 else
134 hw->func->get_bus_info(hw);
135}
136
137/**
138 * pch_gbe_hal_init_hw - Initialize hardware
139 * @hw: Pointer to the HW structure
140 * Returns
141 * 0: Successfully
142 * ENOSYS: Function is not registered
143 */
144inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
145{
146 if (!hw->func->init_hw) {
147 pr_err("ERROR: configuration\n");
148 return -ENOSYS;
149 }
150 return hw->func->init_hw(hw);
151}
152
153/**
154 * pch_gbe_hal_read_phy_reg - Reads PHY register
155 * @hw: Pointer to the HW structure
156 * @offset: The register to read
157 * @data: The buffer to store the 16-bit read.
158 * Returns
159 * 0: Successfully
160 * Negative value: Failed
161 */
162inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
163 u16 *data)
164{
165 if (!hw->func->read_phy_reg)
166 return 0;
167 return hw->func->read_phy_reg(hw, offset, data);
168}
169
170/**
171 * pch_gbe_hal_write_phy_reg - Writes PHY register
172 * @hw: Pointer to the HW structure
173 * @offset: The register to read
174 * @data: The value to write.
175 * Returns
176 * 0: Successfully
177 * Negative value: Failed
178 */
179inline s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset,
180 u16 data)
181{
182 if (!hw->func->write_phy_reg)
183 return 0;
184 return hw->func->write_phy_reg(hw, offset, data);
185}
186
187/**
188 * pch_gbe_hal_phy_hw_reset - Hard PHY reset
189 * @hw: Pointer to the HW structure
190 */
191inline void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw)
192{
193 if (!hw->func->reset_phy)
194 pr_err("ERROR: configuration\n");
195 else
196 hw->func->reset_phy(hw);
197}
198
199/**
200 * pch_gbe_hal_phy_sw_reset - Soft PHY reset
201 * @hw: Pointer to the HW structure
202 */
203inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
204{
205 if (!hw->func->sw_reset_phy)
206 pr_err("ERROR: configuration\n");
207 else
208 hw->func->sw_reset_phy(hw);
209}
210
211/**
212 * pch_gbe_hal_read_mac_addr - Reads MAC address
213 * @hw: Pointer to the HW structure
214 * Returns
215 * 0: Successfully
216 * ENOSYS: Function is not registered
217 */
218inline s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
219{
220 if (!hw->func->read_mac_addr) {
221 pr_err("ERROR: configuration\n");
222 return -ENOSYS;
223 }
224 return hw->func->read_mac_addr(hw);
225}
226
227/**
228 * pch_gbe_hal_power_up_phy - Power up PHY
229 * @hw: Pointer to the HW structure
230 */
231inline void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw)
232{
233 if (hw->func->power_up_phy)
234 hw->func->power_up_phy(hw);
235}
236
237/**
238 * pch_gbe_hal_power_down_phy - Power down PHY
239 * @hw: Pointer to the HW structure
240 */
241inline void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw)
242{
243 if (hw->func->power_down_phy)
244 hw->func->power_down_phy(hw);
245}
diff --git a/drivers/net/pch_gbe/pch_gbe_api.h b/drivers/net/pch_gbe/pch_gbe_api.h
new file mode 100644
index 000000000000..94aaac5b057b
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_api.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20#ifndef _PCH_GBE_API_H_
21#define _PCH_GBE_API_H_
22
23#include "pch_gbe_phy.h"
24
25s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw);
26void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw);
27s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw);
28s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data);
29s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data);
30void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw);
31void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw);
32s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw);
33void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw);
34void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw);
35
36#endif
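The pch_gbe_hal_* wrappers declared above are thin dispatchers through hw->func (bound to pch_gbe_ops in pch_gbe_api.c). A typical bring-up sequence, sketched here with error handling elided (the real call sites live in pch_gbe_main.c and are not shown in this hunk):

	ret = pch_gbe_hal_setup_init_funcs(hw);	/* validates hw->reg, binds pch_gbe_ops */
	if (!ret) {
		pch_gbe_hal_get_bus_info(hw);
		ret = pch_gbe_hal_init_hw(hw);	/* PHY id read + RGMII setup */
	}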
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c
new file mode 100644
index 000000000000..e06c6aea4527
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c
@@ -0,0 +1,584 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20#include "pch_gbe.h"
21#include "pch_gbe_api.h"
22
23/**
24 * struct pch_gbe_stats - Stats item information
25 */
26struct pch_gbe_stats {
27 char string[ETH_GSTRING_LEN];
28 size_t size;
29 size_t offset;
30};
31
32#define PCH_GBE_STAT(m) \
33{ \
34 .string = #m, \
35 .size = FIELD_SIZEOF(struct pch_gbe_hw_stats, m), \
36 .offset = offsetof(struct pch_gbe_hw_stats, m), \
37}
38
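/* Sketch, not part of this file: PCH_GBE_STAT(rx_packets) expands to
 * roughly the initializer below, which is what lets
 * pch_gbe_get_ethtool_stats() further down copy any counter through one
 * generic offset/size loop:
 *
 *	{
 *		.string = "rx_packets",
 *		.size   = sizeof(u32),
 *		.offset = offsetof(struct pch_gbe_hw_stats, rx_packets),
 *	}
 */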
39/**
40 * pch_gbe_gstrings_stats - ethtool stats string/descriptor list
41 */
42static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
43 PCH_GBE_STAT(rx_packets),
44 PCH_GBE_STAT(tx_packets),
45 PCH_GBE_STAT(rx_bytes),
46 PCH_GBE_STAT(tx_bytes),
47 PCH_GBE_STAT(rx_errors),
48 PCH_GBE_STAT(tx_errors),
49 PCH_GBE_STAT(rx_dropped),
50 PCH_GBE_STAT(tx_dropped),
51 PCH_GBE_STAT(multicast),
52 PCH_GBE_STAT(collisions),
53 PCH_GBE_STAT(rx_crc_errors),
54 PCH_GBE_STAT(rx_frame_errors),
55 PCH_GBE_STAT(rx_alloc_buff_failed),
56 PCH_GBE_STAT(tx_length_errors),
57 PCH_GBE_STAT(tx_aborted_errors),
58 PCH_GBE_STAT(tx_carrier_errors),
59 PCH_GBE_STAT(tx_timeout_count),
60 PCH_GBE_STAT(tx_restart_count),
61 PCH_GBE_STAT(intr_rx_dsc_empty_count),
62 PCH_GBE_STAT(intr_rx_frame_err_count),
63 PCH_GBE_STAT(intr_rx_fifo_err_count),
64 PCH_GBE_STAT(intr_rx_dma_err_count),
65 PCH_GBE_STAT(intr_tx_fifo_err_count),
66 PCH_GBE_STAT(intr_tx_dma_err_count),
67 PCH_GBE_STAT(intr_tcpip_err_count)
68};
69
70#define PCH_GBE_QUEUE_STATS_LEN 0
71#define PCH_GBE_GLOBAL_STATS_LEN ARRAY_SIZE(pch_gbe_gstrings_stats)
72#define PCH_GBE_STATS_LEN (PCH_GBE_GLOBAL_STATS_LEN + PCH_GBE_QUEUE_STATS_LEN)
73
74#define PCH_GBE_MAC_REGS_LEN (sizeof(struct pch_gbe_regs) / 4)
75#define PCH_GBE_REGS_LEN (PCH_GBE_MAC_REGS_LEN + PCH_GBE_PHY_REGS_LEN)
76/**
77 * pch_gbe_get_settings - Get device-specific settings
78 * @netdev: Network interface device structure
79 * @ecmd: Ethtool command
80 * Returns
81 * 0: Successful.
82 * Negative value: Failed.
83 */
84static int pch_gbe_get_settings(struct net_device *netdev,
85 struct ethtool_cmd *ecmd)
86{
87 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
88 int ret;
89
90 ret = mii_ethtool_gset(&adapter->mii, ecmd);
91 ecmd->supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half);
92 ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
93
94 if (!netif_carrier_ok(adapter->netdev))
95 ecmd->speed = -1;
96 return ret;
97}
98
99/**
100 * pch_gbe_set_settings - Set device-specific settings
101 * @netdev: Network interface device structure
102 * @ecmd: Ethtool command
103 * Returns
104 * 0: Successful.
105 * Negative value: Failed.
106 */
107static int pch_gbe_set_settings(struct net_device *netdev,
108 struct ethtool_cmd *ecmd)
109{
110 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
111 struct pch_gbe_hw *hw = &adapter->hw;
112 int ret;
113
114 pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
115
116 if (ecmd->speed == -1)
117 ecmd->speed = SPEED_1000;
118 ecmd->duplex = DUPLEX_FULL;
119 ret = mii_ethtool_sset(&adapter->mii, ecmd);
120 if (ret) {
121 pr_err("Error: mii_ethtool_sset\n");
122 return ret;
123 }
124 hw->mac.link_speed = ecmd->speed;
125 hw->mac.link_duplex = ecmd->duplex;
126 hw->phy.autoneg_advertised = ecmd->advertising;
127 hw->mac.autoneg = ecmd->autoneg;
128 pch_gbe_hal_phy_sw_reset(hw);
129
130 /* reset the link */
131 if (netif_running(adapter->netdev)) {
132 pch_gbe_down(adapter);
133 ret = pch_gbe_up(adapter);
134 } else {
135 pch_gbe_reset(adapter);
136 }
137 return ret;
138}
139
140/**
141 * pch_gbe_get_regs_len - Report the size of device registers
142 * @netdev: Network interface device structure
143 * Returns: the size of device registers.
144 */
145static int pch_gbe_get_regs_len(struct net_device *netdev)
146{
147 return PCH_GBE_REGS_LEN * (int)sizeof(u32);
148}
149
150/**
151 * pch_gbe_get_drvinfo - Report driver information
152 * @netdev: Network interface device structure
153 * @drvinfo: Driver information structure
154 */
155static void pch_gbe_get_drvinfo(struct net_device *netdev,
156 struct ethtool_drvinfo *drvinfo)
157{
158 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
159
160 strcpy(drvinfo->driver, KBUILD_MODNAME);
161 strcpy(drvinfo->version, pch_driver_version);
162 strcpy(drvinfo->fw_version, "N/A");
163 strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
164 drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
165}
166
167/**
168 * pch_gbe_get_regs - Get device registers
169 * @netdev: Network interface device structure
170 * @regs: Ethtool register structure
171 * @p: Buffer pointer of read device register data
172 */
173static void pch_gbe_get_regs(struct net_device *netdev,
174 struct ethtool_regs *regs, void *p)
175{
176 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
177 struct pch_gbe_hw *hw = &adapter->hw;
178 struct pci_dev *pdev = adapter->pdev;
179 u32 *regs_buff = p;
180 u16 i, tmp;
181
182 regs->version = 0x1000000 | (__u32)pdev->revision << 16 | pdev->device;
183 for (i = 0; i < PCH_GBE_MAC_REGS_LEN; i++)
184 *regs_buff++ = ioread32(&hw->reg->INT_ST + i);
185 /* PHY register */
186 for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) {
187 pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp);
188 *regs_buff++ = tmp;
189 }
190}
191
192/**
193 * pch_gbe_get_wol - Report whether Wake-on-LAN is enabled
194 * @netdev: Network interface device structure
195 * @wol: Wake-on-LAN information
196 */
197static void pch_gbe_get_wol(struct net_device *netdev,
198 struct ethtool_wolinfo *wol)
199{
200 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
201
202 wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
203 wol->wolopts = 0;
204
205 if ((adapter->wake_up_evt & PCH_GBE_WLC_IND))
206 wol->wolopts |= WAKE_UCAST;
207 if ((adapter->wake_up_evt & PCH_GBE_WLC_MLT))
208 wol->wolopts |= WAKE_MCAST;
209 if ((adapter->wake_up_evt & PCH_GBE_WLC_BR))
210 wol->wolopts |= WAKE_BCAST;
211 if ((adapter->wake_up_evt & PCH_GBE_WLC_MP))
212 wol->wolopts |= WAKE_MAGIC;
213}
214
215/**
216 * pch_gbe_set_wol - Turn Wake-on-LAN on or off
217 * @netdev: Network interface device structure
218 * @wol: Pointer of Wake-on-LAN information structure
219 * Returns
220 * 0: Successful.
221 * Negative value: Failed.
222 */
223static int pch_gbe_set_wol(struct net_device *netdev,
224 struct ethtool_wolinfo *wol)
225{
226 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
227
228 if ((wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)))
229 return -EOPNOTSUPP;
230 /* these settings will always override what we currently have */
231 adapter->wake_up_evt = 0;
232
233 if ((wol->wolopts & WAKE_UCAST))
234 adapter->wake_up_evt |= PCH_GBE_WLC_IND;
235 if ((wol->wolopts & WAKE_MCAST))
236 adapter->wake_up_evt |= PCH_GBE_WLC_MLT;
237 if ((wol->wolopts & WAKE_BCAST))
238 adapter->wake_up_evt |= PCH_GBE_WLC_BR;
239 if ((wol->wolopts & WAKE_MAGIC))
240 adapter->wake_up_evt |= PCH_GBE_WLC_MP;
241 return 0;
242}
243
244/**
245 * pch_gbe_nway_reset - Restart autonegotiation
246 * @netdev: Network interface device structure
247 * Returns
248 * 0: Successful.
249 * Negative value: Failed.
250 */
251static int pch_gbe_nway_reset(struct net_device *netdev)
252{
253 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
254
255 return mii_nway_restart(&adapter->mii);
256}
257
258/**
259 * pch_gbe_get_ringparam - Report ring sizes
260 * @netdev: Network interface device structure
261 * @ring: Ring param structure
262 */
263static void pch_gbe_get_ringparam(struct net_device *netdev,
264 struct ethtool_ringparam *ring)
265{
266 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
267 struct pch_gbe_tx_ring *txdr = adapter->tx_ring;
268 struct pch_gbe_rx_ring *rxdr = adapter->rx_ring;
269
270 ring->rx_max_pending = PCH_GBE_MAX_RXD;
271 ring->tx_max_pending = PCH_GBE_MAX_TXD;
272 ring->rx_mini_max_pending = 0;
273 ring->rx_jumbo_max_pending = 0;
274 ring->rx_pending = rxdr->count;
275 ring->tx_pending = txdr->count;
276 ring->rx_mini_pending = 0;
277 ring->rx_jumbo_pending = 0;
278}
279
280/**
281 * pch_gbe_set_ringparam - Set ring sizes
282 * @netdev: Network interface device structure
283 * @ring: Ring param structure
284 * Returns
285 * 0: Successful.
286 * Negative value: Failed.
287 */
288static int pch_gbe_set_ringparam(struct net_device *netdev,
289 struct ethtool_ringparam *ring)
290{
291 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
292 struct pch_gbe_tx_ring *txdr, *tx_old;
293 struct pch_gbe_rx_ring *rxdr, *rx_old;
294 int tx_ring_size, rx_ring_size;
295 int err = 0;
296
297 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
298 return -EINVAL;
299 tx_ring_size = (int)sizeof(struct pch_gbe_tx_ring);
300 rx_ring_size = (int)sizeof(struct pch_gbe_rx_ring);
301
302 if ((netif_running(adapter->netdev)))
303 pch_gbe_down(adapter);
304 tx_old = adapter->tx_ring;
305 rx_old = adapter->rx_ring;
306
307 txdr = kzalloc(tx_ring_size, GFP_KERNEL);
308 if (!txdr) {
309 err = -ENOMEM;
310 goto err_alloc_tx;
311 }
312 rxdr = kzalloc(rx_ring_size, GFP_KERNEL);
313 if (!rxdr) {
314 err = -ENOMEM;
315 goto err_alloc_rx;
316 }
317 adapter->tx_ring = txdr;
318 adapter->rx_ring = rxdr;
319
320 rxdr->count =
321 clamp_val(ring->rx_pending, PCH_GBE_MIN_RXD, PCH_GBE_MAX_RXD);
322 rxdr->count = roundup(rxdr->count, PCH_GBE_RX_DESC_MULTIPLE);
323
324 txdr->count =
325 clamp_val(ring->tx_pending, PCH_GBE_MIN_TXD, PCH_GBE_MAX_TXD);
326 txdr->count = roundup(txdr->count, PCH_GBE_TX_DESC_MULTIPLE);
327
328 if ((netif_running(adapter->netdev))) {
329 /* Try to get new resources before deleting old */
330 err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
331 if (err)
332 goto err_setup_rx;
333 err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
334 if (err)
335 goto err_setup_tx;
336 /* save the new, restore the old in order to free it,
337 * then restore the new back again */
338#ifdef RINGFREE
339 adapter->rx_ring = rx_old;
340 adapter->tx_ring = tx_old;
341 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
342 pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
343 kfree(tx_old);
344 kfree(rx_old);
345 adapter->rx_ring = rxdr;
346 adapter->tx_ring = txdr;
347#else
348 pch_gbe_free_rx_resources(adapter, rx_old);
349 pch_gbe_free_tx_resources(adapter, tx_old);
350 kfree(tx_old);
351 kfree(rx_old);
352 adapter->rx_ring = rxdr;
353 adapter->tx_ring = txdr;
354#endif
355 err = pch_gbe_up(adapter);
356 }
357 return err;
358
359err_setup_tx:
360 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
361err_setup_rx:
362 adapter->rx_ring = rx_old;
363 adapter->tx_ring = tx_old;
364 kfree(rxdr);
365err_alloc_rx:
366 kfree(txdr);
367err_alloc_tx:
368 if (netif_running(adapter->netdev))
369 pch_gbe_up(adapter);
370 return err;
371}
372
373/**
374 * pch_gbe_get_pauseparam - Report pause parameters
375 * @netdev: Network interface device structure
376 * @pause: Pause parameters structure
377 */
378static void pch_gbe_get_pauseparam(struct net_device *netdev,
379 struct ethtool_pauseparam *pause)
380{
381 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
382 struct pch_gbe_hw *hw = &adapter->hw;
383
384 pause->autoneg =
385 ((hw->mac.fc_autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE);
386
387 if (hw->mac.fc == PCH_GBE_FC_RX_PAUSE) {
388 pause->rx_pause = 1;
389 } else if (hw->mac.fc == PCH_GBE_FC_TX_PAUSE) {
390 pause->tx_pause = 1;
391 } else if (hw->mac.fc == PCH_GBE_FC_FULL) {
392 pause->rx_pause = 1;
393 pause->tx_pause = 1;
394 }
395}
396
397/**
398 * pch_gbe_set_pauseparam - Set pause parameters
399 * @netdev: Network interface device structure
400 * @pause: Pause parameters structure
401 * Returns
402 * 0: Successful.
403 * Negative value: Failed.
404 */
405static int pch_gbe_set_pauseparam(struct net_device *netdev,
406 struct ethtool_pauseparam *pause)
407{
408 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
409 struct pch_gbe_hw *hw = &adapter->hw;
410 int ret = 0;
411
412 hw->mac.fc_autoneg = pause->autoneg;
413 if ((pause->rx_pause) && (pause->tx_pause))
414 hw->mac.fc = PCH_GBE_FC_FULL;
415 else if ((pause->rx_pause) && (!pause->tx_pause))
416 hw->mac.fc = PCH_GBE_FC_RX_PAUSE;
417 else if ((!pause->rx_pause) && (pause->tx_pause))
418 hw->mac.fc = PCH_GBE_FC_TX_PAUSE;
419 else if ((!pause->rx_pause) && (!pause->tx_pause))
420 hw->mac.fc = PCH_GBE_FC_NONE;
421
422 if (hw->mac.fc_autoneg == AUTONEG_ENABLE) {
423 if ((netif_running(adapter->netdev))) {
424 pch_gbe_down(adapter);
425 ret = pch_gbe_up(adapter);
426 } else {
427 pch_gbe_reset(adapter);
428 }
429 } else {
430 ret = pch_gbe_mac_force_mac_fc(hw);
431 }
432 return ret;
433}
434
435/**
436 * pch_gbe_get_rx_csum - Report whether receive checksums are turned on or off
437 * @netdev: Network interface device structure
438 * Returns
439 * true(1): Checksum On
440 * false(0): Checksum Off
441 */
442static u32 pch_gbe_get_rx_csum(struct net_device *netdev)
443{
444 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
445
446 return adapter->rx_csum;
447}
448
449/**
450 * pch_gbe_set_rx_csum - Turn receive checksum on or off
451 * @netdev: Network interface device structure
452 * @data: Checksum On[true] or Off[false]
453 * Returns
454 * 0: Successful.
455 * Negative value: Failed.
456 */
457static int pch_gbe_set_rx_csum(struct net_device *netdev, u32 data)
458{
459 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
460
461 adapter->rx_csum = data;
462 if ((netif_running(netdev)))
463 pch_gbe_reinit_locked(adapter);
464 else
465 pch_gbe_reset(adapter);
466
467 return 0;
468}
469
470/**
471 * pch_gbe_get_tx_csum - Report whether transmit checksums are turned on or off
472 * @netdev: Network interface device structure
473 * Returns
474 * true(1): Checksum On
475 * false(0): Checksum Off
476 */
477static u32 pch_gbe_get_tx_csum(struct net_device *netdev)
478{
479 return (netdev->features & NETIF_F_HW_CSUM) != 0;
480}
481
482/**
483 * pch_gbe_set_tx_csum - Turn transmit checksums on or off
484 * @netdev: Network interface device structure
485 * @data: Checksum on[true] or off[false]
486 * Returns
487 * 0: Successful.
488 * Negative value: Failed.
489 */
490static int pch_gbe_set_tx_csum(struct net_device *netdev, u32 data)
491{
492 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
493
494 adapter->tx_csum = data;
495 if (data)
496 netdev->features |= NETIF_F_HW_CSUM;
497 else
498 netdev->features &= ~NETIF_F_HW_CSUM;
499 return 0;
500}
501
502/**
503 * pch_gbe_get_strings - Return a set of strings that describe the requested
504 * objects
505 * @netdev: Network interface device structure
506 * @stringset: Select the stringset. [ETH_SS_TEST] [ETH_SS_STATS]
507 * @data: Pointer of read string data.
508 */
509static void pch_gbe_get_strings(struct net_device *netdev, u32 stringset,
510 u8 *data)
511{
512 u8 *p = data;
513 int i;
514
515 switch (stringset) {
516 case (u32) ETH_SS_STATS:
517 for (i = 0; i < PCH_GBE_GLOBAL_STATS_LEN; i++) {
518 memcpy(p, pch_gbe_gstrings_stats[i].string,
519 ETH_GSTRING_LEN);
520 p += ETH_GSTRING_LEN;
521 }
522 break;
523 }
524}
525
526/**
527 * pch_gbe_get_ethtool_stats - Return statistics about the device
528 * @netdev: Network interface device structure
529 * @stats: Ethtool statistics structure
530 * @data: Pointer of read status area
531 */
532static void pch_gbe_get_ethtool_stats(struct net_device *netdev,
533 struct ethtool_stats *stats, u64 *data)
534{
535 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
536 int i;
537 const struct pch_gbe_stats *gstats = pch_gbe_gstrings_stats;
538 char *hw_stats = (char *)&adapter->stats;
539
540 pch_gbe_update_stats(adapter);
541 for (i = 0; i < PCH_GBE_GLOBAL_STATS_LEN; i++) {
542 char *p = hw_stats + gstats->offset;
543 data[i] = gstats->size == sizeof(u64) ? *(u64 *)p:(*(u32 *)p);
544 gstats++;
545 }
546}
547
548static int pch_gbe_get_sset_count(struct net_device *netdev, int sset)
549{
550 switch (sset) {
551 case ETH_SS_STATS:
552 return PCH_GBE_STATS_LEN;
553 default:
554 return -EOPNOTSUPP;
555 }
556}
557
558static const struct ethtool_ops pch_gbe_ethtool_ops = {
559 .get_settings = pch_gbe_get_settings,
560 .set_settings = pch_gbe_set_settings,
561 .get_drvinfo = pch_gbe_get_drvinfo,
562 .get_regs_len = pch_gbe_get_regs_len,
563 .get_regs = pch_gbe_get_regs,
564 .get_wol = pch_gbe_get_wol,
565 .set_wol = pch_gbe_set_wol,
566 .nway_reset = pch_gbe_nway_reset,
567 .get_link = ethtool_op_get_link,
568 .get_ringparam = pch_gbe_get_ringparam,
569 .set_ringparam = pch_gbe_set_ringparam,
570 .get_pauseparam = pch_gbe_get_pauseparam,
571 .set_pauseparam = pch_gbe_set_pauseparam,
572 .get_rx_csum = pch_gbe_get_rx_csum,
573 .set_rx_csum = pch_gbe_set_rx_csum,
574 .get_tx_csum = pch_gbe_get_tx_csum,
575 .set_tx_csum = pch_gbe_set_tx_csum,
576 .get_strings = pch_gbe_get_strings,
577 .get_ethtool_stats = pch_gbe_get_ethtool_stats,
578 .get_sset_count = pch_gbe_get_sset_count,
579};
580
581void pch_gbe_set_ethtool_ops(struct net_device *netdev)
582{
583 SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops);
584}
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
new file mode 100644
index 000000000000..53c56cf8aca2
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -0,0 +1,2473 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include "pch_gbe.h"
22#include "pch_gbe_api.h"
23
24#define DRV_VERSION "1.00"
25const char pch_driver_version[] = DRV_VERSION;
26
27#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* Pci device ID */
28#define PCH_GBE_MAR_ENTRIES 16
29#define PCH_GBE_SHORT_PKT 64
30#define DSC_INIT16 0xC000
31#define PCH_GBE_DMA_ALIGN 0
32#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
33#define PCH_GBE_COPYBREAK_DEFAULT 256
34#define PCH_GBE_PCI_BAR 1
35
36#define PCH_GBE_TX_WEIGHT 64
37#define PCH_GBE_RX_WEIGHT 64
38#define PCH_GBE_RX_BUFFER_WRITE 16
39
40/* Initialize the wake-on-LAN settings */
41#define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP)
42
43#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
44 PCH_GBE_CHIP_TYPE_INTERNAL | \
45 PCH_GBE_RGMII_MODE_RGMII | \
46 PCH_GBE_CRS_SEL \
47 )
48
49/* Ethertype field values */
50#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
51#define PCH_GBE_FRAME_SIZE_2048 2048
52#define PCH_GBE_FRAME_SIZE_4096 4096
53#define PCH_GBE_FRAME_SIZE_8192 8192
54
55#define PCH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
56#define PCH_GBE_RX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
57#define PCH_GBE_TX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
58#define PCH_GBE_DESC_UNUSED(R) \
59 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
60 (R)->next_to_clean - (R)->next_to_use - 1)
61
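/* Sketch, not part of this file: PCH_GBE_DESC_UNUSED keeps one slot
 * permanently empty so a full ring is distinguishable from an empty
 * one. E.g. with count = 256, next_to_clean = 10, next_to_use = 200
 * (clean <= use, so count is added in):
 *
 *	256 + 10 - 200 - 1 = 65 descriptors free
 */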
62/* Pause packet value */
63#define PCH_GBE_PAUSE_PKT1_VALUE 0x00C28001
64#define PCH_GBE_PAUSE_PKT2_VALUE 0x00000100
65#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
66#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
67
68#define PCH_GBE_ETH_ALEN 6
69
70/* This defines the bits that are set in the Interrupt Mask
71 * Set/Read Register, i.e. the events this driver handles.
72 * Each bit is documented below:
73 * o RX_DMA_CMPLT = Receive DMA Transfer Complete
74 * o RX_DSC_EMP = Receive Descriptor Empty
75 * o WOL_DET = Wake On LAN Event Detection
76 * o TX_CMPLT = MAC Transmission Complete
77 */
78#define PCH_GBE_INT_ENABLE_MASK ( \
79 PCH_GBE_INT_RX_DMA_CMPLT | \
80 PCH_GBE_INT_RX_DSC_EMP | \
81 PCH_GBE_INT_WOL_DET | \
82 PCH_GBE_INT_TX_CMPLT \
83 )
84
85
86static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
87
88/**
89 * pch_gbe_mac_read_mac_addr - Read MAC address
90 * @hw: Pointer to the HW structure
91 * Returns
92 * 0: Successful.
93 */
94s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
95{
96 u32 adr1a, adr1b;
97
98 adr1a = ioread32(&hw->reg->mac_adr[0].high);
99 adr1b = ioread32(&hw->reg->mac_adr[0].low);
100
101 hw->mac.addr[0] = (u8)(adr1a & 0xFF);
102 hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
103 hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
104 hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
105 hw->mac.addr[4] = (u8)(adr1b & 0xFF);
106 hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
107
108 pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
109 return 0;
110}
111
112/**
113 * pch_gbe_wait_clr_bit - Wait to clear a bit
114 * @reg: Pointer to the register
115 * @bit: Busy bit to wait for
116 */
117void pch_gbe_wait_clr_bit(void *reg, u32 bit)
118{
119 u32 tmp;
120 /* wait busy */
121 tmp = 1000;
122 while ((ioread32(reg) & bit) && --tmp)
123 cpu_relax();
124 if (!tmp)
125 pr_err("Error: busy bit is not cleared\n");
126}
127/**
128 * pch_gbe_mac_mar_set - Set MAC address register
129 * @hw: Pointer to the HW structure
130 * @addr: Pointer to the MAC address
131 * @index: MAC address array register
132 */
133void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
134{
135 u32 mar_low, mar_high, adrmask;
136
137 pr_debug("index : 0x%x\n", index);
138
139 /*
140 * HW expects these in little endian so we reverse the byte order
141 * from network order (big endian) to little endian
142 */
143 mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
144 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
145 mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
146	/* Disable the MAC address entry at this index */
147 adrmask = ioread32(&hw->reg->ADDR_MASK);
148 iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
149 /* wait busy */
150 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
151 /* Set the MAC address to the MAC address 1A/1B register */
152 iowrite32(mar_high, &hw->reg->mac_adr[index].high);
153 iowrite32(mar_low, &hw->reg->mac_adr[index].low);
154	/* Re-enable the MAC address entry at this index */
155 iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
156 /* wait busy */
157 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
158}
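/*
 * Sequence summary (not in the original source): every address entry
 * is guarded by one bit in ADDR_MASK.  Setting the bit parks the
 * entry, the 1A/1B registers are reprogrammed while it is disabled,
 * and clearing the bit re-arms the filter; the busy flag is polled
 * after each mask update.
 */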
159
160/**
161 * pch_gbe_mac_reset_hw - Reset hardware
162 * @hw: Pointer to the HW structure
163 */
164void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
165{
166	/* Read the MAC address and store it in the private data */
167 pch_gbe_mac_read_mac_addr(hw);
168 iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
169#ifdef PCH_GBE_MAC_IFOP_RGMII
170 iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
171#endif
172 pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
173 /* Setup the receive address */
174 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
175 return;
176}
177
178/**
179 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
180 * @hw: Pointer to the HW structure
181 * @mar_count: Number of receive address registers
182 */
183void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
184{
185 u32 i;
186
187 /* Setup the receive address */
188 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
189
190 /* Zero out the other receive addresses */
191 for (i = 1; i < mar_count; i++) {
192 iowrite32(0, &hw->reg->mac_adr[i].high);
193 iowrite32(0, &hw->reg->mac_adr[i].low);
194 }
195 iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
196 /* wait busy */
197 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
198}
199
200
201/**
202 * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
203 * @hw: Pointer to the HW structure
204 * @mc_addr_list: Array of multicast addresses to program
205 * @mc_addr_count: Number of multicast addresses to program
206 * @mar_used_count: The first MAC Address register free to program
207 * @mar_total_num: Total number of supported MAC Address Registers
208 */
209void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
210 u8 *mc_addr_list, u32 mc_addr_count,
211 u32 mar_used_count, u32 mar_total_num)
212{
213 u32 i, adrmask;
214
215	/* Load the multicast addresses into the exact-match MAC
216	 * address registers.  Any registers left over once the
217	 * list is exhausted are masked off and cleared.
218	 */
219 for (i = mar_used_count; i < mar_total_num; i++) {
220 if (mc_addr_count) {
221 pch_gbe_mac_mar_set(hw, mc_addr_list, i);
222 mc_addr_count--;
223 mc_addr_list += PCH_GBE_ETH_ALEN;
224 } else {
225 /* Clear MAC address mask */
226 adrmask = ioread32(&hw->reg->ADDR_MASK);
227 iowrite32((adrmask | (0x0001 << i)),
228 &hw->reg->ADDR_MASK);
229 /* wait busy */
230 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
231 /* Clear MAC address */
232 iowrite32(0, &hw->reg->mac_adr[i].high);
233 iowrite32(0, &hw->reg->mac_adr[i].low);
234 }
235 }
236}
237
238/**
239 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
240 * @hw: Pointer to the HW structure
241 * Returns
242 * 0: Successful.
243 * Negative value: Failed.
244 */
245s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
246{
247 struct pch_gbe_mac_info *mac = &hw->mac;
248 u32 rx_fctrl;
249
250 pr_debug("mac->fc = %u\n", mac->fc);
251
252 rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
253
254 switch (mac->fc) {
255 case PCH_GBE_FC_NONE:
256 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
257 mac->tx_fc_enable = false;
258 break;
259 case PCH_GBE_FC_RX_PAUSE:
260 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
261 mac->tx_fc_enable = false;
262 break;
263 case PCH_GBE_FC_TX_PAUSE:
264 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
265 mac->tx_fc_enable = true;
266 break;
267 case PCH_GBE_FC_FULL:
268 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
269 mac->tx_fc_enable = true;
270 break;
271 default:
272 pr_err("Flow control param set incorrectly\n");
273 return -EINVAL;
274 }
275 if (mac->link_duplex == DUPLEX_HALF)
276 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
277 iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
278 pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
279 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
280 return 0;
281}
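/*
 * Flow-control truth table (summary of the switch above, not in the
 * original source):
 *   mode                  honour received pause   send pause frames
 *   PCH_GBE_FC_NONE               no                     no
 *   PCH_GBE_FC_RX_PAUSE           yes                    no
 *   PCH_GBE_FC_TX_PAUSE           no                     yes
 *   PCH_GBE_FC_FULL               yes                    yes
 * At half duplex, receive flow control is forced off regardless.
 */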
282
283/**
284 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
285 * @hw: Pointer to the HW structure
286 * @wu_evt: Wake up event
287 */
288void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
289{
290 u32 addr_mask;
291
292 pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
293 wu_evt, ioread32(&hw->reg->ADDR_MASK));
294
295 if (wu_evt) {
296 /* Set Wake-On-Lan address mask */
297 addr_mask = ioread32(&hw->reg->ADDR_MASK);
298 iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
299 /* wait busy */
300 pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
301 iowrite32(0, &hw->reg->WOL_ST);
302 iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
303 iowrite32(0x02, &hw->reg->TCPIP_ACC);
304 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
305 } else {
306 iowrite32(0, &hw->reg->WOL_CTRL);
307 iowrite32(0, &hw->reg->WOL_ST);
308 }
309 return;
310}
311
312/**
313 * pch_gbe_mac_ctrl_miim - Control MIIM interface
314 * @hw: Pointer to the HW structure
315 * @addr: Address of PHY
316 * @dir: Operation (write or read)
317 * @reg: Access register of PHY
318 * @data: Write data.
319 *
320 * Returns: Read data (the register value, for a read operation).
321 */
322u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
323 u16 data)
324{
325 u32 data_out = 0;
326 unsigned int i;
327 unsigned long flags;
328
329 spin_lock_irqsave(&hw->miim_lock, flags);
330
331 for (i = 100; i; --i) {
332 if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
333 break;
334 udelay(20);
335 }
336 if (i == 0) {
337 pr_err("pch-gbe.miim won't go Ready\n");
338 spin_unlock_irqrestore(&hw->miim_lock, flags);
339 return 0; /* No way to indicate timeout error */
340 }
341 iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
342 (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
343 dir | data), &hw->reg->MIIM);
344 for (i = 0; i < 100; i++) {
345 udelay(20);
346 data_out = ioread32(&hw->reg->MIIM);
347 if ((data_out & PCH_GBE_MIIM_OPER_READY))
348 break;
349 }
350 spin_unlock_irqrestore(&hw->miim_lock, flags);
351
352 pr_debug("PHY %s: reg=%d, data=0x%04X\n",
353 dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
354 dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
355 return (u16) data_out;
356}
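/*
 * Usage sketch (illustrative, assuming the caller already holds a
 * valid hw pointer): reading BMSR from the current PHY would be
 *
 *	u16 bmsr = pch_gbe_mac_ctrl_miim(hw, hw->phy.addr,
 *					 PCH_GBE_MIIM_OPER_READ,
 *					 MII_BMSR, (u16)0);
 *
 * Note that a timeout also returns 0, which cannot be told apart
 * from a register that legitimately reads as zero.
 */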
357
358/**
359 * pch_gbe_mac_set_pause_packet - Set pause packet
360 * @hw: Pointer to the HW structure
361 */
362void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
363{
364 unsigned long tmp2, tmp3;
365
366 /* Set Pause packet */
367 tmp2 = hw->mac.addr[1];
368 tmp2 = (tmp2 << 8) | hw->mac.addr[0];
369 tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
370
371 tmp3 = hw->mac.addr[5];
372 tmp3 = (tmp3 << 8) | hw->mac.addr[4];
373 tmp3 = (tmp3 << 8) | hw->mac.addr[3];
374 tmp3 = (tmp3 << 8) | hw->mac.addr[2];
375
376 iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
377 iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
378 iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
379 iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
380 iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
381
382 /* Transmit Pause Packet */
383 iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
384
385 pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
386 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
387 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
388 ioread32(&hw->reg->PAUSE_PKT5));
389
390 return;
391}
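/*
 * Frame layout (assumption inferred from the constants): read
 * little-endian, the PAUSE_PKT registers spell out an 802.3x pause
 * frame.  PKT1 (0x00C28001) begins the reserved multicast destination
 * 01:80:C2:00:00:01, PKT2/PKT3 supply the remaining destination byte
 * plus the station source address, PKT4 (0x01000888) is EtherType
 * 0x8808 with opcode 0x0001, and PKT5 (0x0000FFFF) requests the
 * maximum pause time.
 */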
392
393
394/**
395 * pch_gbe_alloc_queues - Allocate memory for all rings
396 * @adapter: Board private structure to initialize
397 * Returns
398 * 0: Successfully
399 * Negative value: Failed
400 */
401static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
402{
403 int size;
404
405 size = (int)sizeof(struct pch_gbe_tx_ring);
406 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
407 if (!adapter->tx_ring)
408 return -ENOMEM;
409 size = (int)sizeof(struct pch_gbe_rx_ring);
410 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
411 if (!adapter->rx_ring) {
412 kfree(adapter->tx_ring);
413 return -ENOMEM;
414 }
415 return 0;
416}
417
418/**
419 * pch_gbe_init_stats - Initialize statistics
420 * @adapter: Board private structure to initialize
421 */
422static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
423{
424 memset(&adapter->stats, 0, sizeof(adapter->stats));
425 return;
426}
427
428/**
429 * pch_gbe_init_phy - Initialize PHY
430 * @adapter: Board private structure to initialize
431 * Returns
432 * 0: Successfully
433 * Negative value: Failed
434 */
435static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
436{
437 struct net_device *netdev = adapter->netdev;
438 u32 addr;
439 u16 bmcr, stat;
440
441 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
442 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
443 adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
444 bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
445 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
446 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
447 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
448 break;
449 }
450 adapter->hw.phy.addr = adapter->mii.phy_id;
451 pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
452 if (addr == 32)
453 return -EAGAIN;
454	/* Select the PHY and isolate the rest */
455 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
456 if (addr != adapter->mii.phy_id) {
457 pch_gbe_mdio_write(netdev, addr, MII_BMCR,
458 BMCR_ISOLATE);
459 } else {
460 bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
461 pch_gbe_mdio_write(netdev, addr, MII_BMCR,
462 bmcr & ~BMCR_ISOLATE);
463 }
464 }
465
466 /* MII setup */
467 adapter->mii.phy_id_mask = 0x1F;
468 adapter->mii.reg_num_mask = 0x1F;
469 adapter->mii.dev = adapter->netdev;
470 adapter->mii.mdio_read = pch_gbe_mdio_read;
471 adapter->mii.mdio_write = pch_gbe_mdio_write;
472 adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
473 return 0;
474}
475
476/**
477 * pch_gbe_mdio_read - The read function for mii
478 * @netdev: Network interface device structure
479 * @addr: Phy ID
480 * @reg: Access location
481 * Returns
482 * The data read from the PHY register
483 * (0 is also returned on an MIIM timeout)
484 */
485int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
486{
487 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
488 struct pch_gbe_hw *hw = &adapter->hw;
489
490 return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
491 (u16) 0);
492}
493
494/**
495 * pch_gbe_mdio_write - The write function for mii
496 * @netdev: Network interface device structure
497 * @addr: Phy ID
498 * @reg: Access location
499 * @data: Write data
500 */
501void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, int data)
502{
503 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
504 struct pch_gbe_hw *hw = &adapter->hw;
505
506 pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
507}
508
509/**
510 * pch_gbe_reset_task - Reset processing triggered by a transmit timeout
511 * @work: Pointer to the work structure embedded in the board private structure
512 */
513static void pch_gbe_reset_task(struct work_struct *work)
514{
515 struct pch_gbe_adapter *adapter;
516 adapter = container_of(work, struct pch_gbe_adapter, reset_task);
517
518 pch_gbe_reinit_locked(adapter);
519}
520
521/**
522 * pch_gbe_reinit_locked - Re-initialize the device
523 * @adapter: Board private structure
524 */
525void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
526{
527 struct net_device *netdev = adapter->netdev;
528
529 rtnl_lock();
530 if (netif_running(netdev)) {
531 pch_gbe_down(adapter);
532 pch_gbe_up(adapter);
533 }
534 rtnl_unlock();
535}
536
537/**
538 * pch_gbe_reset - Reset GbE
539 * @adapter: Board private structure
540 */
541void pch_gbe_reset(struct pch_gbe_adapter *adapter)
542{
543 pch_gbe_mac_reset_hw(&adapter->hw);
544 /* Setup the receive address. */
545 pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
546 if (pch_gbe_hal_init_hw(&adapter->hw))
547 pr_err("Hardware Error\n");
548}
549
550/**
551 * pch_gbe_free_irq - Free an interrupt
552 * @adapter: Board private structure
553 */
554static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
555{
556 struct net_device *netdev = adapter->netdev;
557
558 free_irq(adapter->pdev->irq, netdev);
559 if (adapter->have_msi) {
560 pci_disable_msi(adapter->pdev);
561 pr_debug("call pci_disable_msi\n");
562 }
563}
564
565/**
566 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
567 * @adapter: Board private structure
568 */
569static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
570{
571 struct pch_gbe_hw *hw = &adapter->hw;
572
573 atomic_inc(&adapter->irq_sem);
574 iowrite32(0, &hw->reg->INT_EN);
575 ioread32(&hw->reg->INT_ST);
576 synchronize_irq(adapter->pdev->irq);
577
578 pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
579}
580
581/**
582 * pch_gbe_irq_enable - Enable default interrupt generation settings
583 * @adapter: Board private structure
584 */
585static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
586{
587 struct pch_gbe_hw *hw = &adapter->hw;
588
589 if (likely(atomic_dec_and_test(&adapter->irq_sem)))
590 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
591 ioread32(&hw->reg->INT_ST);
592 pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
593}
594
595
596
597/**
598 * pch_gbe_setup_tctl - configure the Transmit control registers
599 * @adapter: Board private structure
600 */
601static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
602{
603 struct pch_gbe_hw *hw = &adapter->hw;
604 u32 tx_mode, tcpip;
605
606 tx_mode = PCH_GBE_TM_LONG_PKT |
607 PCH_GBE_TM_ST_AND_FD |
608 PCH_GBE_TM_SHORT_PKT |
609 PCH_GBE_TM_TH_TX_STRT_8 |
610 PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
611
612 iowrite32(tx_mode, &hw->reg->TX_MODE);
613
614 tcpip = ioread32(&hw->reg->TCPIP_ACC);
615 tcpip |= PCH_GBE_TX_TCPIPACC_EN;
616 iowrite32(tcpip, &hw->reg->TCPIP_ACC);
617 return;
618}
619
620/**
621 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
622 * @adapter: Board private structure
623 */
624static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
625{
626 struct pch_gbe_hw *hw = &adapter->hw;
627 u32 tdba, tdlen, dctrl;
628
629 pr_debug("dma addr = 0x%08llx size = 0x%08x\n",
630 (unsigned long long)adapter->tx_ring->dma,
631 adapter->tx_ring->size);
632
633 /* Setup the HW Tx Head and Tail descriptor pointers */
634 tdba = adapter->tx_ring->dma;
635 tdlen = adapter->tx_ring->size - 0x10;
636 iowrite32(tdba, &hw->reg->TX_DSC_BASE);
637 iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
638 iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
639
640 /* Enables Transmission DMA */
641 dctrl = ioread32(&hw->reg->DMA_CTRL);
642 dctrl |= PCH_GBE_TX_DMA_EN;
643 iowrite32(dctrl, &hw->reg->DMA_CTRL);
644}
645
646/**
647 * pch_gbe_setup_rctl - Configure the receive control registers
648 * @adapter: Board private structure
649 */
650static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
651{
652 struct pch_gbe_hw *hw = &adapter->hw;
653 u32 rx_mode, tcpip;
654
655 rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
656 PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
657
658 iowrite32(rx_mode, &hw->reg->RX_MODE);
659
660 tcpip = ioread32(&hw->reg->TCPIP_ACC);
661
662 if (adapter->rx_csum) {
663 tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
664 tcpip |= PCH_GBE_RX_TCPIPACC_EN;
665 } else {
666 tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
667 tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
668 }
669 iowrite32(tcpip, &hw->reg->TCPIP_ACC);
670 return;
671}
672
673/**
674 * pch_gbe_configure_rx - Configure Receive Unit after Reset
675 * @adapter: Board private structure
676 */
677static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
678{
679 struct pch_gbe_hw *hw = &adapter->hw;
680 u32 rdba, rdlen, rctl, rxdma;
681
682	pr_debug("dma addr = 0x%08llx size = 0x%08x\n",
683 (unsigned long long)adapter->rx_ring->dma,
684 adapter->rx_ring->size);
685
686 pch_gbe_mac_force_mac_fc(hw);
687
688 /* Disables Receive MAC */
689 rctl = ioread32(&hw->reg->MAC_RX_EN);
690 iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
691
692 /* Disables Receive DMA */
693 rxdma = ioread32(&hw->reg->DMA_CTRL);
694 rxdma &= ~PCH_GBE_RX_DMA_EN;
695 iowrite32(rxdma, &hw->reg->DMA_CTRL);
696
697 pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
698 ioread32(&hw->reg->MAC_RX_EN),
699 ioread32(&hw->reg->DMA_CTRL));
700
701 /* Setup the HW Rx Head and Tail Descriptor Pointers and
702 * the Base and Length of the Rx Descriptor Ring */
703 rdba = adapter->rx_ring->dma;
704 rdlen = adapter->rx_ring->size - 0x10;
705 iowrite32(rdba, &hw->reg->RX_DSC_BASE);
706 iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
707 iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
708
709 /* Enables Receive DMA */
710 rxdma = ioread32(&hw->reg->DMA_CTRL);
711 rxdma |= PCH_GBE_RX_DMA_EN;
712 iowrite32(rxdma, &hw->reg->DMA_CTRL);
713 /* Enables Receive */
714 iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
715}
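/*
 * Note (assumption, not in the original source): the software
 * descriptor pointer is initialised to rdba + rdlen, one past the
 * last descriptor, so the freshly allocated ring appears entirely
 * available for receive DMA.
 */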
716
717/**
718 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
719 * @adapter: Board private structure
720 * @buffer_info: Buffer information structure
721 */
722static void pch_gbe_unmap_and_free_tx_resource(
723 struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
724{
725 if (buffer_info->mapped) {
726 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
727 buffer_info->length, DMA_TO_DEVICE);
728 buffer_info->mapped = false;
729 }
730 if (buffer_info->skb) {
731 dev_kfree_skb_any(buffer_info->skb);
732 buffer_info->skb = NULL;
733 }
734}
735
736/**
737 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
738 * @adapter: Board private structure
739 * @buffer_info: Buffer information structure
740 */
741static void pch_gbe_unmap_and_free_rx_resource(
742 struct pch_gbe_adapter *adapter,
743 struct pch_gbe_buffer *buffer_info)
744{
745 if (buffer_info->mapped) {
746 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
747 buffer_info->length, DMA_FROM_DEVICE);
748 buffer_info->mapped = false;
749 }
750 if (buffer_info->skb) {
751 dev_kfree_skb_any(buffer_info->skb);
752 buffer_info->skb = NULL;
753 }
754}
755
756/**
757 * pch_gbe_clean_tx_ring - Free Tx Buffers
758 * @adapter: Board private structure
759 * @tx_ring: Ring to be cleaned
760 */
761static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
762 struct pch_gbe_tx_ring *tx_ring)
763{
764 struct pch_gbe_hw *hw = &adapter->hw;
765 struct pch_gbe_buffer *buffer_info;
766 unsigned long size;
767 unsigned int i;
768
769 /* Free all the Tx ring sk_buffs */
770 for (i = 0; i < tx_ring->count; i++) {
771 buffer_info = &tx_ring->buffer_info[i];
772 pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
773 }
774 pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
775
776 size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
777 memset(tx_ring->buffer_info, 0, size);
778
779 /* Zero out the descriptor ring */
780 memset(tx_ring->desc, 0, tx_ring->size);
781 tx_ring->next_to_use = 0;
782 tx_ring->next_to_clean = 0;
783 iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
784 iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
785}
786
787/**
788 * pch_gbe_clean_rx_ring - Free Rx Buffers
789 * @adapter: Board private structure
790 * @rx_ring: Ring to free buffers from
791 */
792static void
793pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
794 struct pch_gbe_rx_ring *rx_ring)
795{
796 struct pch_gbe_hw *hw = &adapter->hw;
797 struct pch_gbe_buffer *buffer_info;
798 unsigned long size;
799 unsigned int i;
800
801 /* Free all the Rx ring sk_buffs */
802 for (i = 0; i < rx_ring->count; i++) {
803 buffer_info = &rx_ring->buffer_info[i];
804 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
805 }
806 pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
807 size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
808 memset(rx_ring->buffer_info, 0, size);
809
810 /* Zero out the descriptor ring */
811 memset(rx_ring->desc, 0, rx_ring->size);
812 rx_ring->next_to_clean = 0;
813 rx_ring->next_to_use = 0;
814 iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
815 iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
816}
817
818static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
819 u16 duplex)
820{
821 struct pch_gbe_hw *hw = &adapter->hw;
822 unsigned long rgmii = 0;
823
824 /* Set the RGMII control. */
825#ifdef PCH_GBE_MAC_IFOP_RGMII
826 switch (speed) {
827 case SPEED_10:
828 rgmii = (PCH_GBE_RGMII_RATE_2_5M |
829 PCH_GBE_MAC_RGMII_CTRL_SETTING);
830 break;
831 case SPEED_100:
832 rgmii = (PCH_GBE_RGMII_RATE_25M |
833 PCH_GBE_MAC_RGMII_CTRL_SETTING);
834 break;
835 case SPEED_1000:
836 rgmii = (PCH_GBE_RGMII_RATE_125M |
837 PCH_GBE_MAC_RGMII_CTRL_SETTING);
838 break;
839 }
840 iowrite32(rgmii, &hw->reg->RGMII_CTRL);
841#else /* GMII */
842 rgmii = 0;
843 iowrite32(rgmii, &hw->reg->RGMII_CTRL);
844#endif
845}
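/*
 * Rate mapping (for reference): the RGMII reference clock tracks the
 * link speed (2.5 MHz for 10 Mbps, 25 MHz for 100 Mbps, 125 MHz for
 * 1000 Mbps), which is what the three PCH_GBE_RGMII_RATE_* constants
 * select.
 */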
846static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
847 u16 duplex)
848{
849 struct net_device *netdev = adapter->netdev;
850 struct pch_gbe_hw *hw = &adapter->hw;
851 unsigned long mode = 0;
852
853 /* Set the communication mode */
854 switch (speed) {
855 case SPEED_10:
856 mode = PCH_GBE_MODE_MII_ETHER;
857 netdev->tx_queue_len = 10;
858 break;
859 case SPEED_100:
860 mode = PCH_GBE_MODE_MII_ETHER;
861 netdev->tx_queue_len = 100;
862 break;
863 case SPEED_1000:
864 mode = PCH_GBE_MODE_GMII_ETHER;
865 break;
866 }
867 if (duplex == DUPLEX_FULL)
868 mode |= PCH_GBE_MODE_FULL_DUPLEX;
869 else
870 mode |= PCH_GBE_MODE_HALF_DUPLEX;
871 iowrite32(mode, &hw->reg->MODE);
872}
873
874/**
875 * pch_gbe_watchdog - Watchdog process
876 * @data: Board private structure (passed as an unsigned long)
877 */
878static void pch_gbe_watchdog(unsigned long data)
879{
880 struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
881 struct net_device *netdev = adapter->netdev;
882 struct pch_gbe_hw *hw = &adapter->hw;
883 struct ethtool_cmd cmd;
884
885 pr_debug("right now = %ld\n", jiffies);
886
887 pch_gbe_update_stats(adapter);
888 if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
889 netdev->tx_queue_len = adapter->tx_queue_len;
890 /* mii library handles link maintenance tasks */
891 if (mii_ethtool_gset(&adapter->mii, &cmd)) {
892 pr_err("ethtool get setting Error\n");
893 mod_timer(&adapter->watchdog_timer,
894 round_jiffies(jiffies +
895 PCH_GBE_WATCHDOG_PERIOD));
896 return;
897 }
898 hw->mac.link_speed = cmd.speed;
899 hw->mac.link_duplex = cmd.duplex;
900 /* Set the RGMII control. */
901 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
902 hw->mac.link_duplex);
903 /* Set the communication mode */
904 pch_gbe_set_mode(adapter, hw->mac.link_speed,
905 hw->mac.link_duplex);
906 netdev_dbg(netdev,
907 "Link is Up %d Mbps %s-Duplex\n",
908 cmd.speed,
909 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
910 netif_carrier_on(netdev);
911 netif_wake_queue(netdev);
912 } else if ((!mii_link_ok(&adapter->mii)) &&
913 (netif_carrier_ok(netdev))) {
914 netdev_dbg(netdev, "NIC Link is Down\n");
915 hw->mac.link_speed = SPEED_10;
916 hw->mac.link_duplex = DUPLEX_HALF;
917 netif_carrier_off(netdev);
918 netif_stop_queue(netdev);
919 }
920 mod_timer(&adapter->watchdog_timer,
921 round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
922}
923
924/**
925 * pch_gbe_tx_queue - Carry out queuing of the transmission data
926 * @adapter: Board private structure
927 * @tx_ring: Tx descriptor ring structure
928 * @skb: Socket buffer structure
929 */
930static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
931 struct pch_gbe_tx_ring *tx_ring,
932 struct sk_buff *skb)
933{
934 struct pch_gbe_hw *hw = &adapter->hw;
935 struct pch_gbe_tx_desc *tx_desc;
936 struct pch_gbe_buffer *buffer_info;
937 struct sk_buff *tmp_skb;
938 unsigned int frame_ctrl;
939 unsigned int ring_num;
940 unsigned long flags;
941
942 /*-- Set frame control --*/
943 frame_ctrl = 0;
944 if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
945 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
946 if (unlikely(!adapter->tx_csum))
947 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
948
949	/* Perform checksum processing in software:
950	 * the hardware accelerator cannot checksum frames shorter
951	 * than 64 bytes, so offload is disabled for short packets
952	 * and the checksum is computed here instead.
953	 */
954 if ((skb->len < PCH_GBE_SHORT_PKT) && (adapter->tx_csum)) {
955 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
956 PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
957 if (skb->protocol == htons(ETH_P_IP)) {
958 struct iphdr *iph = ip_hdr(skb);
959 unsigned int offset;
960 iph->check = 0;
961 iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
962 offset = skb_transport_offset(skb);
963 if (iph->protocol == IPPROTO_TCP) {
964 skb->csum = 0;
965 tcp_hdr(skb)->check = 0;
966 skb->csum = skb_checksum(skb, offset,
967 skb->len - offset, 0);
968 tcp_hdr(skb)->check =
969 csum_tcpudp_magic(iph->saddr,
970 iph->daddr,
971 skb->len - offset,
972 IPPROTO_TCP,
973 skb->csum);
974 } else if (iph->protocol == IPPROTO_UDP) {
975 skb->csum = 0;
976 udp_hdr(skb)->check = 0;
977 skb->csum =
978 skb_checksum(skb, offset,
979 skb->len - offset, 0);
980 udp_hdr(skb)->check =
981 csum_tcpudp_magic(iph->saddr,
982 iph->daddr,
983 skb->len - offset,
984 IPPROTO_UDP,
985 skb->csum);
986 }
987 }
988 }
989 spin_lock_irqsave(&tx_ring->tx_lock, flags);
990 ring_num = tx_ring->next_to_use;
991 if (unlikely((ring_num + 1) == tx_ring->count))
992 tx_ring->next_to_use = 0;
993 else
994 tx_ring->next_to_use = ring_num + 1;
995
996 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
997 buffer_info = &tx_ring->buffer_info[ring_num];
998 tmp_skb = buffer_info->skb;
999
1000	/* [Header:14][payload] ---> [Header:14][padding:2][payload] */
1001 memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1002 tmp_skb->data[ETH_HLEN] = 0x00;
1003 tmp_skb->data[ETH_HLEN + 1] = 0x00;
1004 tmp_skb->len = skb->len;
1005 memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1006 (skb->len - ETH_HLEN));
1007	/*-- Set buffer information --*/
1008 buffer_info->length = tmp_skb->len;
1009 buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1010 buffer_info->length,
1011 DMA_TO_DEVICE);
1012 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1013 pr_err("TX DMA map failed\n");
1014 buffer_info->dma = 0;
1015 buffer_info->time_stamp = 0;
1016 tx_ring->next_to_use = ring_num;
1017 return;
1018 }
1019 buffer_info->mapped = true;
1020 buffer_info->time_stamp = jiffies;
1021
1022 /*-- Set Tx descriptor --*/
1023 tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1024 tx_desc->buffer_addr = (buffer_info->dma);
1025 tx_desc->length = (tmp_skb->len);
1026 tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1027 tx_desc->tx_frame_ctrl = (frame_ctrl);
1028 tx_desc->gbec_status = (DSC_INIT16);
1029
1030 if (unlikely(++ring_num == tx_ring->count))
1031 ring_num = 0;
1032
1033 /* Update software pointer of TX descriptor */
1034 iowrite32(tx_ring->dma +
1035 (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1036 &hw->reg->TX_DSC_SW_P);
1037 dev_kfree_skb_any(skb);
1038}
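/*
 * Copy layout (illustrative): the memcpy pair above rewrites
 *	[dst:6][src:6][type:2][payload]
 * as
 *	[dst:6][src:6][type:2][00 00][payload]
 * inserting two zero bytes after the 14-byte Ethernet header,
 * presumably so the IP header is 4-byte aligned for the checksum
 * offload engine (the receive path strips the same 2 bytes).
 */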
1039
1040/**
1041 * pch_gbe_update_stats - Update the board statistics counters
1042 * @adapter: Board private structure
1043 */
1044void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1045{
1046 struct net_device *netdev = adapter->netdev;
1047 struct pci_dev *pdev = adapter->pdev;
1048 struct pch_gbe_hw_stats *stats = &adapter->stats;
1049 unsigned long flags;
1050
1051 /*
1052 * Prevent stats update while adapter is being reset, or if the pci
1053 * connection is down.
1054 */
1055 if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1056 return;
1057
1058 spin_lock_irqsave(&adapter->stats_lock, flags);
1059
1060 /* Update device status "adapter->stats" */
1061 stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1062 stats->tx_errors = stats->tx_length_errors +
1063 stats->tx_aborted_errors +
1064 stats->tx_carrier_errors + stats->tx_timeout_count;
1065
1066 /* Update network device status "adapter->net_stats" */
1067 netdev->stats.rx_packets = stats->rx_packets;
1068 netdev->stats.rx_bytes = stats->rx_bytes;
1069 netdev->stats.rx_dropped = stats->rx_dropped;
1070 netdev->stats.tx_packets = stats->tx_packets;
1071 netdev->stats.tx_bytes = stats->tx_bytes;
1072 netdev->stats.tx_dropped = stats->tx_dropped;
1073 /* Fill out the OS statistics structure */
1074 netdev->stats.multicast = stats->multicast;
1075 netdev->stats.collisions = stats->collisions;
1076 /* Rx Errors */
1077 netdev->stats.rx_errors = stats->rx_errors;
1078 netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1079 netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1080 /* Tx Errors */
1081 netdev->stats.tx_errors = stats->tx_errors;
1082 netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1083 netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1084
1085 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1086}
1087
1088/**
1089 * pch_gbe_intr - Interrupt Handler
1090 * @irq: Interrupt number
1091 * @data: Pointer to a network interface device structure
1092 * Returns
1093 * - IRQ_HANDLED: Our interrupt
1094 * - IRQ_NONE: Not our interrupt
1095 */
1096static irqreturn_t pch_gbe_intr(int irq, void *data)
1097{
1098 struct net_device *netdev = data;
1099 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1100 struct pch_gbe_hw *hw = &adapter->hw;
1101 u32 int_st;
1102 u32 int_en;
1103
1104 /* Check request status */
1105 int_st = ioread32(&hw->reg->INT_ST);
1106 int_st = int_st & ioread32(&hw->reg->INT_EN);
1107	/* No pending interrupt source: not our interrupt */
1108 if (unlikely(!int_st))
1109 return IRQ_NONE; /* Not our interrupt. End processing. */
1110 pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
1111 if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1112 adapter->stats.intr_rx_frame_err_count++;
1113 if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1114 adapter->stats.intr_rx_fifo_err_count++;
1115 if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1116 adapter->stats.intr_rx_dma_err_count++;
1117 if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
1118 adapter->stats.intr_tx_fifo_err_count++;
1119 if (int_st & PCH_GBE_INT_TX_DMA_ERR)
1120 adapter->stats.intr_tx_dma_err_count++;
1121 if (int_st & PCH_GBE_INT_TCPIP_ERR)
1122 adapter->stats.intr_tcpip_err_count++;
1123	/* Rx descriptor ring has run empty */
1124 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1125 adapter->stats.intr_rx_dsc_empty_count++;
1126 pr_err("Rx descriptor is empty\n");
1127 int_en = ioread32(&hw->reg->INT_EN);
1128 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1129 if (hw->mac.tx_fc_enable) {
1130 /* Set Pause packet */
1131 pch_gbe_mac_set_pause_packet(hw);
1132 }
1133 if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
1134 == 0) {
1135 return IRQ_HANDLED;
1136 }
1137 }
1138
1139	/* Rx DMA or Tx completion: hand the work to NAPI */
1140 if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
1141 if (likely(napi_schedule_prep(&adapter->napi))) {
1142	/* Mask Rx/Tx completion interrupts while NAPI polls */
1143 atomic_inc(&adapter->irq_sem);
1144 int_en = ioread32(&hw->reg->INT_EN);
1145 int_en &=
1146 ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
1147 iowrite32(int_en, &hw->reg->INT_EN);
1148 /* Start polling for NAPI */
1149 __napi_schedule(&adapter->napi);
1150 }
1151 }
1152 pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
1153 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1154 return IRQ_HANDLED;
1155}
1156
1157/**
1158 * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
1159 * @adapter: Board private structure
1160 * @rx_ring: Rx descriptor ring
1161 * @cleaned_count: Cleaned count
1162 */
1163static void
1164pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1165 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1166{
1167 struct net_device *netdev = adapter->netdev;
1168 struct pci_dev *pdev = adapter->pdev;
1169 struct pch_gbe_hw *hw = &adapter->hw;
1170 struct pch_gbe_rx_desc *rx_desc;
1171 struct pch_gbe_buffer *buffer_info;
1172 struct sk_buff *skb;
1173 unsigned int i;
1174 unsigned int bufsz;
1175
1176 bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
1177 i = rx_ring->next_to_use;
1178
1179 while ((cleaned_count--)) {
1180 buffer_info = &rx_ring->buffer_info[i];
1181 skb = buffer_info->skb;
1182 if (skb) {
1183 skb_trim(skb, 0);
1184 } else {
1185 skb = netdev_alloc_skb(netdev, bufsz);
1186 if (unlikely(!skb)) {
1187 /* Better luck next round */
1188 adapter->stats.rx_alloc_buff_failed++;
1189 break;
1190 }
1191	/* reserve PCH_GBE_DMA_ALIGN bytes for DMA alignment */
1192 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1193
1194 buffer_info->skb = skb;
1195 buffer_info->length = adapter->rx_buffer_len;
1196 }
1197 buffer_info->dma = dma_map_single(&pdev->dev,
1198 skb->data,
1199 buffer_info->length,
1200 DMA_FROM_DEVICE);
1201 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1202 dev_kfree_skb(skb);
1203 buffer_info->skb = NULL;
1204 buffer_info->dma = 0;
1205 adapter->stats.rx_alloc_buff_failed++;
1206 break; /* while !buffer_info->skb */
1207 }
1208 buffer_info->mapped = true;
1209 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1210 rx_desc->buffer_addr = (buffer_info->dma);
1211 rx_desc->gbec_status = DSC_INIT16;
1212
1213	pr_debug("i = %d buffer_info->dma = 0x%08llx buffer_info->length = 0x%x\n",
1214 i, (unsigned long long)buffer_info->dma,
1215 buffer_info->length);
1216
1217 if (unlikely(++i == rx_ring->count))
1218 i = 0;
1219 }
1220 if (likely(rx_ring->next_to_use != i)) {
1221 rx_ring->next_to_use = i;
1222 if (unlikely(i-- == 0))
1223 i = (rx_ring->count - 1);
1224 iowrite32(rx_ring->dma +
1225 (int)sizeof(struct pch_gbe_rx_desc) * i,
1226 &hw->reg->RX_DSC_SW_P);
1227 }
1228 return;
1229}
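/*
 * Note (not in the original source): RX_DSC_SW_P must point at the
 * last descriptor actually handed to the hardware rather than the
 * next free slot, hence the wrap-aware decrement of i before the
 * final iowrite32() above.
 */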
1230
1231/**
1232 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1233 * @adapter: Board private structure
1234 * @tx_ring: Tx descriptor ring
1235 */
1236static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1237 struct pch_gbe_tx_ring *tx_ring)
1238{
1239 struct pch_gbe_buffer *buffer_info;
1240 struct sk_buff *skb;
1241 unsigned int i;
1242 unsigned int bufsz;
1243 struct pch_gbe_tx_desc *tx_desc;
1244
1245 bufsz =
1246 adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1247
1248 for (i = 0; i < tx_ring->count; i++) {
1249 buffer_info = &tx_ring->buffer_info[i];
1250 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1251 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1252 buffer_info->skb = skb;
1253 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1254 tx_desc->gbec_status = (DSC_INIT16);
1255 }
1256 return;
1257}
1258
1259/**
1260 * pch_gbe_clean_tx - Reclaim resources after transmit completes
1261 * @adapter: Board private structure
1262 * @tx_ring: Tx descriptor ring
1263 * Returns
1264 * true: At least one descriptor was cleaned
1265 * false: No descriptor was cleaned
1266 */
1267static bool
1268pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1269 struct pch_gbe_tx_ring *tx_ring)
1270{
1271 struct pch_gbe_tx_desc *tx_desc;
1272 struct pch_gbe_buffer *buffer_info;
1273 struct sk_buff *skb;
1274 unsigned int i;
1275 unsigned int cleaned_count = 0;
1276 bool cleaned = false;
1277
1278 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1279
1280 i = tx_ring->next_to_clean;
1281 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1282 pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",
1283 tx_desc->gbec_status, tx_desc->dma_status);
1284
1285 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1286 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
1287 cleaned = true;
1288 buffer_info = &tx_ring->buffer_info[i];
1289 skb = buffer_info->skb;
1290
1291 if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1292 adapter->stats.tx_aborted_errors++;
1293 pr_err("Transfer Abort Error\n");
1294 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1295 ) {
1296 adapter->stats.tx_carrier_errors++;
1297 pr_err("Transfer Carrier Sense Error\n");
1298 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1299 ) {
1300 adapter->stats.tx_aborted_errors++;
1301 pr_err("Transfer Collision Abort Error\n");
1302 } else if ((tx_desc->gbec_status &
1303 (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1304 PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1305 adapter->stats.collisions++;
1306 adapter->stats.tx_packets++;
1307 adapter->stats.tx_bytes += skb->len;
1308 pr_debug("Transfer Collision\n");
1309 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1310 ) {
1311 adapter->stats.tx_packets++;
1312 adapter->stats.tx_bytes += skb->len;
1313 }
1314 if (buffer_info->mapped) {
1315 pr_debug("unmap buffer_info->dma : %d\n", i);
1316 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1317 buffer_info->length, DMA_TO_DEVICE);
1318 buffer_info->mapped = false;
1319 }
1320 if (buffer_info->skb) {
1321 pr_debug("trim buffer_info->skb : %d\n", i);
1322 skb_trim(buffer_info->skb, 0);
1323 }
1324 tx_desc->gbec_status = DSC_INIT16;
1325 if (unlikely(++i == tx_ring->count))
1326 i = 0;
1327 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1328
1329	/* bound the cleanup work per call to avoid endless transmit cleanup */
1330 if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
1331 break;
1332 }
1333 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1334 cleaned_count);
1335 /* Recover from running out of Tx resources in xmit_frame */
1336 if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
1337 netif_wake_queue(adapter->netdev);
1338 adapter->stats.tx_restart_count++;
1339 pr_debug("Tx wake queue\n");
1340 }
1341 spin_lock(&adapter->tx_queue_lock);
1342 tx_ring->next_to_clean = i;
1343 spin_unlock(&adapter->tx_queue_lock);
1344 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1345 return cleaned;
1346}
1347
1348/**
1349 * pch_gbe_clean_rx - Send received data up the network stack; legacy
1350 * @adapter: Board private structure
1351 * @rx_ring: Rx descriptor ring
1352 * @work_done: Completed count
1353 * @work_to_do: Request count
1354 * Returns
1355 * true: At least one descriptor was cleaned
1356 * false: No descriptor was cleaned
1357 */
1358static bool
1359pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1360 struct pch_gbe_rx_ring *rx_ring,
1361 int *work_done, int work_to_do)
1362{
1363 struct net_device *netdev = adapter->netdev;
1364 struct pci_dev *pdev = adapter->pdev;
1365 struct pch_gbe_buffer *buffer_info;
1366 struct pch_gbe_rx_desc *rx_desc;
1367 u32 length;
1368 unsigned char tmp_packet[ETH_HLEN];
1369 unsigned int i;
1370 unsigned int cleaned_count = 0;
1371 bool cleaned = false;
1372 struct sk_buff *skb;
1373 u8 dma_status;
1374 u16 gbec_status;
1375 u32 tcp_ip_status;
1376 u8 skb_copy_flag = 0;
1377 u8 skb_padding_flag = 0;
1378
1379 i = rx_ring->next_to_clean;
1380
1381 while (*work_done < work_to_do) {
1382 /* Check Rx descriptor status */
1383 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1384 if (rx_desc->gbec_status == DSC_INIT16)
1385 break;
1386 cleaned = true;
1387 cleaned_count++;
1388
1389 dma_status = rx_desc->dma_status;
1390 gbec_status = rx_desc->gbec_status;
1391 tcp_ip_status = rx_desc->tcp_ip_status;
1392 rx_desc->gbec_status = DSC_INIT16;
1393 buffer_info = &rx_ring->buffer_info[i];
1394 skb = buffer_info->skb;
1395
1396 /* unmap dma */
1397 dma_unmap_single(&pdev->dev, buffer_info->dma,
1398 buffer_info->length, DMA_FROM_DEVICE);
1399 buffer_info->mapped = false;
1400 /* Prefetch the packet */
1401 prefetch(skb->data);
1402
1403		pr_debug("RxDescNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
1404 "TCP:0x%08x] BufInf = 0x%p\n",
1405 i, dma_status, gbec_status, tcp_ip_status,
1406 buffer_info);
1407 /* Error check */
1408 if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1409 adapter->stats.rx_frame_errors++;
1410 pr_err("Receive Not Octal Error\n");
1411 } else if (unlikely(gbec_status &
1412 PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1413 adapter->stats.rx_frame_errors++;
1414 pr_err("Receive Nibble Error\n");
1415 } else if (unlikely(gbec_status &
1416 PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1417 adapter->stats.rx_crc_errors++;
1418 pr_err("Receive CRC Error\n");
1419 } else {
1420			/* get receive length */
1421			/* subtract 3 bytes (length conversion) and 2 (padding) */
1422 length = (rx_desc->rx_words_eob) - 3 - 2;
1423
1424 /* Decide the data conversion method */
1425 if (!adapter->rx_csum) {
1426 /* [Header:14][payload] */
1427 skb_padding_flag = 0;
1428 skb_copy_flag = 1;
1429 } else {
1430 /* [Header:14][padding:2][payload] */
1431 skb_padding_flag = 1;
1432 if (length < copybreak)
1433 skb_copy_flag = 1;
1434 else
1435 skb_copy_flag = 0;
1436 }
1437
1438 /* Data conversion */
1439 if (skb_copy_flag) { /* recycle skb */
1440 struct sk_buff *new_skb;
1441 new_skb =
1442 netdev_alloc_skb(netdev,
1443 length + NET_IP_ALIGN);
1444 if (new_skb) {
1445 if (!skb_padding_flag) {
1446 skb_reserve(new_skb,
1447 NET_IP_ALIGN);
1448 }
1449 memcpy(new_skb->data, skb->data,
1450 length);
1451					/* keep the original skb in
1452					 * buffer_info for recycling */
1453 skb = new_skb;
1454 } else if (!skb_padding_flag) {
1455					/* drop this frame */
1456					pr_err("New skb allocation Error\n");
1457					goto drop;
1458 }
1459 } else {
1460 buffer_info->skb = NULL;
1461 }
1462 if (skb_padding_flag) {
1463 memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
1464 memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
1465 ETH_HLEN);
1466 skb_reserve(skb, NET_IP_ALIGN);
1467
1468 }
1469
1470			/* update driver statistics */
1471 adapter->stats.rx_bytes += length;
1472 adapter->stats.rx_packets++;
1473 if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
1474 adapter->stats.multicast++;
1475			/* Write skb metadata */
1476 skb_put(skb, length);
1477 skb->protocol = eth_type_trans(skb, netdev);
1478 if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) ==
1479 PCH_GBE_RXD_ACC_STAT_TCPIPOK) {
1480 skb->ip_summed = CHECKSUM_UNNECESSARY;
1481 } else {
1482 skb->ip_summed = CHECKSUM_NONE;
1483 }
1484 napi_gro_receive(&adapter->napi, skb);
1485 (*work_done)++;
1486 pr_debug("Receive skb->ip_summed: %d length: %d\n",
1487 skb->ip_summed, length);
1488 }
1489drop:
1490 /* return some buffers to hardware, one at a time is too slow */
1491 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1492 pch_gbe_alloc_rx_buffers(adapter, rx_ring,
1493 cleaned_count);
1494 cleaned_count = 0;
1495 }
1496 if (++i == rx_ring->count)
1497 i = 0;
1498 }
1499 rx_ring->next_to_clean = i;
1500 if (cleaned_count)
1501 pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1502 return cleaned;
1503}
1504
1505/**
1506 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1507 * @adapter: Board private structure
1508 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
1509 * Returns
1510 * 0: Successfully
1511 * Negative value: Failed
1512 */
1513int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1514 struct pch_gbe_tx_ring *tx_ring)
1515{
1516 struct pci_dev *pdev = adapter->pdev;
1517 struct pch_gbe_tx_desc *tx_desc;
1518 int size;
1519 int desNo;
1520
1521 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1522 tx_ring->buffer_info = vmalloc(size);
1523 if (!tx_ring->buffer_info) {
1524		pr_err("Unable to allocate memory for the buffer information\n");
1525 return -ENOMEM;
1526 }
1527 memset(tx_ring->buffer_info, 0, size);
1528
1529 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1530
1531 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1532 &tx_ring->dma, GFP_KERNEL);
1533 if (!tx_ring->desc) {
1534 vfree(tx_ring->buffer_info);
1535 pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1536 return -ENOMEM;
1537 }
1538 memset(tx_ring->desc, 0, tx_ring->size);
1539
1540 tx_ring->next_to_use = 0;
1541 tx_ring->next_to_clean = 0;
1542 spin_lock_init(&tx_ring->tx_lock);
1543
1544 for (desNo = 0; desNo < tx_ring->count; desNo++) {
1545 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1546 tx_desc->gbec_status = DSC_INIT16;
1547 }
1548 pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n"
1549 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1550 tx_ring->desc, (unsigned long long)tx_ring->dma,
1551 tx_ring->next_to_clean, tx_ring->next_to_use);
1552 return 0;
1553}
1554
1555/**
1556 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1557 * @adapter: Board private structure
1558 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1559 * Returns
1560 * 0: Successfully
1561 * Negative value: Failed
1562 */
1563int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1564 struct pch_gbe_rx_ring *rx_ring)
1565{
1566 struct pci_dev *pdev = adapter->pdev;
1567 struct pch_gbe_rx_desc *rx_desc;
1568 int size;
1569 int desNo;
1570
1571 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1572 rx_ring->buffer_info = vmalloc(size);
1573 if (!rx_ring->buffer_info) {
1574		pr_err("Unable to allocate memory for the buffer information\n");
1575 return -ENOMEM;
1576 }
1577 memset(rx_ring->buffer_info, 0, size);
1578 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1579 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1580 &rx_ring->dma, GFP_KERNEL);
1581
1582 if (!rx_ring->desc) {
1583 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1584 vfree(rx_ring->buffer_info);
1585 return -ENOMEM;
1586 }
1587 memset(rx_ring->desc, 0, rx_ring->size);
1588 rx_ring->next_to_clean = 0;
1589 rx_ring->next_to_use = 0;
1590 for (desNo = 0; desNo < rx_ring->count; desNo++) {
1591 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1592 rx_desc->gbec_status = DSC_INIT16;
1593 }
1594 pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx "
1595 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1596 rx_ring->desc, (unsigned long long)rx_ring->dma,
1597 rx_ring->next_to_clean, rx_ring->next_to_use);
1598 return 0;
1599}
1600
1601/**
1602 * pch_gbe_free_tx_resources - Free Tx Resources
1603 * @adapter: Board private structure
1604 * @tx_ring: Tx descriptor ring for a specific queue
1605 */
1606void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1607 struct pch_gbe_tx_ring *tx_ring)
1608{
1609 struct pci_dev *pdev = adapter->pdev;
1610
1611 pch_gbe_clean_tx_ring(adapter, tx_ring);
1612 vfree(tx_ring->buffer_info);
1613 tx_ring->buffer_info = NULL;
1614 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1615 tx_ring->desc = NULL;
1616}
1617
1618/**
1619 * pch_gbe_free_rx_resources - Free Rx Resources
1620 * @adapter: Board private structure
1621 * @rx_ring: Ring to clean the resources from
1622 */
1623void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1624 struct pch_gbe_rx_ring *rx_ring)
1625{
1626 struct pci_dev *pdev = adapter->pdev;
1627
1628 pch_gbe_clean_rx_ring(adapter, rx_ring);
1629 vfree(rx_ring->buffer_info);
1630 rx_ring->buffer_info = NULL;
1631 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1632 rx_ring->desc = NULL;
1633}
1634
1635/**
1636 * pch_gbe_request_irq - Allocate an interrupt line
1637 * @adapter: Board private structure
1638 * Returns
1639 * 0: Successfully
1640 * Negative value: Failed
1641 */
1642static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1643{
1644 struct net_device *netdev = adapter->netdev;
1645 int err;
1646 int flags;
1647
1648 flags = IRQF_SHARED;
1649 adapter->have_msi = false;
1650 err = pci_enable_msi(adapter->pdev);
1651 pr_debug("call pci_enable_msi\n");
1652 if (err) {
1653 pr_debug("call pci_enable_msi - Error: %d\n", err);
1654 } else {
1655 flags = 0;
1656 adapter->have_msi = true;
1657 }
1658 err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
1659 flags, netdev->name, netdev);
1660 if (err)
1661 pr_err("Unable to allocate interrupt Error: %d\n", err);
1662 pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
1663 adapter->have_msi, flags, err);
1664 return err;
1665}
1666
1667
1668static void pch_gbe_set_multi(struct net_device *netdev);
1669/**
1670 * pch_gbe_up - Up GbE network device
1671 * @adapter: Board private structure
1672 * Returns
1673 * 0: Successfully
1674 * Negative value: Failed
1675 */
1676int pch_gbe_up(struct pch_gbe_adapter *adapter)
1677{
1678 struct net_device *netdev = adapter->netdev;
1679 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1680 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1681 int err;
1682
1683 /* hardware has been reset, we need to reload some things */
1684 pch_gbe_set_multi(netdev);
1685
1686 pch_gbe_setup_tctl(adapter);
1687 pch_gbe_configure_tx(adapter);
1688 pch_gbe_setup_rctl(adapter);
1689 pch_gbe_configure_rx(adapter);
1690
1691 err = pch_gbe_request_irq(adapter);
1692 if (err) {
1693 pr_err("Error: can't bring device up\n");
1694 return err;
1695 }
1696 pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1697 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
1698 adapter->tx_queue_len = netdev->tx_queue_len;
1699
1700 mod_timer(&adapter->watchdog_timer, jiffies);
1701
1702 napi_enable(&adapter->napi);
1703 pch_gbe_irq_enable(adapter);
1704 netif_start_queue(adapter->netdev);
1705
1706 return 0;
1707}
1708
1709/**
1710 * pch_gbe_down - Down GbE network device
1711 * @adapter: Board private structure
1712 */
1713void pch_gbe_down(struct pch_gbe_adapter *adapter)
1714{
1715 struct net_device *netdev = adapter->netdev;
1716
1717 /* signal that we're down so the interrupt handler does not
1718 * reschedule our watchdog timer */
1719 napi_disable(&adapter->napi);
1720 atomic_set(&adapter->irq_sem, 0);
1721
1722 pch_gbe_irq_disable(adapter);
1723 pch_gbe_free_irq(adapter);
1724
1725 del_timer_sync(&adapter->watchdog_timer);
1726
1727 netdev->tx_queue_len = adapter->tx_queue_len;
1728 netif_carrier_off(netdev);
1729 netif_stop_queue(netdev);
1730
1731 pch_gbe_reset(adapter);
1732 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
1733 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
1734}
1735
1736/**
1737 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
1738 * @adapter: Board private structure to initialize
1739 * Returns
1740 * 0: Successfully
1741 * Negative value: Failed
1742 */
1743static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
1744{
1745 struct pch_gbe_hw *hw = &adapter->hw;
1746 struct net_device *netdev = adapter->netdev;
1747
1748 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
1749 hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1750 hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1751
1752 /* Initialize the hardware-specific values */
1753 if (pch_gbe_hal_setup_init_funcs(hw)) {
1754 pr_err("Hardware Initialization Failure\n");
1755 return -EIO;
1756 }
1757 if (pch_gbe_alloc_queues(adapter)) {
1758 pr_err("Unable to allocate memory for queues\n");
1759 return -ENOMEM;
1760 }
1761 spin_lock_init(&adapter->hw.miim_lock);
1762 spin_lock_init(&adapter->tx_queue_lock);
1763 spin_lock_init(&adapter->stats_lock);
1764 spin_lock_init(&adapter->ethtool_lock);
1765 atomic_set(&adapter->irq_sem, 0);
1766 pch_gbe_irq_disable(adapter);
1767
1768 pch_gbe_init_stats(adapter);
1769
1770 pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
1771 (u32) adapter->rx_buffer_len,
1772 hw->mac.min_frame_size, hw->mac.max_frame_size);
1773 return 0;
1774}
1775
1776/**
1777 * pch_gbe_open - Called when a network interface is made active
1778 * @netdev: Network interface device structure
1779 * Returns
1780 * 0: Successfully
1781 * Negative value: Failed
1782 */
1783static int pch_gbe_open(struct net_device *netdev)
1784{
1785 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1786 struct pch_gbe_hw *hw = &adapter->hw;
1787 int err;
1788
1789 /* allocate transmit descriptors */
1790 err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
1791 if (err)
1792 goto err_setup_tx;
1793 /* allocate receive descriptors */
1794 err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
1795 if (err)
1796 goto err_setup_rx;
1797 pch_gbe_hal_power_up_phy(hw);
1798 err = pch_gbe_up(adapter);
1799 if (err)
1800 goto err_up;
1801 pr_debug("Success End\n");
1802 return 0;
1803
1804err_up:
1805 if (!adapter->wake_up_evt)
1806 pch_gbe_hal_power_down_phy(hw);
1807 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
1808err_setup_rx:
1809 pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
1810err_setup_tx:
1811 pch_gbe_reset(adapter);
1812 pr_err("Error End\n");
1813 return err;
1814}
1815
1816/**
1817 * pch_gbe_stop - Disables a network interface
1818 * @netdev: Network interface device structure
1819 * Returns
1820 * 0: Successfully
1821 */
1822static int pch_gbe_stop(struct net_device *netdev)
1823{
1824 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1825 struct pch_gbe_hw *hw = &adapter->hw;
1826
1827 pch_gbe_down(adapter);
1828 if (!adapter->wake_up_evt)
1829 pch_gbe_hal_power_down_phy(hw);
1830 pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
1831 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
1832 return 0;
1833}
1834
1835/**
1836 * pch_gbe_xmit_frame - Packet transmitting start
1837 * @skb: Socket buffer structure
1838 * @netdev: Network interface device structure
1839 * Returns
1840 * - NETDEV_TX_OK: Normal end
1841 * - NETDEV_TX_BUSY: Error end
1842 */
1843static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1844{
1845 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1846 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1847 unsigned long flags;
1848
1849 if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
1850 dev_kfree_skb_any(skb);
1851 pr_err("Transfer length Error: skb len: %d > max: %d\n",
1852 skb->len, adapter->hw.mac.max_frame_size);
1853 adapter->stats.tx_length_errors++;
1854 return NETDEV_TX_OK;
1855 }
1856 if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
1857 /* Collision - tell upper layer to requeue */
1858 return NETDEV_TX_LOCKED;
1859 }
1860 if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
1861 netif_stop_queue(netdev);
1862 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
1863 pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
1864 tx_ring->next_to_use, tx_ring->next_to_clean);
1865 return NETDEV_TX_BUSY;
1866 }
1867 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
1868
1869	/* CRC and VLAN tag insertion are not supported */
1870 pch_gbe_tx_queue(adapter, tx_ring, skb);
1871 return NETDEV_TX_OK;
1872}
1873
1874/**
1875 * pch_gbe_get_stats - Get System Network Statistics
1876 * @netdev: Network interface device structure
1877 * Returns: The current stats
1878 */
1879static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
1880{
1881 /* only return the current stats */
1882 return &netdev->stats;
1883}
1884
1885/**
1886 * pch_gbe_set_multi - Multicast and Promiscuous mode set
1887 * @netdev: Network interface device structure
1888 */
1889static void pch_gbe_set_multi(struct net_device *netdev)
1890{
1891 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1892 struct pch_gbe_hw *hw = &adapter->hw;
1893 struct netdev_hw_addr *ha;
1894 u8 *mta_list;
1895 u32 rctl;
1896 int i;
1897 int mc_count;
1898
1899 pr_debug("netdev->flags : 0x%08x\n", netdev->flags);
1900
1901 /* Check for Promiscuous and All Multicast modes */
1902 rctl = ioread32(&hw->reg->RX_MODE);
1903 mc_count = netdev_mc_count(netdev);
1904 if ((netdev->flags & IFF_PROMISC)) {
1905 rctl &= ~PCH_GBE_ADD_FIL_EN;
1906 rctl &= ~PCH_GBE_MLT_FIL_EN;
1907 } else if ((netdev->flags & IFF_ALLMULTI)) {
1908 /* all the multicasting receive permissions */
1909 rctl |= PCH_GBE_ADD_FIL_EN;
1910 rctl &= ~PCH_GBE_MLT_FIL_EN;
1911 } else {
1912 if (mc_count >= PCH_GBE_MAR_ENTRIES) {
1913 /* all the multicasting receive permissions */
1914 rctl |= PCH_GBE_ADD_FIL_EN;
1915 rctl &= ~PCH_GBE_MLT_FIL_EN;
1916 } else {
1917 rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
1918 }
1919 }
1920 iowrite32(rctl, &hw->reg->RX_MODE);
1921
1922 if (mc_count >= PCH_GBE_MAR_ENTRIES)
1923 return;
1924 mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
1925 if (!mta_list)
1926 return;
1927
1928 /* The shared function expects a packed array of only addresses. */
1929 i = 0;
1930 netdev_for_each_mc_addr(ha, netdev) {
1931 if (i == mc_count)
1932 break;
1933 memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
1934 }
1935 pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
1936 PCH_GBE_MAR_ENTRIES);
1937 kfree(mta_list);
1938
1939 pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
1940 ioread32(&hw->reg->RX_MODE), mc_count);
1941}
1942
1943/**
1944 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
1945 * @netdev: Network interface device structure
1946 * @addr: Pointer to an address structure
1947 * Returns
1948 * 0: Success
1949 * -EADDRNOTAVAIL: Failed
1950 */
1951static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
1952{
1953 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1954 struct sockaddr *skaddr = addr;
1955 int ret_val;
1956
1957 if (!is_valid_ether_addr(skaddr->sa_data)) {
1958 ret_val = -EADDRNOTAVAIL;
1959 } else {
1960 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
1961 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
1962 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1963 ret_val = 0;
1964 }
1965 pr_debug("ret_val : 0x%08x\n", ret_val);
1966 pr_debug("dev_addr : %pM\n", netdev->dev_addr);
1967 pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
1968 pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
1969 ioread32(&adapter->hw.reg->mac_adr[0].high),
1970 ioread32(&adapter->hw.reg->mac_adr[0].low));
1971 return ret_val;
1972}
1973
1974/**
1975 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
1976 * @netdev: Network interface device structure
1977 * @new_mtu: New value for maximum frame size
1978 * Returns
1979 * 0: Success
1980 * -EINVAL: Failed
1981 */
1982static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
1983{
1984 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1985 int max_frame;
1986
1987 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
1988 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
1989 (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
1990 pr_err("Invalid MTU setting\n");
1991 return -EINVAL;
1992 }
1993 if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
1994 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
1995 else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
1996 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
1997 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
1998 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
1999 else
2000 adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
2001 netdev->mtu = new_mtu;
2002 adapter->hw.mac.max_frame_size = max_frame;
2003
2004 if (netif_running(netdev))
2005 pch_gbe_reinit_locked(adapter);
2006 else
2007 pch_gbe_reset(adapter);
2008
2009 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
2010 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2011 adapter->hw.mac.max_frame_size);
2012 return 0;
2013}
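/* Worked example (values follow directly from the code above):
 * new_mtu = 1500 gives max_frame = 1500 + ETH_HLEN (14) +
 * ETH_FCS_LEN (4) = 1518, which keeps the 2048-byte receive buffer;
 * new_mtu = 4000 gives 4018 and selects the 4096-byte buffer; any
 * max_frame above 8192 falls through to the jumbo maximum. */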
2014
2015/**
2016 * pch_gbe_ioctl - Access PHY registers through the MII interface
2017 * @netdev: Network interface device structure
2018 * @ifr: Pointer to ifr structure
2019 * @cmd: Control command
2020 * Returns
2021 * 0: Success
2022 * Negative value: Failed
2023 */
2024static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2025{
2026 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2027
2028 pr_debug("cmd : 0x%04x\n", cmd);
2029
2030 return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2031}
2032
2033/**
2034 * pch_gbe_tx_timeout - Respond to a Tx Hang
2035 * @netdev: Network interface device structure
2036 */
2037static void pch_gbe_tx_timeout(struct net_device *netdev)
2038{
2039 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2040
2041 /* Do the reset outside of interrupt context */
2042 adapter->stats.tx_timeout_count++;
2043 schedule_work(&adapter->reset_task);
2044}
2045
2046/**
2047 * pch_gbe_napi_poll - NAPI receive and transmit polling callback
2048 * @napi: Pointer to the polling device structure
2049 * @budget: Maximum number of packets to process in one poll
2050 * Returns
2051 * The number of packets processed; if it is less than @budget,
2052 * polling stops (napi_complete) and interrupts are re-enabled.
2053 */
2054static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2055{
2056 struct pch_gbe_adapter *adapter =
2057 container_of(napi, struct pch_gbe_adapter, napi);
2058 struct net_device *netdev = adapter->netdev;
2059 int work_done = 0;
2060 bool poll_end_flag = false;
2061 bool cleaned = false;
2062
2063 pr_debug("budget : %d\n", budget);
2064
2065 /* Keep link state information with original netdev */
2066 if (!netif_carrier_ok(netdev)) {
2067 poll_end_flag = true;
2068 } else {
2069 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2070 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2071
2072 if (cleaned)
2073 work_done = budget;
2074 /* If no Tx and not enough Rx work done,
2075 * exit the polling mode
2076 */
2077 if ((work_done < budget) || !netif_running(netdev))
2078 poll_end_flag = true;
2079 }
2080
2081 if (poll_end_flag) {
2082 napi_complete(napi);
2083 pch_gbe_irq_enable(adapter);
2084 }
2085
2086 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
2087 poll_end_flag, work_done, budget);
2088
2089 return work_done;
2090}
2091
2092#ifdef CONFIG_NET_POLL_CONTROLLER
2093/**
2094 * pch_gbe_netpoll - Used by things like netconsole to send skbs
2095 * @netdev: Network interface device structure
2096 */
2097static void pch_gbe_netpoll(struct net_device *netdev)
2098{
2099 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2100
2101 disable_irq(adapter->pdev->irq);
2102 pch_gbe_intr(adapter->pdev->irq, netdev);
2103 enable_irq(adapter->pdev->irq);
2104}
2105#endif
2106
2107static const struct net_device_ops pch_gbe_netdev_ops = {
2108 .ndo_open = pch_gbe_open,
2109 .ndo_stop = pch_gbe_stop,
2110 .ndo_start_xmit = pch_gbe_xmit_frame,
2111 .ndo_get_stats = pch_gbe_get_stats,
2112 .ndo_set_mac_address = pch_gbe_set_mac,
2113 .ndo_tx_timeout = pch_gbe_tx_timeout,
2114 .ndo_change_mtu = pch_gbe_change_mtu,
2115 .ndo_do_ioctl = pch_gbe_ioctl,
2116 .ndo_set_multicast_list = &pch_gbe_set_multi,
2117#ifdef CONFIG_NET_POLL_CONTROLLER
2118 .ndo_poll_controller = pch_gbe_netpoll,
2119#endif
2120};
2121
2122static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
2123 pci_channel_state_t state)
2124{
2125 struct net_device *netdev = pci_get_drvdata(pdev);
2126 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2127
2128 netif_device_detach(netdev);
2129 if (netif_running(netdev))
2130 pch_gbe_down(adapter);
2131 pci_disable_device(pdev);
2132 /* Request a slot reset. */
2133 return PCI_ERS_RESULT_NEED_RESET;
2134}
2135
2136static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
2137{
2138 struct net_device *netdev = pci_get_drvdata(pdev);
2139 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2140 struct pch_gbe_hw *hw = &adapter->hw;
2141
2142 if (pci_enable_device(pdev)) {
2143 pr_err("Cannot re-enable PCI device after reset\n");
2144 return PCI_ERS_RESULT_DISCONNECT;
2145 }
2146 pci_set_master(pdev);
2147 pci_enable_wake(pdev, PCI_D0, 0);
2148 pch_gbe_hal_power_up_phy(hw);
2149 pch_gbe_reset(adapter);
2150 /* Clear wake up status */
2151 pch_gbe_mac_set_wol_event(hw, 0);
2152
2153 return PCI_ERS_RESULT_RECOVERED;
2154}
2155
2156static void pch_gbe_io_resume(struct pci_dev *pdev)
2157{
2158 struct net_device *netdev = pci_get_drvdata(pdev);
2159 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2160
2161 if (netif_running(netdev)) {
2162 if (pch_gbe_up(adapter)) {
2163 pr_debug("can't bring device back up after reset\n");
2164 return;
2165 }
2166 }
2167 netif_device_attach(netdev);
2168}
2169
2170static int __pch_gbe_suspend(struct pci_dev *pdev)
2171{
2172 struct net_device *netdev = pci_get_drvdata(pdev);
2173 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2174 struct pch_gbe_hw *hw = &adapter->hw;
2175 u32 wufc = adapter->wake_up_evt;
2176 int retval = 0;
2177
2178 netif_device_detach(netdev);
2179 if (netif_running(netdev))
2180 pch_gbe_down(adapter);
2181 if (wufc) {
2182 pch_gbe_set_multi(netdev);
2183 pch_gbe_setup_rctl(adapter);
2184 pch_gbe_configure_rx(adapter);
2185 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2186 hw->mac.link_duplex);
2187 pch_gbe_set_mode(adapter, hw->mac.link_speed,
2188 hw->mac.link_duplex);
2189 pch_gbe_mac_set_wol_event(hw, wufc);
2190 pci_disable_device(pdev);
2191 } else {
2192 pch_gbe_hal_power_down_phy(hw);
2193 pch_gbe_mac_set_wol_event(hw, wufc);
2194 pci_disable_device(pdev);
2195 }
2196 return retval;
2197}
2198
2199#ifdef CONFIG_PM
2200static int pch_gbe_suspend(struct device *device)
2201{
2202 struct pci_dev *pdev = to_pci_dev(device);
2203
2204 return __pch_gbe_suspend(pdev);
2205}
2206
2207static int pch_gbe_resume(struct device *device)
2208{
2209 struct pci_dev *pdev = to_pci_dev(device);
2210 struct net_device *netdev = pci_get_drvdata(pdev);
2211 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2212 struct pch_gbe_hw *hw = &adapter->hw;
2213 int err;
2214
2215 err = pci_enable_device(pdev);
2216 if (err) {
2217 pr_err("Cannot enable PCI device from suspend\n");
2218 return err;
2219 }
2220 pci_set_master(pdev);
2221 pch_gbe_hal_power_up_phy(hw);
2222 pch_gbe_reset(adapter);
2223 /* Clear wake-on-LAN control and status */
2224 pch_gbe_mac_set_wol_event(hw, 0);
2225
2226 if (netif_running(netdev))
2227 pch_gbe_up(adapter);
2228 netif_device_attach(netdev);
2229
2230 return 0;
2231}
2232#endif /* CONFIG_PM */
2233
2234static void pch_gbe_shutdown(struct pci_dev *pdev)
2235{
2236 __pch_gbe_suspend(pdev);
2237 if (system_state == SYSTEM_POWER_OFF) {
2238 pci_wake_from_d3(pdev, true);
2239 pci_set_power_state(pdev, PCI_D3hot);
2240 }
2241}
2242
2243static void pch_gbe_remove(struct pci_dev *pdev)
2244{
2245 struct net_device *netdev = pci_get_drvdata(pdev);
2246 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2247
2248 flush_scheduled_work();
2249 unregister_netdev(netdev);
2250
2251 pch_gbe_hal_phy_hw_reset(&adapter->hw);
2252
2253 kfree(adapter->tx_ring);
2254 kfree(adapter->rx_ring);
2255
2256 iounmap(adapter->hw.reg);
2257 pci_release_regions(pdev);
2258 free_netdev(netdev);
2259 pci_disable_device(pdev);
2260}
2261
2262static int pch_gbe_probe(struct pci_dev *pdev,
2263 const struct pci_device_id *pci_id)
2264{
2265 struct net_device *netdev;
2266 struct pch_gbe_adapter *adapter;
2267 int ret;
2268
2269 ret = pci_enable_device(pdev);
2270 if (ret)
2271 return ret;
2272
2273 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2274 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2275 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2276 if (ret) {
2277 ret = pci_set_consistent_dma_mask(pdev,
2278 DMA_BIT_MASK(32));
2279 if (ret) {
2280 dev_err(&pdev->dev, "ERR: No usable DMA "
2281 "configuration, aborting\n");
2282 goto err_disable_device;
2283 }
2284 }
2285 }
2286
2287 ret = pci_request_regions(pdev, KBUILD_MODNAME);
2288 if (ret) {
2289 dev_err(&pdev->dev,
2290 "ERR: Can't reserve PCI I/O and memory resources\n");
2291 goto err_disable_device;
2292 }
2293 pci_set_master(pdev);
2294
2295 netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2296 if (!netdev) {
2297 ret = -ENOMEM;
2298 dev_err(&pdev->dev,
2299 "ERR: Can't allocate and set up an Ethernet device\n");
2300 goto err_release_pci;
2301 }
2302 SET_NETDEV_DEV(netdev, &pdev->dev);
2303
2304 pci_set_drvdata(pdev, netdev);
2305 adapter = netdev_priv(netdev);
2306 adapter->netdev = netdev;
2307 adapter->pdev = pdev;
2308 adapter->hw.back = adapter;
2309 adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
2310 if (!adapter->hw.reg) {
2311 ret = -EIO;
2312 dev_err(&pdev->dev, "Can't ioremap\n");
2313 goto err_free_netdev;
2314 }
2315
2316 netdev->netdev_ops = &pch_gbe_netdev_ops;
2317 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2318 netif_napi_add(netdev, &adapter->napi,
2319 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2320 netdev->features = NETIF_F_HW_CSUM | NETIF_F_GRO;
2321 pch_gbe_set_ethtool_ops(netdev);
2322
2323 pch_gbe_mac_reset_hw(&adapter->hw);
2324
2325 /* setup the private structure */
2326 ret = pch_gbe_sw_init(adapter);
2327 if (ret)
2328 goto err_iounmap;
2329
2330 /* Initialize PHY */
2331 ret = pch_gbe_init_phy(adapter);
2332 if (ret) {
2333 dev_err(&pdev->dev, "PHY initialize error\n");
2334 goto err_free_adapter;
2335 }
2336 pch_gbe_hal_get_bus_info(&adapter->hw);
2337
2338 /* Read the MAC address and store it in the private data */
2339 ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
2340 if (ret) {
2341 dev_err(&pdev->dev, "MAC address Read Error\n");
2342 goto err_free_adapter;
2343 }
2344
2345 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2346 if (!is_valid_ether_addr(netdev->dev_addr)) {
2347 dev_err(&pdev->dev, "Invalid MAC Address\n");
2348 ret = -EIO;
2349 goto err_free_adapter;
2350 }
2351 setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2352 (unsigned long)adapter);
2353
2354 INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2355
2356 pch_gbe_check_options(adapter);
2357
2358 if (adapter->tx_csum)
2359 netdev->features |= NETIF_F_HW_CSUM;
2360 else
2361 netdev->features &= ~NETIF_F_HW_CSUM;
2362
2363 /* initialize the wol settings based on the eeprom settings */
2364 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2365 dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2366
2367 /* reset the hardware with the new settings */
2368 pch_gbe_reset(adapter);
2369
2370 ret = register_netdev(netdev);
2371 if (ret)
2372 goto err_free_adapter;
2373 /* tell the stack to leave us alone until pch_gbe_open() is called */
2374 netif_carrier_off(netdev);
2375 netif_stop_queue(netdev);
2376
2377 dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
2378
2379 device_set_wakeup_enable(&pdev->dev, 1);
2380 return 0;
2381
2382err_free_adapter:
2383 pch_gbe_hal_phy_hw_reset(&adapter->hw);
2384 kfree(adapter->tx_ring);
2385 kfree(adapter->rx_ring);
2386err_iounmap:
2387 iounmap(adapter->hw.reg);
2388err_free_netdev:
2389 free_netdev(netdev);
2390err_release_pci:
2391 pci_release_regions(pdev);
2392err_disable_device:
2393 pci_disable_device(pdev);
2394 return ret;
2395}
2396
2397static const struct pci_device_id pch_gbe_pcidev_id[] = {
2398 {.vendor = PCI_VENDOR_ID_INTEL,
2399 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2400 .subvendor = PCI_ANY_ID,
2401 .subdevice = PCI_ANY_ID,
2402 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2403 .class_mask = (0xFFFF00)
2404 },
2405 /* required last entry */
2406 {0}
2407};
2408
2409#ifdef CONFIG_PM
2410static const struct dev_pm_ops pch_gbe_pm_ops = {
2411 .suspend = pch_gbe_suspend,
2412 .resume = pch_gbe_resume,
2413 .freeze = pch_gbe_suspend,
2414 .thaw = pch_gbe_resume,
2415 .poweroff = pch_gbe_suspend,
2416 .restore = pch_gbe_resume,
2417};
2418#endif
2419
2420static struct pci_error_handlers pch_gbe_err_handler = {
2421 .error_detected = pch_gbe_io_error_detected,
2422 .slot_reset = pch_gbe_io_slot_reset,
2423 .resume = pch_gbe_io_resume
2424};
2425
2426static struct pci_driver pch_gbe_pcidev = {
2427 .name = KBUILD_MODNAME,
2428 .id_table = pch_gbe_pcidev_id,
2429 .probe = pch_gbe_probe,
2430 .remove = pch_gbe_remove,
2431#ifdef CONFIG_PM_OPS
2432 .driver.pm = &pch_gbe_pm_ops,
2433#endif
2434 .shutdown = pch_gbe_shutdown,
2435 .err_handler = &pch_gbe_err_handler
2436};
2437
2438
2439static int __init pch_gbe_init_module(void)
2440{
2441 int ret;
2442
2443 ret = pci_register_driver(&pch_gbe_pcidev);
2444 if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
2445 if (copybreak == 0) {
2446 pr_info("copybreak disabled\n");
2447 } else {
2448 pr_info("copybreak enabled for packets <= %u bytes\n",
2449 copybreak);
2450 }
2451 }
2452 return ret;
2453}
2454
2455static void __exit pch_gbe_exit_module(void)
2456{
2457 pci_unregister_driver(&pch_gbe_pcidev);
2458}
2459
2460module_init(pch_gbe_init_module);
2461module_exit(pch_gbe_exit_module);
2462
2463MODULE_DESCRIPTION("OKI SEMICONDUCTOR PCH Gigabit Ethernet Driver");
2464MODULE_AUTHOR("OKI SEMICONDUCTOR, <masa-korg@dsn.okisemi.com>");
2465MODULE_LICENSE("GPL");
2466MODULE_VERSION(DRV_VERSION);
2467MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
2468
2469module_param(copybreak, uint, 0644);
2470MODULE_PARM_DESC(copybreak,
2471 "Maximum size of packet that is copied to a new buffer on receive");
2472
2473/* pch_gbe_main.c */
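For reference, the DMA-mask setup in pch_gbe_probe() above tries 64-bit masks and falls back to 32-bit. A minimal stand-alone sketch of the conventional form of that pattern, which sets both the streaming and coherent masks at each width ("my_probe" is a hypothetical entry point, not part of this patch):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_probe(struct pci_dev *pdev)
{
	int ret = pci_enable_device(pdev);

	if (ret)
		return ret;
	/* Prefer 64-bit streaming and coherent DMA masks ... */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		/* ... otherwise fall back to 32-bit before giving up. */
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
		    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			pci_disable_device(pdev);
			return -EIO;
		}
	}
	return 0;
}

The sketch sets both masks at each width, a slightly more uniform shape than the driver's fallback chain, which retries the 32-bit coherent mask only after the 32-bit streaming mask fails.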
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
new file mode 100644
index 000000000000..2510146fc560
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -0,0 +1,499 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include "pch_gbe.h"
22
23#define OPTION_UNSET -1
24#define OPTION_DISABLED 0
25#define OPTION_ENABLED 1
26
27/**
28 * TxDescriptors - Transmit Descriptor Count
29 * @Valid Range: PCH_GBE_MIN_TXD - PCH_GBE_MAX_TXD
30 * @Default Value: PCH_GBE_DEFAULT_TXD
31 */
32static int TxDescriptors = OPTION_UNSET;
33module_param(TxDescriptors, int, 0);
34MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
35
36/**
37 * RxDescriptors - Receive Descriptor Count
38 * @Valid Range: PCH_GBE_MIN_RXD - PCH_GBE_MAX_RXD
39 * @Default Value: PCH_GBE_DEFAULT_RXD
40 */
41static int RxDescriptors = OPTION_UNSET;
42module_param(RxDescriptors, int, 0);
43MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
44
45/**
46 * Speed - User Specified Speed Override
47 * @Valid Range: 0, 10, 100, 1000
48 * - 0: auto-negotiate at all supported speeds
49 * - 10: only link at 10 Mbps
50 * - 100: only link at 100 Mbps
51 * - 1000: only link at 1000 Mbps
52 * @Default Value: 0
53 */
54static int Speed = OPTION_UNSET;
55module_param(Speed, int, 0);
56MODULE_PARM_DESC(Speed, "Speed setting");
57
58/**
59 * Duplex - User Specified Duplex Override
60 * @Valid Range: 0-2
61 * - 0: auto-negotiate for duplex
62 * - 1: only link at half duplex
63 * - 2: only link at full duplex
64 * @Default Value: 0
65 */
66static int Duplex = OPTION_UNSET;
67module_param(Duplex, int, 0);
68MODULE_PARM_DESC(Duplex, "Duplex setting");
69
70#define HALF_DUPLEX 1
71#define FULL_DUPLEX 2
72
73/**
74 * AutoNeg - Auto-negotiation Advertisement Override
75 * @Valid Range: 0x01-0x0F, 0x20-0x2F
76 *
77 * The AutoNeg value is a bit mask describing which speed and duplex
78 * combinations should be advertised during auto-negotiation.
79 * The supported speed and duplex modes are listed below
80 *
81 * Bit           7    6    5    4    3    2    1    0
82 * Speed (Mbps)  N/A  N/A  1000 N/A  100  100  10   10
83 * Duplex                  Full      Full Half Full Half
84 *
85 * @Default Value: 0x2F (copper)
86 */
87static int AutoNeg = OPTION_UNSET;
88module_param(AutoNeg, int, 0);
89MODULE_PARM_DESC(AutoNeg, "Advertised auto-negotiation setting");
90
91#define PHY_ADVERTISE_10_HALF 0x0001
92#define PHY_ADVERTISE_10_FULL 0x0002
93#define PHY_ADVERTISE_100_HALF 0x0004
94#define PHY_ADVERTISE_100_FULL 0x0008
95#define PHY_ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
96#define PHY_ADVERTISE_1000_FULL 0x0020
97#define PCH_AUTONEG_ADVERTISE_DEFAULT 0x2F
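/* Example: an AutoNeg mask is simply the OR of the advertise bits
 * above, e.g. PHY_ADVERTISE_1000_FULL | PHY_ADVERTISE_100_FULL |
 * PHY_ADVERTISE_10_FULL = 0x20 | 0x08 | 0x02 = 0x2a, which is the
 * "1000/FD, 100/FD, 10/FD" entry in an_list[] further down. */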
98
99/**
100 * FlowControl - User Specified Flow Control Override
101 * @Valid Range: 0-3
102 * - 0: No Flow Control
103 * - 1: Rx only, respond to PAUSE frames but do not generate them
104 * - 2: Tx only, generate PAUSE frames but ignore them on receive
105 * - 3: Full Flow Control Support
106 * @Default Value: Read flow control settings from the EEPROM
107 */
108static int FlowControl = OPTION_UNSET;
109module_param(FlowControl, int, 0);
110MODULE_PARM_DESC(FlowControl, "Flow Control setting");
111
112/*
113 * XsumRX - Receive Checksum Offload Enable/Disable
114 * @Valid Range: 0, 1
115 * - 0: disables all checksum offload
116 * - 1: enables receive IP/TCP/UDP checksum offload
117 * @Default Value: PCH_GBE_DEFAULT_RX_CSUM
118 */
119static int XsumRX = OPTION_UNSET;
120module_param(XsumRX, int, 0);
121MODULE_PARM_DESC(XsumRX, "Disable or enable Receive Checksum offload");
122
123#define PCH_GBE_DEFAULT_RX_CSUM true /* true or false */
124
125/*
126 * XsumTX - Transmit Checksum Offload Enable/Disable
127 * @Valid Range: 0, 1
128 * - 0: disables all checksum offload
129 * - 1: enables transmit IP/TCP/UDP checksum offload
130 * @Default Value: PCH_GBE_DEFAULT_TX_CSUM
131 */
132static int XsumTX = OPTION_UNSET;
133module_param(XsumTX, int, 0);
134MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
135
136#define PCH_GBE_DEFAULT_TX_CSUM true /* true or false */
137
138/**
139 * struct pch_gbe_option - Option descriptor for a driver module parameter
140 * @type: Validation type (enable_option, range_option or list_option)
141 * @name: Parameter name used in log messages
142 * @err: Message fragment printed when the value fails validation
143 * @def: Default applied when the parameter is unset or out of range
144 */
145struct pch_gbe_option {
146 enum { enable_option, range_option, list_option } type;
147 char *name;
148 char *err;
149 int def;
150 union {
151 struct { /* range_option info */
152 int min;
153 int max;
154 } r;
155 struct { /* list_option info */
156 int nr;
157 const struct pch_gbe_opt_list { int i; char *str; } *p;
158 } l;
159 } arg;
160};
161
162static const struct pch_gbe_opt_list speed_list[] = {
163 { 0, "" },
164 { SPEED_10, "" },
165 { SPEED_100, "" },
166 { SPEED_1000, "" }
167};
168
169static const struct pch_gbe_opt_list dplx_list[] = {
170 { 0, "" },
171 { HALF_DUPLEX, "" },
172 { FULL_DUPLEX, "" }
173};
174
175static const struct pch_gbe_opt_list an_list[] =
176 #define AA "AutoNeg advertising "
177 {{ 0x01, AA "10/HD" },
178 { 0x02, AA "10/FD" },
179 { 0x03, AA "10/FD, 10/HD" },
180 { 0x04, AA "100/HD" },
181 { 0x05, AA "100/HD, 10/HD" },
182 { 0x06, AA "100/HD, 10/FD" },
183 { 0x07, AA "100/HD, 10/FD, 10/HD" },
184 { 0x08, AA "100/FD" },
185 { 0x09, AA "100/FD, 10/HD" },
186 { 0x0a, AA "100/FD, 10/FD" },
187 { 0x0b, AA "100/FD, 10/FD, 10/HD" },
188 { 0x0c, AA "100/FD, 100/HD" },
189 { 0x0d, AA "100/FD, 100/HD, 10/HD" },
190 { 0x0e, AA "100/FD, 100/HD, 10/FD" },
191 { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
192 { 0x20, AA "1000/FD" },
193 { 0x21, AA "1000/FD, 10/HD" },
194 { 0x22, AA "1000/FD, 10/FD" },
195 { 0x23, AA "1000/FD, 10/FD, 10/HD" },
196 { 0x24, AA "1000/FD, 100/HD" },
197 { 0x25, AA "1000/FD, 100/HD, 10/HD" },
198 { 0x26, AA "1000/FD, 100/HD, 10/FD" },
199 { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
200 { 0x28, AA "1000/FD, 100/FD" },
201 { 0x29, AA "1000/FD, 100/FD, 10/HD" },
202 { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
203 { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
204 { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
205 { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
206 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
207 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }
208};
209
210static const struct pch_gbe_opt_list fc_list[] = {
211 { PCH_GBE_FC_NONE, "Flow Control Disabled" },
212 { PCH_GBE_FC_RX_PAUSE, "Flow Control Receive Only" },
213 { PCH_GBE_FC_TX_PAUSE, "Flow Control Transmit Only" },
214 { PCH_GBE_FC_FULL, "Flow Control Enabled" }
215};
216
217/**
218 * pch_gbe_validate_option - Validate option
219 * @value: value
220 * @opt: option
221 * @adapter: Board private structure
222 * Returns
223 * 0: Successful.
224 * Negative value: Failed.
225 */
226static int pch_gbe_validate_option(int *value,
227 const struct pch_gbe_option *opt,
228 struct pch_gbe_adapter *adapter)
229{
230 if (*value == OPTION_UNSET) {
231 *value = opt->def;
232 return 0;
233 }
234
235 switch (opt->type) {
236 case enable_option:
237 switch (*value) {
238 case OPTION_ENABLED:
239 pr_debug("%s Enabled\n", opt->name);
240 return 0;
241 case OPTION_DISABLED:
242 pr_debug("%s Disabled\n", opt->name);
243 return 0;
244 }
245 break;
246 case range_option:
247 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
248 pr_debug("%s set to %i\n", opt->name, *value);
249 return 0;
250 }
251 break;
252 case list_option: {
253 int i;
254 const struct pch_gbe_opt_list *ent;
255
256 for (i = 0; i < opt->arg.l.nr; i++) {
257 ent = &opt->arg.l.p[i];
258 if (*value == ent->i) {
259 if (ent->str[0] != '\0')
260 pr_debug("%s\n", ent->str);
261 return 0;
262 }
263 }
264 }
265 break;
266 default:
267 BUG();
268 }
269
270 pr_debug("Invalid %s value specified (%i) %s\n",
271 opt->name, *value, opt->err);
272 *value = opt->def;
273 return -1;
274}
275
276/**
277 * pch_gbe_check_copper_options - Range Checking for Link Options, Copper Version
278 * @adapter: Board private structure
279 */
280static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
281{
282 struct pch_gbe_hw *hw = &adapter->hw;
283 int speed, dplx;
284
285 { /* Speed */
286 static const struct pch_gbe_option opt = {
287 .type = list_option,
288 .name = "Speed",
289 .err = "parameter ignored",
290 .def = 0,
291 .arg = { .l = { .nr = (int)ARRAY_SIZE(speed_list),
292 .p = speed_list } }
293 };
294 speed = Speed;
295 pch_gbe_validate_option(&speed, &opt, adapter);
296 }
297 { /* Duplex */
298 static const struct pch_gbe_option opt = {
299 .type = list_option,
300 .name = "Duplex",
301 .err = "parameter ignored",
302 .def = 0,
303 .arg = { .l = { .nr = (int)ARRAY_SIZE(dplx_list),
304 .p = dplx_list } }
305 };
306 dplx = Duplex;
307 pch_gbe_validate_option(&dplx, &opt, adapter);
308 }
309
310 { /* Autoneg */
311 static const struct pch_gbe_option opt = {
312 .type = list_option,
313 .name = "AutoNeg",
314 .err = "parameter ignored",
315 .def = PCH_AUTONEG_ADVERTISE_DEFAULT,
316 .arg = { .l = { .nr = (int)ARRAY_SIZE(an_list),
317 .p = an_list} }
318 };
319 if (speed || dplx) {
320 pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
321 hw->phy.autoneg_advertised = opt.def;
322 } else {
323 hw->phy.autoneg_advertised = AutoNeg;
324 pch_gbe_validate_option(
325 (int *)(&hw->phy.autoneg_advertised),
326 &opt, adapter);
327 }
328 }
329
330 switch (speed + dplx) {
331 case 0:
332 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
333 /* speed == 0 && dplx == 0: leave everything to autonegotiation */
334 pr_debug("Speed and duplex autonegotiation enabled\n");
335 hw->mac.link_speed = SPEED_10;
336 hw->mac.link_duplex = DUPLEX_HALF;
337 break;
338 case HALF_DUPLEX:
339 pr_debug("Half Duplex specified without Speed\n");
340 pr_debug("Using Autonegotiation at Half Duplex only\n");
341 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
342 hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
343 PHY_ADVERTISE_100_HALF;
344 hw->mac.link_speed = SPEED_10;
345 hw->mac.link_duplex = DUPLEX_HALF;
346 break;
347 case FULL_DUPLEX:
348 pr_debug("Full Duplex specified without Speed\n");
349 pr_debug("Using Autonegotiation at Full Duplex only\n");
350 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
351 hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL |
352 PHY_ADVERTISE_100_FULL |
353 PHY_ADVERTISE_1000_FULL;
354 hw->mac.link_speed = SPEED_10;
355 hw->mac.link_duplex = DUPLEX_FULL;
356 break;
357 case SPEED_10:
358 pr_debug("10 Mbps Speed specified without Duplex\n");
359 pr_debug("Using Autonegotiation at 10 Mbps only\n");
360 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
361 hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
362 PHY_ADVERTISE_10_FULL;
363 hw->mac.link_speed = SPEED_10;
364 hw->mac.link_duplex = DUPLEX_HALF;
365 break;
366 case SPEED_10 + HALF_DUPLEX:
367 pr_debug("Forcing to 10 Mbps Half Duplex\n");
368 hw->mac.autoneg = hw->mac.fc_autoneg = 0;
369 hw->phy.autoneg_advertised = 0;
370 hw->mac.link_speed = SPEED_10;
371 hw->mac.link_duplex = DUPLEX_HALF;
372 break;
373 case SPEED_10 + FULL_DUPLEX:
374 pr_debug("Forcing to 10 Mbps Full Duplex\n");
375 hw->mac.autoneg = hw->mac.fc_autoneg = 0;
376 hw->phy.autoneg_advertised = 0;
377 hw->mac.link_speed = SPEED_10;
378 hw->mac.link_duplex = DUPLEX_FULL;
379 break;
380 case SPEED_100:
381 pr_debug("100 Mbps Speed specified without Duplex\n");
382 pr_debug("Using Autonegotiation at 100 Mbps only\n");
383 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
384 hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF |
385 PHY_ADVERTISE_100_FULL;
386 hw->mac.link_speed = SPEED_100;
387 hw->mac.link_duplex = DUPLEX_HALF;
388 break;
389 case SPEED_100 + HALF_DUPLEX:
390 pr_debug("Forcing to 100 Mbps Half Duplex\n");
391 hw->mac.autoneg = hw->mac.fc_autoneg = 0;
392 hw->phy.autoneg_advertised = 0;
393 hw->mac.link_speed = SPEED_100;
394 hw->mac.link_duplex = DUPLEX_HALF;
395 break;
396 case SPEED_100 + FULL_DUPLEX:
397 pr_debug("Forcing to 100 Mbps Full Duplex\n");
398 hw->mac.autoneg = hw->mac.fc_autoneg = 0;
399 hw->phy.autoneg_advertised = 0;
400 hw->mac.link_speed = SPEED_100;
401 hw->mac.link_duplex = DUPLEX_FULL;
402 break;
403 case SPEED_1000:
404 pr_debug("1000 Mbps Speed specified without Duplex\n");
405 goto full_duplex_only;
406 case SPEED_1000 + HALF_DUPLEX:
407 pr_debug("Half Duplex is not supported at 1000 Mbps\n");
408 /* fall through */
409 case SPEED_1000 + FULL_DUPLEX:
410full_duplex_only:
411 pr_debug("Using Autonegotiation at 1000 Mbps Full Duplex only\n");
412 hw->mac.autoneg = hw->mac.fc_autoneg = 1;
413 hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL;
414 hw->mac.link_speed = SPEED_1000;
415 hw->mac.link_duplex = DUPLEX_FULL;
416 break;
417 default:
418 BUG();
419 }
420}
421
422/**
423 * pch_gbe_check_options - Range Checking for Command Line Parameters
424 * @adapter: Board private structure
425 */
426void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
427{
428 struct pch_gbe_hw *hw = &adapter->hw;
429
430 { /* Transmit Descriptor Count */
431 static const struct pch_gbe_option opt = {
432 .type = range_option,
433 .name = "Transmit Descriptors",
434 .err = "using default of "
435 __MODULE_STRING(PCH_GBE_DEFAULT_TXD),
436 .def = PCH_GBE_DEFAULT_TXD,
437 .arg = { .r = { .min = PCH_GBE_MIN_TXD,
438 .max = PCH_GBE_MAX_TXD } }
439 };
440 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
441 tx_ring->count = TxDescriptors;
442 pch_gbe_validate_option(&tx_ring->count, &opt, adapter);
443 tx_ring->count = roundup(tx_ring->count,
444 PCH_GBE_TX_DESC_MULTIPLE);
445 }
446 { /* Receive Descriptor Count */
447 static const struct pch_gbe_option opt = {
448 .type = range_option,
449 .name = "Receive Descriptors",
450 .err = "using default of "
451 __MODULE_STRING(PCH_GBE_DEFAULT_RXD),
452 .def = PCH_GBE_DEFAULT_RXD,
453 .arg = { .r = { .min = PCH_GBE_MIN_RXD,
454 .max = PCH_GBE_MAX_RXD } }
455 };
456 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
457 rx_ring->count = RxDescriptors;
458 pch_gbe_validate_option(&rx_ring->count, &opt, adapter);
459 rx_ring->count = roundup(rx_ring->count,
460 PCH_GBE_RX_DESC_MULTIPLE);
461 }
462 { /* Receive Checksum Offload Enable/Disable */
463 static const struct pch_gbe_option opt = {
464 .type = enable_option,
465 .name = "Checksum Offload",
466 .err = "defaulting to Enabled",
467 .def = PCH_GBE_DEFAULT_RX_CSUM
468 };
469 adapter->rx_csum = XsumRX;
470 pch_gbe_validate_option((int *)(&adapter->rx_csum),
471 &opt, adapter);
472 }
473 { /* Transmit Checksum Offload Enable/Disable */
474 static const struct pch_gbe_option opt = {
475 .type = enable_option,
476 .name = "Checksum Offload",
477 .err = "defaulting to Enabled",
478 .def = PCH_GBE_DEFAULT_TX_CSUM
479 };
480 adapter->tx_csum = XsumTX;
481 pch_gbe_validate_option((int *)(&adapter->tx_csum),
482 &opt, adapter);
483 }
484 { /* Flow Control */
485 static const struct pch_gbe_option opt = {
486 .type = list_option,
487 .name = "Flow Control",
488 .err = "reading default settings from EEPROM",
489 .def = PCH_GBE_FC_DEFAULT,
490 .arg = { .l = { .nr = (int)ARRAY_SIZE(fc_list),
491 .p = fc_list } }
492 };
493 hw->mac.fc = FlowControl;
494 pch_gbe_validate_option((int *)(&hw->mac.fc),
495 &opt, adapter);
496 }
497
498 pch_gbe_check_copper_options(adapter);
499}
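For reference, the range_option branch of pch_gbe_validate_option() above can be exercised in isolation. A small user-space sketch; the min/max/default values are stand-ins for the PCH_GBE_* constants, which live in pch_gbe.h and are not shown in this patch:

#include <stdio.h>

struct range_opt { const char *name; int def, min, max; };

static int validate_range(int *value, const struct range_opt *opt)
{
	if (*value >= opt->min && *value <= opt->max) {
		printf("%s set to %d\n", opt->name, *value);
		return 0;
	}
	printf("Invalid %s value (%d), using default %d\n",
	       opt->name, *value, opt->def);
	*value = opt->def;
	return -1;
}

int main(void)
{
	/* Mirrors "Transmit Descriptors"; limits are assumed, not the
	 * driver's real constants. */
	struct range_opt txd = { "Transmit Descriptors", 256, 8, 4096 };
	int count = 100000;		/* out of range on purpose */

	validate_range(&count, &txd);	/* falls back to the default */
	printf("final count = %d\n", count);
	return 0;
}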
diff --git a/drivers/net/pch_gbe/pch_gbe_phy.c b/drivers/net/pch_gbe/pch_gbe_phy.c
new file mode 100644
index 000000000000..923a687acd30
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_phy.c
@@ -0,0 +1,274 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include "pch_gbe.h"
22#include "pch_gbe_phy.h"
23
24#define PHY_MAX_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
25
26/* PHY 1000 MII Register/Bit Definitions */
27/* PHY Registers defined by IEEE */
28#define PHY_CONTROL 0x00 /* Control Register */
29#define PHY_STATUS 0x01 /* Status Register */
30#define PHY_ID1 0x02 /* Phy Id Register (word 1) */
31#define PHY_ID2 0x03 /* Phy Id Register (word 2) */
32#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
33#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
34#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Register */
35#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
36#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
37#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Register */
38#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Register */
39#define PHY_EXT_STATUS 0x0F /* Extended Status Register */
40#define PHY_PHYSP_CONTROL 0x10 /* PHY Specific Control Register */
41#define PHY_EXT_PHYSP_CONTROL 0x14 /* Extended PHY Specific Control Register */
42#define PHY_LED_CONTROL 0x18 /* LED Control Register */
43#define PHY_EXT_PHYSP_STATUS 0x1B /* Extended PHY Specific Status Register */
44
45/* PHY Control Register */
46#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
47#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
48#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
49#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
50#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
51#define MII_CR_POWER_DOWN 0x0800 /* Power down */
52#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
53#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
54#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
55#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
56#define MII_CR_SPEED_1000 0x0040
57#define MII_CR_SPEED_100 0x2000
58#define MII_CR_SPEED_10 0x0000
59
60/* PHY Status Register */
61#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
62#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
63#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
64#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
65#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
66#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
67#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
68#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
69#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
70#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
71#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
72#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
73#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
74#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
75#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
76
77/* Phy Id Register (word 2) */
78#define PHY_REVISION_MASK 0x000F
79
80/* PHY Specific Control Register */
81#define PHYSP_CTRL_ASSERT_CRS_TX 0x0800
82
83
84/* Default value of PHY register */
85#define PHY_CONTROL_DEFAULT 0x1140 /* Control Register */
86#define PHY_AUTONEG_ADV_DEFAULT 0x01e0 /* Autoneg Advertisement */
87#define PHY_NEXT_PAGE_TX_DEFAULT 0x2001 /* Next Page TX */
88#define PHY_1000T_CTRL_DEFAULT 0x0300 /* 1000Base-T Control Register */
89#define PHY_PHYSP_CONTROL_DEFAULT 0x01EE /* PHY Specific Control Register */
90
91/**
92 * pch_gbe_phy_get_id - Retrieve the PHY ID and revision
93 * @hw: Pointer to the HW structure
94 * Returns
95 * 0: Successful.
96 * Negative value: Failed.
97 */
98s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw)
99{
100 struct pch_gbe_phy_info *phy = &hw->phy;
101 s32 ret;
102 u16 phy_id1;
103 u16 phy_id2;
104
105 ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID1, &phy_id1);
106 if (ret)
107 return ret;
108 ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID2, &phy_id2);
109 if (ret)
110 return ret;
111 /*
112 * PHY_ID1: [bit15-0:ID(21-6)]
113 * PHY_ID2: [bit15-10:ID(5-0)][bit9-4:Model][bit3-0:revision]
114 */
115 phy->id = (u32)phy_id1;
116 phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10));
117 phy->revision = (u32) (phy_id2 & 0x000F);
118 pr_debug("phy->id : 0x%08x phy->revision : 0x%08x\n",
119 phy->id, phy->revision);
120 return 0;
121}
122
123/**
124 * pch_gbe_phy_read_reg_miic - Read MII control register
125 * @hw: Pointer to the HW structure
126 * @offset: Register offset to be read
127 * @data: Pointer to the read data
128 * Returns
129 * 0: Successful.
130 * -EINVAL: Invalid argument.
131 */
132s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data)
133{
134 struct pch_gbe_phy_info *phy = &hw->phy;
135
136 if (offset > PHY_MAX_REG_ADDRESS) {
137 pr_err("PHY Address %d is out of range\n", offset);
138 return -EINVAL;
139 }
140 *data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ,
141 offset, (u16)0);
142 return 0;
143}
144
145/**
146 * pch_gbe_phy_write_reg_miic - Write MII control register
147 * @hw: Pointer to the HW structure
148 * @offset: Register offset to be read
149 * @data: data to write to register at offset
150 * Returns
151 * 0: Successful.
152 * -EINVAL: Invalid argument.
153 */
154s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data)
155{
156 struct pch_gbe_phy_info *phy = &hw->phy;
157
158 if (offset > PHY_MAX_REG_ADDRESS) {
159 pr_err("PHY Address %d is out of range\n", offset);
160 return -EINVAL;
161 }
162 pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE,
163 offset, data);
164 return 0;
165}
166
167/**
168 * pch_gbe_phy_sw_reset - PHY software reset
169 * @hw: Pointer to the HW structure
170 */
171void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
172{
173 u16 phy_ctrl;
174
175 pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &phy_ctrl);
176 phy_ctrl |= MII_CR_RESET;
177 pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, phy_ctrl);
178 udelay(1);
179}
180
181/**
182 * pch_gbe_phy_hw_reset - PHY hardware reset
183 * @hw: Pointer to the HW structure
184 */
185void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw)
186{
187 pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, PHY_CONTROL_DEFAULT);
188 pch_gbe_phy_write_reg_miic(hw, PHY_AUTONEG_ADV,
189 PHY_AUTONEG_ADV_DEFAULT);
190 pch_gbe_phy_write_reg_miic(hw, PHY_NEXT_PAGE_TX,
191 PHY_NEXT_PAGE_TX_DEFAULT);
192 pch_gbe_phy_write_reg_miic(hw, PHY_1000T_CTRL, PHY_1000T_CTRL_DEFAULT);
193 pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL,
194 PHY_PHYSP_CONTROL_DEFAULT);
195}
196
197/**
198 * pch_gbe_phy_power_up - restore link in case the phy was powered down
199 * @hw: Pointer to the HW structure
200 */
201void pch_gbe_phy_power_up(struct pch_gbe_hw *hw)
202{
203 u16 mii_reg;
204
205 mii_reg = 0;
206 /* Just clear the power-down bit to wake the PHY back up;
207 * according to the manual, the PHY retains its
208 * settings across a power-down/up cycle */
209 pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg);
210 mii_reg &= ~MII_CR_POWER_DOWN;
211 pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg);
212}
213
214/**
215 * pch_gbe_phy_power_down - Power down PHY
216 * @hw: Pointer to the HW structure
217 */
218void pch_gbe_phy_power_down(struct pch_gbe_hw *hw)
219{
220 u16 mii_reg;
221
222 mii_reg = 0;
223 /* Power down the PHY so no link is implied when interface is down.
224 * The PHY cannot be powered down if any of the following is TRUE:
225 * (a) WoL is enabled
226 * (b) AMT is active
227 */
228 pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg);
229 mii_reg |= MII_CR_POWER_DOWN;
230 pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg);
231 mdelay(1);
232}
233
234/**
235 * pch_gbe_phy_set_rgmii - RGMII interface setting
236 * @hw: Pointer to the HW structure
237 */
238inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
239{
240 pch_gbe_phy_sw_reset(hw);
241}
242
243/**
244 * pch_gbe_phy_init_setting - PHY initial setting
245 * @hw: Pointer to the HW structure
246 */
247void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
248{
249 struct pch_gbe_adapter *adapter;
250 struct ethtool_cmd cmd;
251 int ret;
252 u16 mii_reg;
253
254 adapter = container_of(hw, struct pch_gbe_adapter, hw);
255 ret = mii_ethtool_gset(&adapter->mii, &cmd);
256 if (ret)
257 pr_err("Error: mii_ethtool_gset\n");
258
259 cmd.speed = hw->mac.link_speed;
260 cmd.duplex = hw->mac.link_duplex;
261 cmd.advertising = hw->phy.autoneg_advertised;
262 cmd.autoneg = hw->mac.autoneg;
263 pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
264 ret = mii_ethtool_sset(&adapter->mii, &cmd);
265 if (ret)
266 pr_err("Error: mii_ethtool_sset\n");
267
268 pch_gbe_phy_sw_reset(hw);
269
270 pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
271 mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
272 pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
273
274}
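The bit-packing in pch_gbe_phy_get_id() above is easy to check in isolation. A stand-alone sketch; the register values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Reassemble the PHY id exactly as pch_gbe_phy_get_id() does:
 * PHY_ID1 holds id bits 21-6, PHY_ID2 holds id bits 5-0 in its top
 * six bits plus the 4-bit revision in its low nibble. */
int main(void)
{
	uint16_t phy_id1 = 0x0141;	/* sample values for illustration */
	uint16_t phy_id2 = 0x0CC2;
	uint32_t id = ((uint32_t)phy_id1 << 6) | ((phy_id2 & 0xFC00) >> 10);
	uint32_t revision = phy_id2 & 0x000F;

	/* Prints: id = 0x00005043 revision = 0x2 */
	printf("id = 0x%08x revision = 0x%x\n",
	       (unsigned)id, (unsigned)revision);
	return 0;
}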
diff --git a/drivers/net/pch_gbe/pch_gbe_phy.h b/drivers/net/pch_gbe/pch_gbe_phy.h
new file mode 100644
index 000000000000..03264dc7b5ec
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_phy.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20#ifndef _PCH_GBE_PHY_H_
21#define _PCH_GBE_PHY_H_
22
23#define PCH_GBE_PHY_REGS_LEN 32
24#define PCH_GBE_PHY_RESET_DELAY_US 10
25#define PCH_GBE_MAC_IFOP_RGMII
26
27s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw);
28s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data);
29s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data);
30void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw);
31void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw);
32void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
33void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
34void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw);
35void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw);
36
37#endif /* _PCH_GBE_PHY_H_ */
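A hedged sketch of one plausible call order for the helpers declared above, loosely modeled on the probe and resume paths in pch_gbe_main.c; this wrapper is illustrative only and is not part of the patch:

#include "pch_gbe.h"
#include "pch_gbe_phy.h"

/* Hypothetical bring-up helper (error handling elided). */
static void example_phy_bringup(struct pch_gbe_hw *hw)
{
	pch_gbe_phy_power_up(hw);	/* clear MII_CR_POWER_DOWN */
	pch_gbe_phy_hw_reset(hw);	/* reload default register values */
	pch_gbe_phy_get_id(hw);		/* cache id/revision in hw->phy */
	pch_gbe_phy_set_rgmii(hw);	/* soft reset for the RGMII interface */
}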
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 56f3fc45dbaa..8dd03439d994 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1125,7 +1125,7 @@ static int netdrv_open(struct net_device *dev)
 init_timer(&tp->timer);
 tp->timer.expires = jiffies + 3 * HZ;
 tp->timer.data = (unsigned long) dev;
-tp->timer.function = &netdrv_timer;
+tp->timer.function = netdrv_timer;
 add_timer(&tp->timer);
 
 DPRINTK("EXIT, returning 0\n");
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index c683f77c6f42..042f6777e6b9 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -69,6 +69,8 @@ earlier 3Com products.
 
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -83,7 +85,6 @@ earlier 3Com products.
 #include <linux/skbuff.h>
 #include <linux/if_arp.h>
 #include <linux/ioport.h>
-#include <linux/ethtool.h>
 #include <linux/bitops.h>
 #include <linux/mii.h>
 
@@ -238,7 +239,6 @@ static int el3_rx(struct net_device *dev, int worklimit);
 static int el3_close(struct net_device *dev);
 static void el3_tx_timeout(struct net_device *dev);
 static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static const struct ethtool_ops netdev_ethtool_ops;
 static void set_rx_mode(struct net_device *dev);
 static void set_multicast_list(struct net_device *dev);
 
@@ -285,7 +285,6 @@ static int tc574_probe(struct pcmcia_device *link)
 link->conf.ConfigIndex = 1;
 
 dev->netdev_ops = &el3_netdev_ops;
-SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 dev->watchdog_timeo = TX_TIMEOUT;
 
 return tc574_config(link);
@@ -376,8 +375,8 @@ static int tc574_config(struct pcmcia_device *link)
 for (i = 0; i < 3; i++)
 phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
 if (phys_addr[0] == htons(0x6060)) {
-printk(KERN_NOTICE "3c574_cs: IO port conflict at 0x%03lx"
-"-0x%03lx\n", dev->base_addr, dev->base_addr+15);
+pr_notice("IO port conflict at 0x%03lx-0x%03lx\n",
+dev->base_addr, dev->base_addr+15);
 goto failed;
 }
 }
@@ -391,7 +390,7 @@ static int tc574_config(struct pcmcia_device *link)
 outw(2<<11, ioaddr + RunnerRdCtrl);
 mcr = inb(ioaddr + 2);
 outw(0<<11, ioaddr + RunnerRdCtrl);
-printk(KERN_INFO " ASIC rev %d,", mcr>>3);
+pr_info(" ASIC rev %d,", mcr>>3);
 EL3WINDOW(3);
 config = inl(ioaddr + Wn3_Config);
 lp->default_media = (config & Xcvr) >> Xcvr_shift;
@@ -428,7 +427,7 @@ static int tc574_config(struct pcmcia_device *link)
 }
 }
 if (phy > 32) {
-printk(KERN_NOTICE " No MII transceivers found!\n");
+pr_notice(" No MII transceivers found!\n");
 goto failed;
 }
 i = mdio_read(ioaddr, lp->phys, 16) | 0x40;
@@ -444,18 +443,16 @@ static int tc574_config(struct pcmcia_device *link)
 SET_NETDEV_DEV(dev, &link->dev);
 
 if (register_netdev(dev) != 0) {
-printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
+pr_notice("register_netdev() failed\n");
 goto failed;
 }
 
-printk(KERN_INFO "%s: %s at io %#3lx, irq %d, "
-"hw_addr %pM.\n",
-dev->name, cardname, dev->base_addr, dev->irq,
-dev->dev_addr);
-printk(" %dK FIFO split %s Rx:Tx, %sMII interface.\n",
-8 << config & Ram_size,
-ram_split[(config & Ram_split) >> Ram_split_shift],
-config & Autoselect ? "autoselect " : "");
+netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n",
+cardname, dev->base_addr, dev->irq, dev->dev_addr);
+netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n",
+8 << config & Ram_size,
+ram_split[(config & Ram_split) >> Ram_split_shift],
+config & Autoselect ? "autoselect " : "");
 
 return 0;
 
@@ -502,14 +499,14 @@ static void dump_status(struct net_device *dev)
 {
 unsigned int ioaddr = dev->base_addr;
 EL3WINDOW(1);
-printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
-"%02x, tx free %04x\n", inw(ioaddr+EL3_STATUS),
-inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
-inw(ioaddr+TxFree));
+netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x, tx free %04x\n",
+inw(ioaddr+EL3_STATUS),
+inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
+inw(ioaddr+TxFree));
 EL3WINDOW(4);
-printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x"
-" media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06),
-inw(ioaddr+0x08), inw(ioaddr+0x0a));
+netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+inw(ioaddr+0x04), inw(ioaddr+0x06),
+inw(ioaddr+0x08), inw(ioaddr+0x0a));
 EL3WINDOW(1);
 }
 
@@ -523,7 +520,7 @@ static void tc574_wait_for_completion(struct net_device *dev, int cmd)
 while (--i > 0)
 if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
 if (i == 0)
-printk(KERN_NOTICE "%s: command 0x%04x did not complete!\n", dev->name, cmd);
+netdev_notice(dev, "command 0x%04x did not complete!\n", cmd);
 }
 
 /* Read a word from the EEPROM using the regular EEPROM access register.
@@ -710,7 +707,7 @@ static int el3_open(struct net_device *dev)
 netif_start_queue(dev);
 
 tc574_reset(dev);
-lp->media.function = &media_check;
+lp->media.function = media_check;
 lp->media.data = (unsigned long) dev;
 lp->media.expires = jiffies + HZ;
 add_timer(&lp->media);
@@ -725,7 +722,7 @@ static void el3_tx_timeout(struct net_device *dev)
 {
 unsigned int ioaddr = dev->base_addr;
 
-printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
+netdev_notice(dev, "Transmit timed out!\n");
 dump_status(dev);
 dev->stats.tx_errors++;
 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -848,8 +845,8 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
 EL3WINDOW(4);
 fifo_diag = inw(ioaddr + Wn4_FIFODiag);
 EL3WINDOW(1);
-printk(KERN_NOTICE "%s: adapter failure, FIFO diagnostic"
-" register %04x.\n", dev->name, fifo_diag);
+netdev_notice(dev, "adapter failure, FIFO diagnostic register %04x\n",
+fifo_diag);
 if (fifo_diag & 0x0400) {
 /* Tx overrun */
 tc574_wait_for_completion(dev, TxReset);
@@ -903,7 +900,7 @@ static void media_check(unsigned long arg)
 this, we can limp along even if the interrupt is blocked */
 if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) {
 if (!lp->fast_poll)
-printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
+netdev_info(dev, "interrupt(s) dropped!\n");
 
 local_irq_save(flags);
 el3_interrupt(dev->irq, dev);
@@ -926,23 +923,21 @@ static void media_check(unsigned long arg)
 
 if (media != lp->media_status) {
 if ((media ^ lp->media_status) & 0x0004)
-printk(KERN_INFO "%s: %s link beat\n", dev->name,
-(lp->media_status & 0x0004) ? "lost" : "found");
+netdev_info(dev, "%s link beat\n",
+(lp->media_status & 0x0004) ? "lost" : "found");
 if ((media ^ lp->media_status) & 0x0020) {
 lp->partner = 0;
 if (lp->media_status & 0x0020) {
-printk(KERN_INFO "%s: autonegotiation restarted\n",
-dev->name);
+netdev_info(dev, "autonegotiation restarted\n");
 } else if (partner) {
 partner &= lp->advertising;
 lp->partner = partner;
-printk(KERN_INFO "%s: autonegotiation complete: "
-"%sbaseT-%cD selected\n", dev->name,
-((partner & 0x0180) ? "100" : "10"),
-((partner & 0x0140) ? 'F' : 'H'));
+netdev_info(dev, "autonegotiation complete: "
+"%dbaseT-%cD selected\n",
+(partner & 0x0180) ? 100 : 10,
+(partner & 0x0140) ? 'F' : 'H');
 } else {
-printk(KERN_INFO "%s: link partner did not autonegotiate\n",
-dev->name);
+netdev_info(dev, "link partner did not autonegotiate\n");
 }
 
 EL3WINDOW(3);
@@ -952,10 +947,9 @@ static void media_check(unsigned long arg)
 
 }
 if (media & 0x0010)
-printk(KERN_INFO "%s: remote fault detected\n",
-dev->name);
+netdev_info(dev, "remote fault detected\n");
 if (media & 0x0002)
-printk(KERN_INFO "%s: jabber detected\n", dev->name);
+netdev_info(dev, "jabber detected\n");
 lp->media_status = media;
 }
 spin_unlock_irqrestore(&lp->window_lock, flags);
@@ -1065,16 +1059,6 @@ static int el3_rx(struct net_device *dev, int worklimit)
 return worklimit;
 }
 
-static void netdev_get_drvinfo(struct net_device *dev,
-struct ethtool_drvinfo *info)
-{
-strcpy(info->driver, "3c574_cs");
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
-.get_drvinfo = netdev_get_drvinfo,
-};
-
 /* Provide ioctl() calls to examine the MII xcvr state. */
 static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 61f9cf2100ff..35562a395770 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -19,6 +19,8 @@
19 19
20======================================================================*/ 20======================================================================*/
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#define DRV_NAME "3c589_cs" 24#define DRV_NAME "3c589_cs"
23#define DRV_VERSION "1.162-ac" 25#define DRV_VERSION "1.162-ac"
24 26
@@ -264,7 +266,7 @@ static int tc589_config(struct pcmcia_device *link)
264 __be16 *phys_addr; 266 __be16 *phys_addr;
265 int ret, i, j, multi = 0, fifo; 267 int ret, i, j, multi = 0, fifo;
266 unsigned int ioaddr; 268 unsigned int ioaddr;
267 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 269 static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
268 u8 *buf; 270 u8 *buf;
269 size_t len; 271 size_t len;
270 272
@@ -273,8 +275,7 @@ static int tc589_config(struct pcmcia_device *link)
273 phys_addr = (__be16 *)dev->dev_addr; 275 phys_addr = (__be16 *)dev->dev_addr;
274 /* Is this a 3c562? */ 276 /* Is this a 3c562? */
275 if (link->manf_id != MANFID_3COM) 277 if (link->manf_id != MANFID_3COM)
276 printk(KERN_INFO "3c589_cs: hmmm, is this really a " 278 dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
277 "3Com card??\n");
278 multi = (link->card_id == PRODID_3COM_3C562); 279 multi = (link->card_id == PRODID_3COM_3C562);
279 280
280 link->io_lines = 16; 281 link->io_lines = 16;
@@ -315,8 +316,8 @@ static int tc589_config(struct pcmcia_device *link)
315 for (i = 0; i < 3; i++) 316 for (i = 0; i < 3; i++)
316 phys_addr[i] = htons(read_eeprom(ioaddr, i)); 317 phys_addr[i] = htons(read_eeprom(ioaddr, i));
317 if (phys_addr[0] == htons(0x6060)) { 318 if (phys_addr[0] == htons(0x6060)) {
318 printk(KERN_ERR "3c589_cs: IO port conflict at 0x%03lx" 319 dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
319 "-0x%03lx\n", dev->base_addr, dev->base_addr+15); 320 dev->base_addr, dev->base_addr+15);
320 goto failed; 321 goto failed;
321 } 322 }
322 } 323 }
@@ -330,12 +331,12 @@ static int tc589_config(struct pcmcia_device *link)
330 if ((if_port >= 0) && (if_port <= 3)) 331 if ((if_port >= 0) && (if_port <= 3))
331 dev->if_port = if_port; 332 dev->if_port = if_port;
332 else 333 else
333 printk(KERN_ERR "3c589_cs: invalid if_port requested\n"); 334 dev_err(&link->dev, "invalid if_port requested\n");
334 335
335 SET_NETDEV_DEV(dev, &link->dev); 336 SET_NETDEV_DEV(dev, &link->dev);
336 337
337 if (register_netdev(dev) != 0) { 338 if (register_netdev(dev) != 0) {
338 printk(KERN_ERR "3c589_cs: register_netdev() failed\n"); 339 dev_err(&link->dev, "register_netdev() failed\n");
339 goto failed; 340 goto failed;
340 } 341 }
341 342
@@ -537,7 +538,7 @@ static int el3_open(struct net_device *dev)
537 538
538 tc589_reset(dev); 539 tc589_reset(dev);
539 init_timer(&lp->media); 540 init_timer(&lp->media);
540 lp->media.function = &media_check; 541 lp->media.function = media_check;
541 lp->media.data = (unsigned long) dev; 542 lp->media.data = (unsigned long) dev;
542 lp->media.expires = jiffies + HZ; 543 lp->media.expires = jiffies + HZ;
543 add_timer(&lp->media); 544 add_timer(&lp->media);
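
Note the split in tc589_config() above: messages issued before register_netdev() has succeeded use dev_info()/dev_err() against the PCMCIA device (&link->dev), because the net device has no usable name yet, while messages after registration use the netdev_*() helpers. A sketch of that decision under those assumptions (config_report() and its error path are illustrative, not from the driver):

#include <linux/device.h>
#include <linux/netdevice.h>

/* Sketch: pick the logging helper by whether the netdev is named yet. */
static int config_report(struct device *parent, struct net_device *dev)
{
	if (register_netdev(dev) != 0) {
		/* not registered: dev->name is not meaningful here, so
		 * log against the parent (bus) device instead */
		dev_err(parent, "register_netdev() failed\n");
		return -ENODEV;
	}
	/* registered: netdev_info() now prints e.g. "eth0: ..." */
	netdev_info(dev, "configured at io %#3lx, irq %d\n",
		    dev->base_addr, dev->irq);
	return 0;
}
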
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 5f05ffb240cc..3f61fde70d73 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -24,6 +24,8 @@
24 24
25======================================================================*/ 25======================================================================*/
26 26
27#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
28
27#include <linux/kernel.h> 29#include <linux/kernel.h>
28#include <linux/module.h> 30#include <linux/module.h>
29#include <linux/init.h> 31#include <linux/init.h>
@@ -32,7 +34,6 @@
32#include <linux/timer.h> 34#include <linux/timer.h>
33#include <linux/delay.h> 35#include <linux/delay.h>
34#include <linux/spinlock.h> 36#include <linux/spinlock.h>
35#include <linux/ethtool.h>
36#include <linux/netdevice.h> 37#include <linux/netdevice.h>
37#include <linux/etherdevice.h> 38#include <linux/etherdevice.h>
38#include <linux/crc32.h> 39#include <linux/crc32.h>
@@ -86,7 +87,6 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
86static struct net_device_stats *get_stats(struct net_device *dev); 87static struct net_device_stats *get_stats(struct net_device *dev);
87static void set_multicast_list(struct net_device *dev); 88static void set_multicast_list(struct net_device *dev);
88static void axnet_tx_timeout(struct net_device *dev); 89static void axnet_tx_timeout(struct net_device *dev);
89static const struct ethtool_ops netdev_ethtool_ops;
90static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); 90static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
91static void ei_watchdog(u_long arg); 91static void ei_watchdog(u_long arg);
92static void axnet_reset_8390(struct net_device *dev); 92static void axnet_reset_8390(struct net_device *dev);
@@ -171,7 +171,6 @@ static int axnet_probe(struct pcmcia_device *link)
171 171
172 dev->netdev_ops = &axnet_netdev_ops; 172 dev->netdev_ops = &axnet_netdev_ops;
173 173
174 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
175 dev->watchdog_timeo = TX_TIMEOUT; 174 dev->watchdog_timeo = TX_TIMEOUT;
176 175
177 return axnet_config(link); 176 return axnet_config(link);
@@ -347,8 +346,8 @@ static int axnet_config(struct pcmcia_device *link)
347 dev->base_addr = link->resource[0]->start; 346 dev->base_addr = link->resource[0]->start;
348 347
349 if (!get_prom(link)) { 348 if (!get_prom(link)) {
350 printk(KERN_NOTICE "axnet_cs: this is not an AX88190 card!\n"); 349 pr_notice("this is not an AX88190 card!\n");
351 printk(KERN_NOTICE "axnet_cs: use pcnet_cs instead.\n"); 350 pr_notice("use pcnet_cs instead.\n");
352 goto failed; 351 goto failed;
353 } 352 }
354 353
@@ -357,10 +356,10 @@ static int axnet_config(struct pcmcia_device *link)
357 ei_status.tx_start_page = AXNET_START_PG; 356 ei_status.tx_start_page = AXNET_START_PG;
358 ei_status.rx_start_page = AXNET_START_PG + TX_PAGES; 357 ei_status.rx_start_page = AXNET_START_PG + TX_PAGES;
359 ei_status.stop_page = AXNET_STOP_PG; 358 ei_status.stop_page = AXNET_STOP_PG;
360 ei_status.reset_8390 = &axnet_reset_8390; 359 ei_status.reset_8390 = axnet_reset_8390;
361 ei_status.get_8390_hdr = &get_8390_hdr; 360 ei_status.get_8390_hdr = get_8390_hdr;
362 ei_status.block_input = &block_input; 361 ei_status.block_input = block_input;
363 ei_status.block_output = &block_output; 362 ei_status.block_output = block_output;
364 363
365 if (inb(dev->base_addr + AXNET_TEST) != 0) 364 if (inb(dev->base_addr + AXNET_TEST) != 0)
366 info->flags |= IS_AX88790; 365 info->flags |= IS_AX88790;
@@ -393,19 +392,18 @@ static int axnet_config(struct pcmcia_device *link)
393 SET_NETDEV_DEV(dev, &link->dev); 392 SET_NETDEV_DEV(dev, &link->dev);
394 393
395 if (register_netdev(dev) != 0) { 394 if (register_netdev(dev) != 0) {
396 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n"); 395 pr_notice("register_netdev() failed\n");
397 goto failed; 396 goto failed;
398 } 397 }
399 398
400 printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, " 399 netdev_info(dev, "Asix AX88%d90: io %#3lx, irq %d, hw_addr %pM\n",
401 "hw_addr %pM\n", 400 ((info->flags & IS_AX88790) ? 7 : 1),
402 dev->name, ((info->flags & IS_AX88790) ? 7 : 1), 401 dev->base_addr, dev->irq, dev->dev_addr);
403 dev->base_addr, dev->irq,
404 dev->dev_addr);
405 if (info->phy_id != -1) { 402 if (info->phy_id != -1) {
406 dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n", info->phy_id, j); 403 netdev_dbg(dev, " MII transceiver at index %d, status %x\n",
404 info->phy_id, j);
407 } else { 405 } else {
408 printk(KERN_NOTICE " No MII transceivers found!\n"); 406 netdev_notice(dev, " No MII transceivers found!\n");
409 } 407 }
410 return 0; 408 return 0;
411 409
@@ -532,7 +530,7 @@ static int axnet_open(struct net_device *dev)
532 530
533 info->link_status = 0x00; 531 info->link_status = 0x00;
534 init_timer(&info->watchdog); 532 init_timer(&info->watchdog);
535 info->watchdog.function = &ei_watchdog; 533 info->watchdog.function = ei_watchdog;
536 info->watchdog.data = (u_long)dev; 534 info->watchdog.data = (u_long)dev;
537 info->watchdog.expires = jiffies + HZ; 535 info->watchdog.expires = jiffies + HZ;
538 add_timer(&info->watchdog); 536 add_timer(&info->watchdog);
@@ -585,8 +583,7 @@ static void axnet_reset_8390(struct net_device *dev)
585 outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */ 583 outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
586 584
587 if (i == 100) 585 if (i == 100)
588 printk(KERN_ERR "%s: axnet_reset_8390() did not complete.\n", 586 netdev_err(dev, "axnet_reset_8390() did not complete\n");
589 dev->name);
590 587
591} /* axnet_reset_8390 */ 588} /* axnet_reset_8390 */
592 589
@@ -613,7 +610,7 @@ static void ei_watchdog(u_long arg)
613 this, we can limp along even if the interrupt is blocked */ 610 this, we can limp along even if the interrupt is blocked */
614 if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) { 611 if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
615 if (!info->fast_poll) 612 if (!info->fast_poll)
616 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); 613 netdev_info(dev, "interrupt(s) dropped!\n");
617 ei_irq_wrapper(dev->irq, dev); 614 ei_irq_wrapper(dev->irq, dev);
618 info->fast_poll = HZ; 615 info->fast_poll = HZ;
619 } 616 }
@@ -628,7 +625,7 @@ static void ei_watchdog(u_long arg)
628 goto reschedule; 625 goto reschedule;
629 link = mdio_read(mii_addr, info->phy_id, 1); 626 link = mdio_read(mii_addr, info->phy_id, 1);
630 if (!link || (link == 0xffff)) { 627 if (!link || (link == 0xffff)) {
631 printk(KERN_INFO "%s: MII is missing!\n", dev->name); 628 netdev_info(dev, "MII is missing!\n");
632 info->phy_id = -1; 629 info->phy_id = -1;
633 goto reschedule; 630 goto reschedule;
634 } 631 }
@@ -636,18 +633,14 @@ static void ei_watchdog(u_long arg)
636 link &= 0x0004; 633 link &= 0x0004;
637 if (link != info->link_status) { 634 if (link != info->link_status) {
638 u_short p = mdio_read(mii_addr, info->phy_id, 5); 635 u_short p = mdio_read(mii_addr, info->phy_id, 5);
639 printk(KERN_INFO "%s: %s link beat\n", dev->name, 636 netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
640 (link) ? "found" : "lost");
641 if (link) { 637 if (link) {
642 info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00; 638 info->duplex_flag = (p & 0x0140) ? 0x80 : 0x00;
643 if (p) 639 if (p)
644 printk(KERN_INFO "%s: autonegotiation complete: " 640 netdev_info(dev, "autonegotiation complete: %dbaseT-%cD selected\n",
645 "%sbaseT-%cD selected\n", dev->name, 641 (p & 0x0180) ? 100 : 10, (p & 0x0140) ? 'F' : 'H');
646 ((p & 0x0180) ? "100" : "10"),
647 ((p & 0x0140) ? 'F' : 'H'));
648 else 642 else
649 printk(KERN_INFO "%s: link partner did not autonegotiate\n", 643 netdev_info(dev, "link partner did not autonegotiate\n");
650 dev->name);
651 AX88190_init(dev, 1); 644 AX88190_init(dev, 1);
652 } 645 }
653 info->link_status = link; 646 info->link_status = link;
@@ -658,16 +651,6 @@ reschedule:
658 add_timer(&info->watchdog); 651 add_timer(&info->watchdog);
659} 652}
660 653
661static void netdev_get_drvinfo(struct net_device *dev,
662 struct ethtool_drvinfo *info)
663{
664 strcpy(info->driver, "axnet_cs");
665}
666
667static const struct ethtool_ops netdev_ethtool_ops = {
668 .get_drvinfo = netdev_get_drvinfo,
669};
670
671/*====================================================================*/ 654/*====================================================================*/
672 655
673static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 656static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -855,9 +838,6 @@ module_exit(exit_axnet_cs);
855 838
856 */ 839 */
857 840
858static const char version_8390[] = KERN_INFO \
859 "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@scyld.com)\n";
860
861#include <linux/bitops.h> 841#include <linux/bitops.h>
862#include <asm/irq.h> 842#include <asm/irq.h>
863#include <linux/fcntl.h> 843#include <linux/fcntl.h>
@@ -1004,9 +984,11 @@ static void axnet_tx_timeout(struct net_device *dev)
1004 isr = inb(e8390_base+EN0_ISR); 984 isr = inb(e8390_base+EN0_ISR);
1005 spin_unlock_irqrestore(&ei_local->page_lock, flags); 985 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1006 986
1007 printk(KERN_DEBUG "%s: Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n", 987 netdev_printk(KERN_DEBUG, dev,
1008 dev->name, (txsr & ENTSR_ABT) ? "excess collisions." : 988 "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d.\n",
1009 (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar); 989 (txsr & ENTSR_ABT) ? "excess collisions." :
990 (isr) ? "lost interrupt?" : "cable problem?",
991 txsr, isr, tickssofar);
1010 992
1011 if (!isr && !dev->stats.tx_packets) 993 if (!isr && !dev->stats.tx_packets)
1012 { 994 {
@@ -1076,22 +1058,28 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
1076 output_page = ei_local->tx_start_page; 1058 output_page = ei_local->tx_start_page;
1077 ei_local->tx1 = send_length; 1059 ei_local->tx1 = send_length;
1078 if (ei_debug && ei_local->tx2 > 0) 1060 if (ei_debug && ei_local->tx2 > 0)
1079 printk(KERN_DEBUG "%s: idle transmitter tx2=%d, lasttx=%d, txing=%d.\n", 1061 netdev_printk(KERN_DEBUG, dev,
1080 dev->name, ei_local->tx2, ei_local->lasttx, ei_local->txing); 1062 "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
1063 ei_local->tx2, ei_local->lasttx,
1064 ei_local->txing);
1081 } 1065 }
1082 else if (ei_local->tx2 == 0) 1066 else if (ei_local->tx2 == 0)
1083 { 1067 {
1084 output_page = ei_local->tx_start_page + TX_PAGES/2; 1068 output_page = ei_local->tx_start_page + TX_PAGES/2;
1085 ei_local->tx2 = send_length; 1069 ei_local->tx2 = send_length;
1086 if (ei_debug && ei_local->tx1 > 0) 1070 if (ei_debug && ei_local->tx1 > 0)
1087 printk(KERN_DEBUG "%s: idle transmitter, tx1=%d, lasttx=%d, txing=%d.\n", 1071 netdev_printk(KERN_DEBUG, dev,
1088 dev->name, ei_local->tx1, ei_local->lasttx, ei_local->txing); 1072 "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
1073 ei_local->tx1, ei_local->lasttx,
1074 ei_local->txing);
1089 } 1075 }
1090 else 1076 else
1091 { /* We should never get here. */ 1077 { /* We should never get here. */
1092 if (ei_debug) 1078 if (ei_debug)
1093 printk(KERN_DEBUG "%s: No Tx buffers free! tx1=%d tx2=%d last=%d\n", 1079 netdev_printk(KERN_DEBUG, dev,
1094 dev->name, ei_local->tx1, ei_local->tx2, ei_local->lasttx); 1080 "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
1081 ei_local->tx1, ei_local->tx2,
1082 ei_local->lasttx);
1095 ei_local->irqlock = 0; 1083 ei_local->irqlock = 0;
1096 netif_stop_queue(dev); 1084 netif_stop_queue(dev);
1097 outb_p(ENISR_ALL, e8390_base + EN0_IMR); 1085 outb_p(ENISR_ALL, e8390_base + EN0_IMR);
@@ -1179,23 +1167,26 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
1179 1167
1180 spin_lock_irqsave(&ei_local->page_lock, flags); 1168 spin_lock_irqsave(&ei_local->page_lock, flags);
1181 1169
1182 if (ei_local->irqlock) 1170 if (ei_local->irqlock) {
1183 {
1184#if 1 /* This might just be an interrupt for a PCI device sharing this line */ 1171#if 1 /* This might just be an interrupt for a PCI device sharing this line */
1172 const char *msg;
1185 /* The "irqlock" check is only for testing. */ 1173 /* The "irqlock" check is only for testing. */
1186 printk(ei_local->irqlock 1174 if (ei_local->irqlock)
1187 ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n" 1175 msg = "Interrupted while interrupts are masked!";
1188 : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n", 1176 else
1189 dev->name, inb_p(e8390_base + EN0_ISR), 1177 msg = "Reentering the interrupt handler!";
1190 inb_p(e8390_base + EN0_IMR)); 1178 netdev_info(dev, "%s, isr=%#2x imr=%#2x\n",
1179 msg,
1180 inb_p(e8390_base + EN0_ISR),
1181 inb_p(e8390_base + EN0_IMR));
1191#endif 1182#endif
1192 spin_unlock_irqrestore(&ei_local->page_lock, flags); 1183 spin_unlock_irqrestore(&ei_local->page_lock, flags);
1193 return IRQ_NONE; 1184 return IRQ_NONE;
1194 } 1185 }
1195 1186
1196 if (ei_debug > 3) 1187 if (ei_debug > 3)
1197 printk(KERN_DEBUG "%s: interrupt(isr=%#2.2x).\n", dev->name, 1188 netdev_printk(KERN_DEBUG, dev, "interrupt(isr=%#2.2x)\n",
1198 inb_p(e8390_base + EN0_ISR)); 1189 inb_p(e8390_base + EN0_ISR));
1199 1190
1200 outb_p(0x00, e8390_base + EN0_ISR); 1191 outb_p(0x00, e8390_base + EN0_ISR);
1201 ei_local->irqlock = 1; 1192 ei_local->irqlock = 1;
@@ -1206,7 +1197,8 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
1206 { 1197 {
1207 if (!netif_running(dev) || (interrupts == 0xff)) { 1198 if (!netif_running(dev) || (interrupts == 0xff)) {
1208 if (ei_debug > 1) 1199 if (ei_debug > 1)
1209 printk(KERN_WARNING "%s: interrupt from stopped card\n", dev->name); 1200 netdev_warn(dev,
1201 "interrupt from stopped card\n");
1210 outb_p(interrupts, e8390_base + EN0_ISR); 1202 outb_p(interrupts, e8390_base + EN0_ISR);
1211 interrupts = 0; 1203 interrupts = 0;
1212 break; 1204 break;
@@ -1249,11 +1241,12 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
1249 { 1241 {
1250 /* 0xFF is valid for a card removal */ 1242 /* 0xFF is valid for a card removal */
1251 if(interrupts!=0xFF) 1243 if(interrupts!=0xFF)
1252 printk(KERN_WARNING "%s: Too much work at interrupt, status %#2.2x\n", 1244 netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
1253 dev->name, interrupts); 1245 interrupts);
1254 outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ 1246 outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
1255 } else { 1247 } else {
1256 printk(KERN_WARNING "%s: unknown interrupt %#2x\n", dev->name, interrupts); 1248 netdev_warn(dev, "unknown interrupt %#2x\n",
1249 interrupts);
1257 outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ 1250 outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
1258 } 1251 }
1259 } 1252 }
@@ -1287,18 +1280,19 @@ static void ei_tx_err(struct net_device *dev)
1287 unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); 1280 unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
1288 1281
1289#ifdef VERBOSE_ERROR_DUMP 1282#ifdef VERBOSE_ERROR_DUMP
1290 printk(KERN_DEBUG "%s: transmitter error (%#2x): ", dev->name, txsr); 1283 netdev_printk(KERN_DEBUG, dev,
1284 "transmitter error (%#2x):", txsr);
1291 if (txsr & ENTSR_ABT) 1285 if (txsr & ENTSR_ABT)
1292 printk("excess-collisions "); 1286 pr_cont(" excess-collisions");
1293 if (txsr & ENTSR_ND) 1287 if (txsr & ENTSR_ND)
1294 printk("non-deferral "); 1288 pr_cont(" non-deferral");
1295 if (txsr & ENTSR_CRS) 1289 if (txsr & ENTSR_CRS)
1296 printk("lost-carrier "); 1290 pr_cont(" lost-carrier");
1297 if (txsr & ENTSR_FU) 1291 if (txsr & ENTSR_FU)
1298 printk("FIFO-underrun "); 1292 pr_cont(" FIFO-underrun");
1299 if (txsr & ENTSR_CDH) 1293 if (txsr & ENTSR_CDH)
1300 printk("lost-heartbeat "); 1294 pr_cont(" lost-heartbeat");
1301 printk("\n"); 1295 pr_cont("\n");
1302#endif 1296#endif
1303 1297
1304 if (tx_was_aborted) 1298 if (tx_was_aborted)
@@ -1335,8 +1329,9 @@ static void ei_tx_intr(struct net_device *dev)
1335 if (ei_local->tx1 < 0) 1329 if (ei_local->tx1 < 0)
1336 { 1330 {
1337 if (ei_local->lasttx != 1 && ei_local->lasttx != -1) 1331 if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
1338 printk(KERN_ERR "%s: bogus last_tx_buffer %d, tx1=%d.\n", 1332 netdev_err(dev, "%s: bogus last_tx_buffer %d, tx1=%d\n",
1339 ei_local->name, ei_local->lasttx, ei_local->tx1); 1333 ei_local->name, ei_local->lasttx,
1334 ei_local->tx1);
1340 ei_local->tx1 = 0; 1335 ei_local->tx1 = 0;
1341 if (ei_local->tx2 > 0) 1336 if (ei_local->tx2 > 0)
1342 { 1337 {
@@ -1351,8 +1346,9 @@ static void ei_tx_intr(struct net_device *dev)
1351 else if (ei_local->tx2 < 0) 1346 else if (ei_local->tx2 < 0)
1352 { 1347 {
1353 if (ei_local->lasttx != 2 && ei_local->lasttx != -2) 1348 if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
1354 printk("%s: bogus last_tx_buffer %d, tx2=%d.\n", 1349 netdev_info(dev, "%s: bogus last_tx_buffer %d, tx2=%d\n",
1355 ei_local->name, ei_local->lasttx, ei_local->tx2); 1350 ei_local->name, ei_local->lasttx,
1351 ei_local->tx2);
1356 ei_local->tx2 = 0; 1352 ei_local->tx2 = 0;
1357 if (ei_local->tx1 > 0) 1353 if (ei_local->tx1 > 0)
1358 { 1354 {
@@ -1365,8 +1361,9 @@ static void ei_tx_intr(struct net_device *dev)
1365 else 1361 else
1366 ei_local->lasttx = 10, ei_local->txing = 0; 1362 ei_local->lasttx = 10, ei_local->txing = 0;
1367 } 1363 }
1368// else printk(KERN_WARNING "%s: unexpected TX-done interrupt, lasttx=%d.\n", 1364// else
1369// dev->name, ei_local->lasttx); 1365// netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
1366// ei_local->lasttx);
1370 1367
1371 /* Minimize Tx latency: update the statistics after we restart TXing. */ 1368 /* Minimize Tx latency: update the statistics after we restart TXing. */
1372 if (status & ENTSR_COL) 1369 if (status & ENTSR_COL)
@@ -1429,8 +1426,8 @@ static void ei_receive(struct net_device *dev)
1429 is that some clones crash in roughly the same way. 1426 is that some clones crash in roughly the same way.
1430 */ 1427 */
1431 if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF)) 1428 if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame!=0x0 || rxing_page!=0xFF))
1432 printk(KERN_ERR "%s: mismatched read page pointers %2x vs %2x.\n", 1429 netdev_err(dev, "mismatched read page pointers %2x vs %2x\n",
1433 dev->name, this_frame, ei_local->current_page); 1430 this_frame, ei_local->current_page);
1434 1431
1435 if (this_frame == rxing_page) /* Read all the frames? */ 1432 if (this_frame == rxing_page) /* Read all the frames? */
1436 break; /* Done for now */ 1433 break; /* Done for now */
@@ -1446,9 +1443,10 @@ static void ei_receive(struct net_device *dev)
1446 if (pkt_len < 60 || pkt_len > 1518) 1443 if (pkt_len < 60 || pkt_len > 1518)
1447 { 1444 {
1448 if (ei_debug) 1445 if (ei_debug)
1449 printk(KERN_DEBUG "%s: bogus packet size: %d, status=%#2x nxpg=%#2x.\n", 1446 netdev_printk(KERN_DEBUG, dev,
1450 dev->name, rx_frame.count, rx_frame.status, 1447 "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
1451 rx_frame.next); 1448 rx_frame.count, rx_frame.status,
1449 rx_frame.next);
1452 dev->stats.rx_errors++; 1450 dev->stats.rx_errors++;
1453 dev->stats.rx_length_errors++; 1451 dev->stats.rx_length_errors++;
1454 } 1452 }
@@ -1460,8 +1458,9 @@ static void ei_receive(struct net_device *dev)
1460 if (skb == NULL) 1458 if (skb == NULL)
1461 { 1459 {
1462 if (ei_debug > 1) 1460 if (ei_debug > 1)
1463 printk(KERN_DEBUG "%s: Couldn't allocate a sk_buff of size %d.\n", 1461 netdev_printk(KERN_DEBUG, dev,
1464 dev->name, pkt_len); 1462 "Couldn't allocate a sk_buff of size %d\n",
1463 pkt_len);
1465 dev->stats.rx_dropped++; 1464 dev->stats.rx_dropped++;
1466 break; 1465 break;
1467 } 1466 }
@@ -1481,9 +1480,10 @@ static void ei_receive(struct net_device *dev)
1481 else 1480 else
1482 { 1481 {
1483 if (ei_debug) 1482 if (ei_debug)
1484 printk(KERN_DEBUG "%s: bogus packet: status=%#2x nxpg=%#2x size=%d\n", 1483 netdev_printk(KERN_DEBUG, dev,
1485 dev->name, rx_frame.status, rx_frame.next, 1484 "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
1486 rx_frame.count); 1485 rx_frame.status, rx_frame.next,
1486 rx_frame.count);
1487 dev->stats.rx_errors++; 1487 dev->stats.rx_errors++;
1488 /* NB: The NIC counts CRC, frame and missed errors. */ 1488 /* NB: The NIC counts CRC, frame and missed errors. */
1489 if (pkt_stat & ENRSR_FO) 1489 if (pkt_stat & ENRSR_FO)
@@ -1493,8 +1493,8 @@ static void ei_receive(struct net_device *dev)
1493 1493
1494 /* This _should_ never happen: it's here for avoiding bad clones. */ 1494 /* This _should_ never happen: it's here for avoiding bad clones. */
1495 if (next_frame >= ei_local->stop_page) { 1495 if (next_frame >= ei_local->stop_page) {
1496 printk("%s: next frame inconsistency, %#2x\n", dev->name, 1496 netdev_info(dev, "next frame inconsistency, %#2x\n",
1497 next_frame); 1497 next_frame);
1498 next_frame = ei_local->rx_start_page; 1498 next_frame = ei_local->rx_start_page;
1499 } 1499 }
1500 ei_local->current_page = next_frame; 1500 ei_local->current_page = next_frame;
@@ -1529,7 +1529,7 @@ static void ei_rx_overrun(struct net_device *dev)
1529 outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); 1529 outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1530 1530
1531 if (ei_debug > 1) 1531 if (ei_debug > 1)
1532 printk(KERN_DEBUG "%s: Receiver overrun.\n", dev->name); 1532 netdev_printk(KERN_DEBUG, dev, "Receiver overrun\n");
1533 dev->stats.rx_over_errors++; 1533 dev->stats.rx_over_errors++;
1534 1534
1535 /* 1535 /*
@@ -1726,7 +1726,7 @@ static void AX88190_init(struct net_device *dev, int startp)
1726 { 1726 {
1727 outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i)); 1727 outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
1728 if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i]) 1728 if(inb_p(e8390_base + EN1_PHYS_SHIFT(i))!=dev->dev_addr[i])
1729 printk(KERN_ERR "Hw. address read/write mismap %d\n",i); 1729 netdev_err(dev, "Hw. address read/write mismap %d\n", i);
1730 } 1730 }
1731 1731
1732 outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); 1732 outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
@@ -1763,8 +1763,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1763 1763
1764 if (inb_p(e8390_base) & E8390_TRANS) 1764 if (inb_p(e8390_base) & E8390_TRANS)
1765 { 1765 {
1766 printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n", 1766 netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
1767 dev->name);
1768 return; 1767 return;
1769 } 1768 }
1770 outb_p(length & 0xff, e8390_base + EN0_TCNTLO); 1769 outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
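
The VERBOSE_ERROR_DUMP hunk above (and the NE2000 banner in pcnet_cs further down) relies on pr_cont() for lines assembled from several calls: one prefixed call opens the line at a fixed severity, and each fragment is appended with pr_cont() (i.e. KERN_CONT) so it is glued onto the open line rather than starting a new one. A reduced sketch; the bit masks are placeholders, not the real ENTSR_* values:

#include <linux/printk.h>
#include <linux/netdevice.h>

/* Sketch: building one log line from multiple calls. */
static void dump_tx_status(struct net_device *dev, unsigned char txsr)
{
	netdev_printk(KERN_DEBUG, dev, "transmitter error (%#2x):", txsr);
	if (txsr & 0x08)		/* placeholder for ENTSR_ABT */
		pr_cont(" excess-collisions");
	if (txsr & 0x20)		/* placeholder for ENTSR_FU */
		pr_cont(" FIFO-underrun");
	pr_cont("\n");			/* terminate the assembled line */
}
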
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 3c400cfa82ae..f065c35cd4b7 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -52,23 +52,23 @@
52 52
53#define VERSION "arcnet: COM20020 PCMCIA support loaded.\n" 53#define VERSION "arcnet: COM20020 PCMCIA support loaded.\n"
54 54
55#ifdef DEBUG
56 55
57static void regdump(struct net_device *dev) 56static void regdump(struct net_device *dev)
58{ 57{
58#ifdef DEBUG
59 int ioaddr = dev->base_addr; 59 int ioaddr = dev->base_addr;
60 int count; 60 int count;
61 61
62 printk("com20020 register dump:\n"); 62 netdev_dbg(dev, "register dump:\n");
63 for (count = ioaddr; count < ioaddr + 16; count++) 63 for (count = ioaddr; count < ioaddr + 16; count++)
64 { 64 {
65 if (!(count % 16)) 65 if (!(count % 16))
66 printk("\n%04X: ", count); 66 pr_cont("%04X:", count);
67 printk("%02X ", inb(count)); 67 pr_cont(" %02X", inb(count));
68 } 68 }
69 printk("\n"); 69 pr_cont("\n");
70 70
71 printk("buffer0 dump:\n"); 71 netdev_dbg(dev, "buffer0 dump:\n");
72 /* set up the address register */ 72 /* set up the address register */
73 count = 0; 73 count = 0;
74 outb((count >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI); 74 outb((count >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI);
@@ -77,19 +77,15 @@ static void regdump(struct net_device *dev)
77 for (count = 0; count < 256+32; count++) 77 for (count = 0; count < 256+32; count++)
78 { 78 {
79 if (!(count % 16)) 79 if (!(count % 16))
80 printk("\n%04X: ", count); 80 pr_cont("%04X:", count);
81 81
82 /* copy the data */ 82 /* copy the data */
83 printk("%02X ", inb(_MEMDATA)); 83 pr_cont(" %02X", inb(_MEMDATA));
84 } 84 }
85 printk("\n"); 85 pr_cont("\n");
86#endif
86} 87}
87 88
88#else
89
90static inline void regdump(struct net_device *dev) { }
91
92#endif
93 89
94 90
95/*====================================================================*/ 91/*====================================================================*/
@@ -301,13 +297,13 @@ static int com20020_config(struct pcmcia_device *link)
301 i = com20020_found(dev, 0); /* calls register_netdev */ 297 i = com20020_found(dev, 0); /* calls register_netdev */
302 298
303 if (i != 0) { 299 if (i != 0) {
304 dev_printk(KERN_NOTICE, &link->dev, 300 dev_notice(&link->dev,
305 "com20020_cs: com20020_found() failed\n"); 301 "com20020_found() failed\n");
306 goto failed; 302 goto failed;
307 } 303 }
308 304
309 dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n", 305 netdev_dbg(dev, "port %#3lx, irq %d\n",
310 dev->name, dev->base_addr, dev->irq); 306 dev->base_addr, dev->irq);
311 return 0; 307 return 0;
312 308
313failed: 309failed:
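
The regdump() change above is worth calling out: instead of compiling either a real function or an empty inline stub depending on DEBUG, the #ifdef moves inside a single definition. Every caller compiles against one symbol, and the body simply vanishes when DEBUG is unset. Condensed sketch of the resulting shape (the buffer0 dump is omitted for brevity):

static void regdump(struct net_device *dev)
{
#ifdef DEBUG
	int ioaddr = dev->base_addr;
	int count;

	netdev_dbg(dev, "register dump:\n");
	for (count = ioaddr; count < ioaddr + 16; count++) {
		if (!(count % 16))
			pr_cont("%04X:", count);
		pr_cont(" %02X", inb(count));
	}
	pr_cont("\n");
#endif
}
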
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 98fffb03ecd7..8f26d548d1bb 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -28,6 +28,8 @@
28 28
29======================================================================*/ 29======================================================================*/
30 30
31#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
31#define DRV_NAME "fmvj18x_cs" 33#define DRV_NAME "fmvj18x_cs"
32#define DRV_VERSION "2.9" 34#define DRV_VERSION "2.9"
33 35
@@ -291,7 +293,7 @@ static int mfc_try_io_port(struct pcmcia_device *link)
291 link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; 293 link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8;
292 if (link->resource[1]->start == 0) { 294 if (link->resource[1]->start == 0) {
293 link->resource[1]->end = 0; 295 link->resource[1]->end = 0;
294 printk(KERN_NOTICE "fmvj18x_cs: out of resource for serial\n"); 296 pr_notice("out of resource for serial\n");
295 } 297 }
296 ret = pcmcia_request_io(link); 298 ret = pcmcia_request_io(link);
297 if (ret == 0) 299 if (ret == 0)
@@ -503,7 +505,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
503 case XXX10304: 505 case XXX10304:
504 /* Read MACID from Buggy CIS */ 506 /* Read MACID from Buggy CIS */
505 if (fmvj18x_get_hwinfo(link, buggybuf) == -1) { 507 if (fmvj18x_get_hwinfo(link, buggybuf) == -1) {
506 printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n"); 508 pr_notice("unable to read hardware net address\n");
507 goto failed; 509 goto failed;
508 } 510 }
509 for (i = 0 ; i < 6; i++) { 511 for (i = 0 ; i < 6; i++) {
@@ -524,15 +526,14 @@ static int fmvj18x_config(struct pcmcia_device *link)
524 SET_NETDEV_DEV(dev, &link->dev); 526 SET_NETDEV_DEV(dev, &link->dev);
525 527
526 if (register_netdev(dev) != 0) { 528 if (register_netdev(dev) != 0) {
527 printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n"); 529 pr_notice("register_netdev() failed\n");
528 goto failed; 530 goto failed;
529 } 531 }
530 532
531 /* print current configuration */ 533 /* print current configuration */
532 printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, " 534 netdev_info(dev, "%s, sram %s, port %#3lx, irq %d, hw_addr %pM\n",
533 "hw_addr %pM\n", 535 card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
534 dev->name, card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2", 536 dev->base_addr, dev->irq, dev->dev_addr);
535 dev->base_addr, dev->irq, dev->dev_addr);
536 537
537 return 0; 538 return 0;
538 539
@@ -606,7 +607,7 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
606 607
607 lp->base = ioremap(req.Base, req.Size); 608 lp->base = ioremap(req.Base, req.Size);
608 if (lp->base == NULL) { 609 if (lp->base == NULL) {
609 printk(KERN_NOTICE "fmvj18x_cs: ioremap failed\n"); 610 netdev_notice(dev, "ioremap failed\n");
610 return -1; 611 return -1;
611 } 612 }
612 613
@@ -800,17 +801,16 @@ static void fjn_tx_timeout(struct net_device *dev)
800 struct local_info_t *lp = netdev_priv(dev); 801 struct local_info_t *lp = netdev_priv(dev);
801 unsigned int ioaddr = dev->base_addr; 802 unsigned int ioaddr = dev->base_addr;
802 803
803 printk(KERN_NOTICE "%s: transmit timed out with status %04x, %s?\n", 804 netdev_notice(dev, "transmit timed out with status %04x, %s?\n",
804 dev->name, htons(inw(ioaddr + TX_STATUS)), 805 htons(inw(ioaddr + TX_STATUS)),
805 inb(ioaddr + TX_STATUS) & F_TMT_RDY 806 inb(ioaddr + TX_STATUS) & F_TMT_RDY
806 ? "IRQ conflict" : "network cable problem"); 807 ? "IRQ conflict" : "network cable problem");
807 printk(KERN_NOTICE "%s: timeout registers: %04x %04x %04x " 808 netdev_notice(dev, "timeout registers: %04x %04x %04x "
808 "%04x %04x %04x %04x %04x.\n", 809 "%04x %04x %04x %04x %04x.\n",
809 dev->name, htons(inw(ioaddr + 0)), 810 htons(inw(ioaddr + 0)), htons(inw(ioaddr + 2)),
810 htons(inw(ioaddr + 2)), htons(inw(ioaddr + 4)), 811 htons(inw(ioaddr + 4)), htons(inw(ioaddr + 6)),
811 htons(inw(ioaddr + 6)), htons(inw(ioaddr + 8)), 812 htons(inw(ioaddr + 8)), htons(inw(ioaddr + 10)),
812 htons(inw(ioaddr +10)), htons(inw(ioaddr +12)), 813 htons(inw(ioaddr + 12)), htons(inw(ioaddr + 14)));
813 htons(inw(ioaddr +14)));
814 dev->stats.tx_errors++; 814 dev->stats.tx_errors++;
815 /* ToDo: We should try to restart the adaptor... */ 815 /* ToDo: We should try to restart the adaptor... */
816 local_irq_disable(); 816 local_irq_disable();
@@ -845,13 +845,13 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
845 unsigned char *buf = skb->data; 845 unsigned char *buf = skb->data;
846 846
847 if (length > ETH_FRAME_LEN) { 847 if (length > ETH_FRAME_LEN) {
848 printk(KERN_NOTICE "%s: Attempting to send a large packet" 848 netdev_notice(dev, "Attempting to send a large packet (%d bytes)\n",
849 " (%d bytes).\n", dev->name, length); 849 length);
850 return NETDEV_TX_BUSY; 850 return NETDEV_TX_BUSY;
851 } 851 }
852 852
853 pr_debug("%s: Transmitting a packet of length %lu.\n", 853 netdev_dbg(dev, "Transmitting a packet of length %lu\n",
854 dev->name, (unsigned long)skb->len); 854 (unsigned long)skb->len);
855 dev->stats.tx_bytes += skb->len; 855 dev->stats.tx_bytes += skb->len;
856 856
857 /* Disable both interrupts. */ 857 /* Disable both interrupts. */
@@ -904,7 +904,7 @@ static void fjn_reset(struct net_device *dev)
904 unsigned int ioaddr = dev->base_addr; 904 unsigned int ioaddr = dev->base_addr;
905 int i; 905 int i;
906 906
907 pr_debug("fjn_reset(%s) called.\n",dev->name); 907 netdev_dbg(dev, "fjn_reset() called\n");
908 908
909 /* Reset controller */ 909 /* Reset controller */
910 if( sram_config == 0 ) 910 if( sram_config == 0 )
@@ -988,8 +988,8 @@ static void fjn_rx(struct net_device *dev)
988 while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { 988 while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
989 u_short status = inw(ioaddr + DATAPORT); 989 u_short status = inw(ioaddr + DATAPORT);
990 990
991 pr_debug("%s: Rxing packet mode %02x status %04x.\n", 991 netdev_dbg(dev, "Rxing packet mode %02x status %04x.\n",
992 dev->name, inb(ioaddr + RX_MODE), status); 992 inb(ioaddr + RX_MODE), status);
993#ifndef final_version 993#ifndef final_version
994 if (status == 0) { 994 if (status == 0) {
995 outb(F_SKP_PKT, ioaddr + RX_SKIP); 995 outb(F_SKP_PKT, ioaddr + RX_SKIP);
@@ -1008,16 +1008,16 @@ static void fjn_rx(struct net_device *dev)
1008 struct sk_buff *skb; 1008 struct sk_buff *skb;
1009 1009
1010 if (pkt_len > 1550) { 1010 if (pkt_len > 1550) {
1011 printk(KERN_NOTICE "%s: The FMV-18x claimed a very " 1011 netdev_notice(dev, "The FMV-18x claimed a very large packet, size %d\n",
1012 "large packet, size %d.\n", dev->name, pkt_len); 1012 pkt_len);
1013 outb(F_SKP_PKT, ioaddr + RX_SKIP); 1013 outb(F_SKP_PKT, ioaddr + RX_SKIP);
1014 dev->stats.rx_errors++; 1014 dev->stats.rx_errors++;
1015 break; 1015 break;
1016 } 1016 }
1017 skb = dev_alloc_skb(pkt_len+2); 1017 skb = dev_alloc_skb(pkt_len+2);
1018 if (skb == NULL) { 1018 if (skb == NULL) {
1019 printk(KERN_NOTICE "%s: Memory squeeze, dropping " 1019 netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
1020 "packet (len %d).\n", dev->name, pkt_len); 1020 pkt_len);
1021 outb(F_SKP_PKT, ioaddr + RX_SKIP); 1021 outb(F_SKP_PKT, ioaddr + RX_SKIP);
1022 dev->stats.rx_dropped++; 1022 dev->stats.rx_dropped++;
1023 break; 1023 break;
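
fmvj18x_cs, like the other files in this series, now defines pr_fmt() at the top of the file, which is what lets the bare pr_notice() calls above drop their hand-typed "fmvj18x_cs: " prefixes. A sketch of the mechanism (demo_report() is a hypothetical helper):

/* Must precede any (possibly indirect) inclusion of <linux/printk.h>,
 * which only supplies a default pr_fmt() if the file has not already
 * defined one. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void demo_report(void)
{
	/* Preprocesses to
	 *   printk(KERN_NOTICE KBUILD_MODNAME ": " "out of resource for serial\n");
	 * so the module name is concatenated into the format string at
	 * compile time, once, instead of being typed at every call site. */
	pr_notice("out of resource for serial\n");
}
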
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index b0d06a3d962f..dc85282193bf 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -45,6 +45,8 @@
45 45
46======================================================================*/ 46======================================================================*/
47 47
48#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
49
48#include <linux/kernel.h> 50#include <linux/kernel.h>
49#include <linux/init.h> 51#include <linux/init.h>
50#include <linux/ptrace.h> 52#include <linux/ptrace.h>
@@ -52,7 +54,6 @@
52#include <linux/string.h> 54#include <linux/string.h>
53#include <linux/timer.h> 55#include <linux/timer.h>
54#include <linux/module.h> 56#include <linux/module.h>
55#include <linux/ethtool.h>
56#include <linux/netdevice.h> 57#include <linux/netdevice.h>
57#include <linux/trdevice.h> 58#include <linux/trdevice.h>
58#include <linux/ibmtr.h> 59#include <linux/ibmtr.h>
@@ -107,16 +108,6 @@ typedef struct ibmtr_dev_t {
107 struct tok_info *ti; 108 struct tok_info *ti;
108} ibmtr_dev_t; 109} ibmtr_dev_t;
109 110
110static void netdev_get_drvinfo(struct net_device *dev,
111 struct ethtool_drvinfo *info)
112{
113 strcpy(info->driver, "ibmtr_cs");
114}
115
116static const struct ethtool_ops netdev_ethtool_ops = {
117 .get_drvinfo = netdev_get_drvinfo,
118};
119
120static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) { 111static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) {
121 ibmtr_dev_t *info = dev_id; 112 ibmtr_dev_t *info = dev_id;
122 struct net_device *dev = info->dev; 113 struct net_device *dev = info->dev;
@@ -159,8 +150,6 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
159 150
160 info->dev = dev; 151 info->dev = dev;
161 152
162 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
163
164 return ibmtr_config(link); 153 return ibmtr_config(link);
165} /* ibmtr_attach */ 154} /* ibmtr_attach */
166 155
@@ -285,15 +274,14 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
285 274
286 i = ibmtr_probe_card(dev); 275 i = ibmtr_probe_card(dev);
287 if (i != 0) { 276 if (i != 0) {
288 printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n"); 277 pr_notice("register_netdev() failed\n");
289 goto failed; 278 goto failed;
290 } 279 }
291 280
292 printk(KERN_INFO 281 netdev_info(dev, "port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
293 "%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n", 282 dev->base_addr, dev->irq,
294 dev->name, dev->base_addr, dev->irq, 283 (u_long)ti->mmio, (u_long)(ti->sram_base << 12),
295 (u_long)ti->mmio, (u_long)(ti->sram_base << 12), 284 dev->dev_addr);
296 dev->dev_addr);
297 return 0; 285 return 0;
298 286
299failed: 287failed:
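
The ethtool deletion here is the same one already applied to 3c574_cs and axnet_cs (and to pcnet_cs below): each driver carried an identical stanza whose only method filled in a hard-coded driver name, and it is removed together with its SET_ETHTOOL_OPS() call and the now-unused <linux/ethtool.h> include. The dropped boilerplate, reproduced as a sketch for reference (the string literal varied per module):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* The per-driver boilerplate being deleted across this series; its
 * only effect was reporting the driver name via .get_drvinfo. */
static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strcpy(info->driver, "ibmtr_cs");
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
};
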
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 68f2deeb3ade..c1d8ce9e4a6c 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -111,6 +111,8 @@ Log: nmclan_cs.c,v
111 111
112---------------------------------------------------------------------------- */ 112---------------------------------------------------------------------------- */
113 113
114#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
115
114#define DRV_NAME "nmclan_cs" 116#define DRV_NAME "nmclan_cs"
115#define DRV_VERSION "0.16" 117#define DRV_VERSION "0.16"
116 118
@@ -519,7 +521,7 @@ static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
519 spin_unlock_irqrestore(&lp->bank_lock, flags); 521 spin_unlock_irqrestore(&lp->bank_lock, flags);
520 break; 522 break;
521 } 523 }
522 return (data & 0xFF); 524 return data & 0xFF;
523} /* mace_read */ 525} /* mace_read */
524 526
525/* ---------------------------------------------------------------------------- 527/* ----------------------------------------------------------------------------
@@ -563,7 +565,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
563 /* Wait for reset bit to be cleared automatically after <= 200ns */; 565 /* Wait for reset bit to be cleared automatically after <= 200ns */;
564 if(++ct > 500) 566 if(++ct > 500)
565 { 567 {
566 printk(KERN_ERR "mace: reset failed, card removed ?\n"); 568 pr_err("reset failed, card removed?\n");
567 return -1; 569 return -1;
568 } 570 }
569 udelay(1); 571 udelay(1);
@@ -610,7 +612,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
610 { 612 {
611 if(++ ct > 500) 613 if(++ ct > 500)
612 { 614 {
613 printk(KERN_ERR "mace: ADDRCHG timeout, card removed ?\n"); 615 pr_err("ADDRCHG timeout, card removed?\n");
614 return -1; 616 return -1;
615 } 617 }
616 } 618 }
@@ -678,8 +680,8 @@ static int nmclan_config(struct pcmcia_device *link)
678 dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n", 680 dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n",
679 sig[0], sig[1]); 681 sig[0], sig[1]);
680 } else { 682 } else {
681 printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should" 683 pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
682 " be 0x40 0x?9\n", sig[0], sig[1]); 684 sig[0], sig[1]);
683 return -ENODEV; 685 return -ENODEV;
684 } 686 }
685 } 687 }
@@ -691,20 +693,18 @@ static int nmclan_config(struct pcmcia_device *link)
691 if (if_port <= 2) 693 if (if_port <= 2)
692 dev->if_port = if_port; 694 dev->if_port = if_port;
693 else 695 else
694 printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n"); 696 pr_notice("invalid if_port requested\n");
695 697
696 SET_NETDEV_DEV(dev, &link->dev); 698 SET_NETDEV_DEV(dev, &link->dev);
697 699
698 i = register_netdev(dev); 700 i = register_netdev(dev);
699 if (i != 0) { 701 if (i != 0) {
700 printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n"); 702 pr_notice("register_netdev() failed\n");
701 goto failed; 703 goto failed;
702 } 704 }
703 705
704 printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port," 706 netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n",
705 " hw_addr %pM\n", 707 dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr);
706 dev->name, dev->base_addr, dev->irq, if_names[dev->if_port],
707 dev->dev_addr);
708 return 0; 708 return 0;
709 709
710failed: 710failed:
@@ -798,8 +798,7 @@ static int mace_config(struct net_device *dev, struct ifmap *map)
798 if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { 798 if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
799 if (map->port <= 2) { 799 if (map->port <= 2) {
800 dev->if_port = map->port; 800 dev->if_port = map->port;
801 printk(KERN_INFO "%s: switched to %s port\n", dev->name, 801 netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
802 if_names[dev->if_port]);
803 } else 802 } else
804 return -EINVAL; 803 return -EINVAL;
805 } 804 }
@@ -878,12 +877,12 @@ static void mace_tx_timeout(struct net_device *dev)
878 mace_private *lp = netdev_priv(dev); 877 mace_private *lp = netdev_priv(dev);
879 struct pcmcia_device *link = lp->p_dev; 878 struct pcmcia_device *link = lp->p_dev;
880 879
881 printk(KERN_NOTICE "%s: transmit timed out -- ", dev->name); 880 netdev_notice(dev, "transmit timed out -- ");
882#if RESET_ON_TIMEOUT 881#if RESET_ON_TIMEOUT
883 printk("resetting card\n"); 882 pr_cont("resetting card\n");
884 pcmcia_reset_card(link->socket); 883 pcmcia_reset_card(link->socket);
885#else /* #if RESET_ON_TIMEOUT */ 884#else /* #if RESET_ON_TIMEOUT */
886 printk("NOT resetting card\n"); 885 pr_cont("NOT resetting card\n");
887#endif /* #if RESET_ON_TIMEOUT */ 886#endif /* #if RESET_ON_TIMEOUT */
888 dev->trans_start = jiffies; /* prevent tx timeout */ 887 dev->trans_start = jiffies; /* prevent tx timeout */
889 netif_wake_queue(dev); 888 netif_wake_queue(dev);
@@ -965,22 +964,21 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
965 ioaddr = dev->base_addr; 964 ioaddr = dev->base_addr;
966 965
967 if (lp->tx_irq_disabled) { 966 if (lp->tx_irq_disabled) {
968 printk( 967 const char *msg;
969 (lp->tx_irq_disabled? 968 if (lp->tx_irq_disabled)
970 KERN_NOTICE "%s: Interrupt with tx_irq_disabled " 969 msg = "Interrupt with tx_irq_disabled";
971 "[isr=%02X, imr=%02X]\n": 970 else
972 KERN_NOTICE "%s: Re-entering the interrupt handler " 971 msg = "Re-entering the interrupt handler";
973 "[isr=%02X, imr=%02X]\n"), 972 netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n",
974 dev->name, 973 msg,
975 inb(ioaddr + AM2150_MACE_BASE + MACE_IR), 974 inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
976 inb(ioaddr + AM2150_MACE_BASE + MACE_IMR) 975 inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
977 );
978 /* WARNING: MACE_IR has been read! */ 976 /* WARNING: MACE_IR has been read! */
979 return IRQ_NONE; 977 return IRQ_NONE;
980 } 978 }
981 979
982 if (!netif_device_present(dev)) { 980 if (!netif_device_present(dev)) {
983 pr_debug("%s: interrupt from dead card\n", dev->name); 981 netdev_dbg(dev, "interrupt from dead card\n");
984 return IRQ_NONE; 982 return IRQ_NONE;
985 } 983 }
986 984
@@ -1378,8 +1376,8 @@ static void BuildLAF(int *ladrf, int *adr)
1378 printk(KERN_DEBUG " adr =%pM\n", adr); 1376 printk(KERN_DEBUG " adr =%pM\n", adr);
1379 printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode); 1377 printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
1380 for (i = 0; i < 8; i++) 1378 for (i = 0; i < 8; i++)
1381 printk(KERN_CONT " %02X", ladrf[i]); 1379 pr_cont(" %02X", ladrf[i]);
1382 printk(KERN_CONT "\n"); 1380 pr_cont("\n");
1383#endif 1381#endif
1384} /* BuildLAF */ 1382} /* BuildLAF */
1385 1383
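
The mace_interrupt() rewrite above, like the matching one in axnet's ax_interrupt(), replaces a ?: between two complete format strings — each carrying its own KERN_ prefix buried mid-expression — with a plain pointer selection feeding a single netdev_notice(). Only the variable text is chosen conditionally; one call owns the severity and the format. A reduced sketch (report_reentry() and its parameters are illustrative):

#include <linux/netdevice.h>

/* Sketch: select the message text, keep one format and one severity. */
static void report_reentry(struct net_device *dev, bool tx_irq_disabled,
			   u8 isr, u8 imr)
{
	const char *msg;

	if (tx_irq_disabled)
		msg = "Interrupt with tx_irq_disabled";
	else
		msg = "Re-entering the interrupt handler";
	netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n", msg, isr, imr);
}
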
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 49279b0ee526..e180832c278f 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -28,6 +28,8 @@
28 28
29======================================================================*/ 29======================================================================*/
30 30
31#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
31#include <linux/kernel.h> 33#include <linux/kernel.h>
32#include <linux/module.h> 34#include <linux/module.h>
33#include <linux/init.h> 35#include <linux/init.h>
@@ -35,7 +37,6 @@
35#include <linux/string.h> 37#include <linux/string.h>
36#include <linux/timer.h> 38#include <linux/timer.h>
37#include <linux/delay.h> 39#include <linux/delay.h>
38#include <linux/ethtool.h>
39#include <linux/netdevice.h> 40#include <linux/netdevice.h>
40#include <linux/log2.h> 41#include <linux/log2.h>
41#include <linux/etherdevice.h> 42#include <linux/etherdevice.h>
@@ -100,7 +101,6 @@ static void pcnet_release(struct pcmcia_device *link);
100static int pcnet_open(struct net_device *dev); 101static int pcnet_open(struct net_device *dev);
101static int pcnet_close(struct net_device *dev); 102static int pcnet_close(struct net_device *dev);
102static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 103static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
103static const struct ethtool_ops netdev_ethtool_ops;
104static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); 104static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
105static void ei_watchdog(u_long arg); 105static void ei_watchdog(u_long arg);
106static void pcnet_reset_8390(struct net_device *dev); 106static void pcnet_reset_8390(struct net_device *dev);
@@ -434,8 +434,6 @@ static hw_info_t *get_ax88190(struct pcmcia_device *link)
434 dev->dev_addr[i] = j & 0xff; 434 dev->dev_addr[i] = j & 0xff;
435 dev->dev_addr[i+1] = j >> 8; 435 dev->dev_addr[i+1] = j >> 8;
436 } 436 }
437 printk(KERN_NOTICE "pcnet_cs: this is an AX88190 card!\n");
438 printk(KERN_NOTICE "pcnet_cs: use axnet_cs instead.\n");
439 return NULL; 437 return NULL;
440} 438}
441 439
@@ -570,15 +568,15 @@ static int pcnet_config(struct pcmcia_device *link)
570 if ((if_port == 1) || (if_port == 2)) 568 if ((if_port == 1) || (if_port == 2))
571 dev->if_port = if_port; 569 dev->if_port = if_port;
572 else 570 else
573 printk(KERN_NOTICE "pcnet_cs: invalid if_port requested\n"); 571 pr_notice("invalid if_port requested\n");
574 } else { 572 } else {
575 dev->if_port = 0; 573 dev->if_port = 0;
576 } 574 }
577 575
578 if ((link->conf.ConfigBase == 0x03c0) && 576 if ((link->conf.ConfigBase == 0x03c0) &&
579 (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) { 577 (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
580 printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n"); 578 pr_notice("this is an AX88190 card!\n");
581 printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n"); 579 pr_notice("use axnet_cs instead.\n");
582 goto failed; 580 goto failed;
583 } 581 }
584 582
@@ -593,8 +591,8 @@ static int pcnet_config(struct pcmcia_device *link)
593 local_hw_info = get_hwired(link); 591 local_hw_info = get_hwired(link);
594 592
595 if (local_hw_info == NULL) { 593 if (local_hw_info == NULL) {
596 printk(KERN_NOTICE "pcnet_cs: unable to read hardware net" 594 pr_notice("unable to read hardware net address for io base %#3lx\n",
597 " address for io base %#3lx\n", dev->base_addr); 595 dev->base_addr);
598 goto failed; 596 goto failed;
599 } 597 }
600 598
@@ -626,9 +624,7 @@ static int pcnet_config(struct pcmcia_device *link)
626 624
627 ei_status.name = "NE2000"; 625 ei_status.name = "NE2000";
628 ei_status.word16 = 1; 626 ei_status.word16 = 1;
629 ei_status.reset_8390 = &pcnet_reset_8390; 627 ei_status.reset_8390 = pcnet_reset_8390;
630
631 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
632 628
633 if (info->flags & (IS_DL10019|IS_DL10022)) 629 if (info->flags & (IS_DL10019|IS_DL10022))
634 mii_phy_probe(dev); 630 mii_phy_probe(dev);
@@ -636,25 +632,25 @@ static int pcnet_config(struct pcmcia_device *link)
636 SET_NETDEV_DEV(dev, &link->dev); 632 SET_NETDEV_DEV(dev, &link->dev);
637 633
638 if (register_netdev(dev) != 0) { 634 if (register_netdev(dev) != 0) {
639 printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n"); 635 pr_notice("register_netdev() failed\n");
640 goto failed; 636 goto failed;
641 } 637 }
642 638
643 if (info->flags & (IS_DL10019|IS_DL10022)) { 639 if (info->flags & (IS_DL10019|IS_DL10022)) {
644 u_char id = inb(dev->base_addr + 0x1a); 640 u_char id = inb(dev->base_addr + 0x1a);
645 printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ", 641 netdev_info(dev, "NE2000 (DL100%d rev %02x): ",
646 dev->name, ((info->flags & IS_DL10022) ? 22 : 19), id); 642 (info->flags & IS_DL10022) ? 22 : 19, id);
647 if (info->pna_phy) 643 if (info->pna_phy)
648 printk("PNA, "); 644 pr_cont("PNA, ");
649 } else { 645 } else {
650 printk(KERN_INFO "%s: NE2000 Compatible: ", dev->name); 646 netdev_info(dev, "NE2000 Compatible: ");
651 } 647 }
652 printk("io %#3lx, irq %d,", dev->base_addr, dev->irq); 648 pr_cont("io %#3lx, irq %d,", dev->base_addr, dev->irq);
653 if (info->flags & USE_SHMEM) 649 if (info->flags & USE_SHMEM)
654 printk (" mem %#5lx,", dev->mem_start); 650 pr_cont(" mem %#5lx,", dev->mem_start);
655 if (info->flags & HAS_MISC_REG) 651 if (info->flags & HAS_MISC_REG)
656 printk(" %s xcvr,", if_names[dev->if_port]); 652 pr_cont(" %s xcvr,", if_names[dev->if_port]);
657 printk(" hw_addr %pM\n", dev->dev_addr); 653 pr_cont(" hw_addr %pM\n", dev->dev_addr);
658 return 0; 654 return 0;
659 655
660failed: 656failed:
@@ -928,7 +924,7 @@ static void mii_phy_probe(struct net_device *dev)
928 phyid = tmp << 16; 924 phyid = tmp << 16;
929 phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2); 925 phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
930 phyid &= MII_PHYID_REV_MASK; 926 phyid &= MII_PHYID_REV_MASK;
931 pr_debug("%s: MII at %d is 0x%08x\n", dev->name, i, phyid); 927 netdev_dbg(dev, "MII at %d is 0x%08x\n", i, phyid);
932 if (phyid == AM79C9XX_HOME_PHY) { 928 if (phyid == AM79C9XX_HOME_PHY) {
933 info->pna_phy = i; 929 info->pna_phy = i;
934 } else if (phyid != AM79C9XX_ETH_PHY) { 930 } else if (phyid != AM79C9XX_ETH_PHY) {
@@ -961,7 +957,7 @@ static int pcnet_open(struct net_device *dev)
961 info->phy_id = info->eth_phy; 957 info->phy_id = info->eth_phy;
962 info->link_status = 0x00; 958 info->link_status = 0x00;
963 init_timer(&info->watchdog); 959 init_timer(&info->watchdog);
964 info->watchdog.function = &ei_watchdog; 960 info->watchdog.function = ei_watchdog;
965 info->watchdog.data = (u_long)dev; 961 info->watchdog.data = (u_long)dev;
966 info->watchdog.expires = jiffies + HZ; 962 info->watchdog.expires = jiffies + HZ;
967 add_timer(&info->watchdog); 963 add_timer(&info->watchdog);
@@ -1014,8 +1010,8 @@ static void pcnet_reset_8390(struct net_device *dev)
1014 outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */ 1010 outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
1015 1011
1016 if (i == 100) 1012 if (i == 100)
1017 printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n", 1013 netdev_err(dev, "pcnet_reset_8390() did not complete.\n");
1018 dev->name); 1014
1019 set_misc_reg(dev); 1015 set_misc_reg(dev);
1020 1016
1021} /* pcnet_reset_8390 */ 1017} /* pcnet_reset_8390 */
@@ -1031,8 +1027,7 @@ static int set_config(struct net_device *dev, struct ifmap *map)
1031 else if ((map->port < 1) || (map->port > 2)) 1027 else if ((map->port < 1) || (map->port > 2))
1032 return -EINVAL; 1028 return -EINVAL;
1033 dev->if_port = map->port; 1029 dev->if_port = map->port;
1034 printk(KERN_INFO "%s: switched to %s port\n", 1030 netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
1035 dev->name, if_names[dev->if_port]);
1036 NS8390_init(dev, 1); 1031 NS8390_init(dev, 1);
1037 } 1032 }
1038 return 0; 1033 return 0;
@@ -1067,7 +1062,7 @@ static void ei_watchdog(u_long arg)
1067 this, we can limp along even if the interrupt is blocked */ 1062 this, we can limp along even if the interrupt is blocked */
1068 if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) { 1063 if (info->stale++ && (inb_p(nic_base + EN0_ISR) & ENISR_ALL)) {
1069 if (!info->fast_poll) 1064 if (!info->fast_poll)
1070 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); 1065 netdev_info(dev, "interrupt(s) dropped!\n");
1071 ei_irq_wrapper(dev->irq, dev); 1066 ei_irq_wrapper(dev->irq, dev);
1072 info->fast_poll = HZ; 1067 info->fast_poll = HZ;
1073 } 1068 }
@@ -1087,7 +1082,7 @@ static void ei_watchdog(u_long arg)
1087 if (info->eth_phy) { 1082 if (info->eth_phy) {
1088 info->phy_id = info->eth_phy = 0; 1083 info->phy_id = info->eth_phy = 0;
1089 } else { 1084 } else {
1090 printk(KERN_INFO "%s: MII is missing!\n", dev->name); 1085 netdev_info(dev, "MII is missing!\n");
1091 info->flags &= ~HAS_MII; 1086 info->flags &= ~HAS_MII;
1092 } 1087 }
1093 goto reschedule; 1088 goto reschedule;
@@ -1096,8 +1091,7 @@ static void ei_watchdog(u_long arg)
1096 link &= 0x0004; 1091 link &= 0x0004;
1097 if (link != info->link_status) { 1092 if (link != info->link_status) {
1098 u_short p = mdio_read(mii_addr, info->phy_id, 5); 1093 u_short p = mdio_read(mii_addr, info->phy_id, 5);
1099 printk(KERN_INFO "%s: %s link beat\n", dev->name, 1094 netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
1100 (link) ? "found" : "lost");
1101 if (link && (info->flags & IS_DL10022)) { 1095 if (link && (info->flags & IS_DL10022)) {
1102 /* Disable collision detection on full duplex links */ 1096 /* Disable collision detection on full duplex links */
1103 outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG); 1097 outb((p & 0x0140) ? 4 : 0, nic_base + DLINK_DIAG);
@@ -1108,13 +1102,12 @@ static void ei_watchdog(u_long arg)
1108 if (link) { 1102 if (link) {
1109 if (info->phy_id == info->eth_phy) { 1103 if (info->phy_id == info->eth_phy) {
1110 if (p) 1104 if (p)
1111 printk(KERN_INFO "%s: autonegotiation complete: " 1105 netdev_info(dev, "autonegotiation complete: "
1112 "%sbaseT-%cD selected\n", dev->name, 1106 "%sbaseT-%cD selected\n",
1113 ((p & 0x0180) ? "100" : "10"), 1107 ((p & 0x0180) ? "100" : "10"),
1114 ((p & 0x0140) ? 'F' : 'H')); 1108 ((p & 0x0140) ? 'F' : 'H'));
1115 else 1109 else
1116 printk(KERN_INFO "%s: link partner did not " 1110 netdev_info(dev, "link partner did not autonegotiate\n");
1117 "autonegotiate\n", dev->name);
1118 } 1111 }
1119 NS8390_init(dev, 1); 1112 NS8390_init(dev, 1);
1120 } 1113 }
@@ -1127,7 +1120,7 @@ static void ei_watchdog(u_long arg)
1127 /* isolate this MII and try flipping to the other one */ 1120 /* isolate this MII and try flipping to the other one */
1128 mdio_write(mii_addr, info->phy_id, 0, 0x0400); 1121 mdio_write(mii_addr, info->phy_id, 0, 0x0400);
1129 info->phy_id ^= info->pna_phy ^ info->eth_phy; 1122 info->phy_id ^= info->pna_phy ^ info->eth_phy;
1130 printk(KERN_INFO "%s: switched to %s transceiver\n", dev->name, 1123 netdev_info(dev, "switched to %s transceiver\n",
1131 (info->phy_id == info->eth_phy) ? "ethernet" : "PNA"); 1124 (info->phy_id == info->eth_phy) ? "ethernet" : "PNA");
1132 mdio_write(mii_addr, info->phy_id, 0, 1125 mdio_write(mii_addr, info->phy_id, 0,
1133 (info->phy_id == info->eth_phy) ? 0x1000 : 0); 1126 (info->phy_id == info->eth_phy) ? 0x1000 : 0);
@@ -1143,18 +1136,6 @@ reschedule:
1143 1136
1144/*====================================================================*/ 1137/*====================================================================*/
1145 1138
1146static void netdev_get_drvinfo(struct net_device *dev,
1147 struct ethtool_drvinfo *info)
1148{
1149 strcpy(info->driver, "pcnet_cs");
1150}
1151
1152static const struct ethtool_ops netdev_ethtool_ops = {
1153 .get_drvinfo = netdev_get_drvinfo,
1154};
1155
1156/*====================================================================*/
1157
1158 1139
1159static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1140static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1160{ 1141{
@@ -1187,9 +1168,9 @@ static void dma_get_8390_hdr(struct net_device *dev,
1187 unsigned int nic_base = dev->base_addr; 1168 unsigned int nic_base = dev->base_addr;
1188 1169
1189 if (ei_status.dmaing) { 1170 if (ei_status.dmaing) {
1190 printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input." 1171 netdev_notice(dev, "DMAing conflict in dma_block_input."
1191 "[DMAstat:%1x][irqlock:%1x]\n", 1172 "[DMAstat:%1x][irqlock:%1x]\n",
1192 dev->name, ei_status.dmaing, ei_status.irqlock); 1173 ei_status.dmaing, ei_status.irqlock);
1193 return; 1174 return;
1194 } 1175 }
1195 1176
@@ -1220,11 +1201,11 @@ static void dma_block_input(struct net_device *dev, int count,
1220 char *buf = skb->data; 1201 char *buf = skb->data;
1221 1202
1222 if ((ei_debug > 4) && (count != 4)) 1203 if ((ei_debug > 4) && (count != 4))
1223 pr_debug("%s: [bi=%d]\n", dev->name, count+4); 1204 netdev_dbg(dev, "[bi=%d]\n", count+4);
1224 if (ei_status.dmaing) { 1205 if (ei_status.dmaing) {
1225 printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input." 1206 netdev_notice(dev, "DMAing conflict in dma_block_input."
1226 "[DMAstat:%1x][irqlock:%1x]\n", 1207 "[DMAstat:%1x][irqlock:%1x]\n",
1227 dev->name, ei_status.dmaing, ei_status.irqlock); 1208 ei_status.dmaing, ei_status.irqlock);
1228 return; 1209 return;
1229 } 1210 }
1230 ei_status.dmaing |= 0x01; 1211 ei_status.dmaing |= 0x01;
@@ -1254,9 +1235,9 @@ static void dma_block_input(struct net_device *dev, int count,
1254 break; 1235 break;
1255 } while (--tries > 0); 1236 } while (--tries > 0);
1256 if (tries <= 0) 1237 if (tries <= 0)
1257 printk(KERN_NOTICE "%s: RX transfer address mismatch," 1238 netdev_notice(dev, "RX transfer address mismatch,"
1258 "%#4.4x (expected) vs. %#4.4x (actual).\n", 1239 "%#4.4x (expected) vs. %#4.4x (actual).\n",
1259 dev->name, ring_offset + xfer_count, addr); 1240 ring_offset + xfer_count, addr);
1260 } 1241 }
1261#endif 1242#endif
1262 outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 1243 outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
@@ -1277,7 +1258,7 @@ static void dma_block_output(struct net_device *dev, int count,
1277 1258
1278#ifdef PCMCIA_DEBUG 1259#ifdef PCMCIA_DEBUG
1279 if (ei_debug > 4) 1260 if (ei_debug > 4)
1280 printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count); 1261 netdev_dbg(dev, "[bo=%d]\n", count);
1281#endif 1262#endif
1282 1263
1283 /* Round the count up for word writes. Do we need to do this? 1264 /* Round the count up for word writes. Do we need to do this?
@@ -1286,9 +1267,9 @@ static void dma_block_output(struct net_device *dev, int count,
1286 if (count & 0x01) 1267 if (count & 0x01)
1287 count++; 1268 count++;
1288 if (ei_status.dmaing) { 1269 if (ei_status.dmaing) {
1289 printk(KERN_NOTICE "%s: DMAing conflict in dma_block_output." 1270 netdev_notice(dev, "DMAing conflict in dma_block_output."
1290 "[DMAstat:%1x][irqlock:%1x]\n", 1271 "[DMAstat:%1x][irqlock:%1x]\n",
1291 dev->name, ei_status.dmaing, ei_status.irqlock); 1272 ei_status.dmaing, ei_status.irqlock);
1292 return; 1273 return;
1293 } 1274 }
1294 ei_status.dmaing |= 0x01; 1275 ei_status.dmaing |= 0x01;
@@ -1325,9 +1306,9 @@ static void dma_block_output(struct net_device *dev, int count,
1325 break; 1306 break;
1326 } while (--tries > 0); 1307 } while (--tries > 0);
1327 if (tries <= 0) { 1308 if (tries <= 0) {
1328 printk(KERN_NOTICE "%s: Tx packet transfer address mismatch," 1309 netdev_notice(dev, "Tx packet transfer address mismatch,"
1329 "%#4.4x (expected) vs. %#4.4x (actual).\n", 1310 "%#4.4x (expected) vs. %#4.4x (actual).\n",
1330 dev->name, (start_page << 8) + count, addr); 1311 (start_page << 8) + count, addr);
1331 if (retries++ == 0) 1312 if (retries++ == 0)
1332 goto retry; 1313 goto retry;
1333 } 1314 }
@@ -1336,8 +1317,7 @@ static void dma_block_output(struct net_device *dev, int count,
1336 1317
1337 while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0) 1318 while ((inb_p(nic_base + EN0_ISR) & ENISR_RDC) == 0)
1338 if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) { 1319 if (time_after(jiffies, dma_start + PCNET_RDC_TIMEOUT)) {
1339 printk(KERN_NOTICE "%s: timeout waiting for Tx RDC.\n", 1320 netdev_notice(dev, "timeout waiting for Tx RDC.\n");
1340 dev->name);
1341 pcnet_reset_8390(dev); 1321 pcnet_reset_8390(dev);
1342 NS8390_init(dev, 1); 1322 NS8390_init(dev, 1);
1343 break; 1323 break;
@@ -1361,9 +1341,9 @@ static int setup_dma_config(struct pcmcia_device *link, int start_pg,
1361 ei_status.stop_page = stop_pg; 1341 ei_status.stop_page = stop_pg;
1362 1342
1363 /* set up block i/o functions */ 1343 /* set up block i/o functions */
1364 ei_status.get_8390_hdr = &dma_get_8390_hdr; 1344 ei_status.get_8390_hdr = dma_get_8390_hdr;
1365 ei_status.block_input = &dma_block_input; 1345 ei_status.block_input = dma_block_input;
1366 ei_status.block_output = &dma_block_output; 1346 ei_status.block_output = dma_block_output;
1367 1347
1368 return 0; 1348 return 0;
1369} 1349}
@@ -1509,9 +1489,9 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
1509 ei_status.stop_page = start_pg + ((req.Size - offset) >> 8); 1489 ei_status.stop_page = start_pg + ((req.Size - offset) >> 8);
1510 1490
1511 /* set up block i/o functions */ 1491 /* set up block i/o functions */
1512 ei_status.get_8390_hdr = &shmem_get_8390_hdr; 1492 ei_status.get_8390_hdr = shmem_get_8390_hdr;
1513 ei_status.block_input = &shmem_block_input; 1493 ei_status.block_input = shmem_block_input;
1514 ei_status.block_output = &shmem_block_output; 1494 ei_status.block_output = shmem_block_output;
1515 1495
1516 info->flags |= USE_SHMEM; 1496 info->flags |= USE_SHMEM;
1517 return 0; 1497 return 0;
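
The pcnet_cs hunks above apply two mechanical cleanups: printk(KERN_LEVEL "%s: ...", dev->name, ...) becomes the matching netdev_info()/netdev_notice()/netdev_dbg() helper, which prints the driver and interface name itself, and the redundant address-of operator is dropped when assigning function pointers. A minimal sketch of the logging half of the pattern (foo_report_link() is hypothetical; the helpers come from <linux/netdevice.h>):

#include <linux/netdevice.h>

/* Old style:
 *   printk(KERN_INFO "%s: link %s\n", dev->name, up ? "up" : "down");
 * New style: netdev_info() emits "<driver> <ifname>: ..." by itself,
 * so dev->name disappears from the argument list.
 */
static void foo_report_link(struct net_device *dev, bool up)
{
	netdev_info(dev, "link %s\n", up ? "up" : "down");
}
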
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 377367d03b41..7204a4b5529b 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -25,6 +25,8 @@
25 25
26======================================================================*/ 26======================================================================*/
27 27
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
28#include <linux/module.h> 30#include <linux/module.h>
29#include <linux/kernel.h> 31#include <linux/kernel.h>
30#include <linux/init.h> 32#include <linux/init.h>
@@ -294,7 +296,7 @@ static const struct net_device_ops smc_netdev_ops = {
294 .ndo_tx_timeout = smc_tx_timeout, 296 .ndo_tx_timeout = smc_tx_timeout,
295 .ndo_set_config = s9k_config, 297 .ndo_set_config = s9k_config,
296 .ndo_set_multicast_list = set_rx_mode, 298 .ndo_set_multicast_list = set_rx_mode,
297 .ndo_do_ioctl = &smc_ioctl, 299 .ndo_do_ioctl = smc_ioctl,
298 .ndo_change_mtu = eth_change_mtu, 300 .ndo_change_mtu = eth_change_mtu,
299 .ndo_set_mac_address = eth_mac_addr, 301 .ndo_set_mac_address = eth_mac_addr,
300 .ndo_validate_addr = eth_validate_addr, 302 .ndo_validate_addr = eth_validate_addr,
@@ -813,14 +815,14 @@ static int check_sig(struct pcmcia_device *link)
813 ((s >> 8) != (s & 0xff))) { 815 ((s >> 8) != (s & 0xff))) {
814 SMC_SELECT_BANK(3); 816 SMC_SELECT_BANK(3);
815 s = inw(ioaddr + REVISION); 817 s = inw(ioaddr + REVISION);
816 return (s & 0xff); 818 return s & 0xff;
817 } 819 }
818 820
819 if (width) { 821 if (width) {
820 modconf_t mod = { 822 modconf_t mod = {
821 .Attributes = CONF_IO_CHANGE_WIDTH, 823 .Attributes = CONF_IO_CHANGE_WIDTH,
822 }; 824 };
823 printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n"); 825 pr_info("using 8-bit IO window\n");
824 826
825 smc91c92_suspend(link); 827 smc91c92_suspend(link);
826 pcmcia_modify_configuration(link, &mod); 828 pcmcia_modify_configuration(link, &mod);
@@ -881,7 +883,7 @@ static int smc91c92_config(struct pcmcia_device *link)
881 if ((if_port >= 0) && (if_port <= 2)) 883 if ((if_port >= 0) && (if_port <= 2))
882 dev->if_port = if_port; 884 dev->if_port = if_port;
883 else 885 else
884 printk(KERN_NOTICE "smc91c92_cs: invalid if_port requested\n"); 886 dev_notice(&link->dev, "invalid if_port requested\n");
885 887
886 switch (smc->manfid) { 888 switch (smc->manfid) {
887 case MANFID_OSITECH: 889 case MANFID_OSITECH:
@@ -899,7 +901,7 @@ static int smc91c92_config(struct pcmcia_device *link)
899 } 901 }
900 902
901 if (i != 0) { 903 if (i != 0) {
902 printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n"); 904 dev_notice(&link->dev, "Unable to find hardware address.\n");
903 goto config_failed; 905 goto config_failed;
904 } 906 }
905 907
@@ -952,30 +954,28 @@ static int smc91c92_config(struct pcmcia_device *link)
952 SET_NETDEV_DEV(dev, &link->dev); 954 SET_NETDEV_DEV(dev, &link->dev);
953 955
954 if (register_netdev(dev) != 0) { 956 if (register_netdev(dev) != 0) {
955 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n"); 957 dev_err(&link->dev, "register_netdev() failed\n");
956 goto config_undo; 958 goto config_undo;
957 } 959 }
958 960
959 printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, " 961 netdev_info(dev, "smc91c%s rev %d: io %#3lx, irq %d, hw_addr %pM\n",
960 "hw_addr %pM\n", 962 name, (rev & 0x0f), dev->base_addr, dev->irq, dev->dev_addr);
961 dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq,
962 dev->dev_addr);
963 963
964 if (rev > 0) { 964 if (rev > 0) {
965 if (mir & 0x3ff) 965 if (mir & 0x3ff)
966 printk(KERN_INFO " %lu byte", mir); 966 netdev_info(dev, " %lu byte", mir);
967 else 967 else
968 printk(KERN_INFO " %lu kb", mir>>10); 968 netdev_info(dev, " %lu kb", mir>>10);
969 printk(" buffer, %s xcvr\n", (smc->cfg & CFG_MII_SELECT) ? 969 pr_cont(" buffer, %s xcvr\n",
970 "MII" : if_names[dev->if_port]); 970 (smc->cfg & CFG_MII_SELECT) ? "MII" : if_names[dev->if_port]);
971 } 971 }
972 972
973 if (smc->cfg & CFG_MII_SELECT) { 973 if (smc->cfg & CFG_MII_SELECT) {
974 if (smc->mii_if.phy_id != -1) { 974 if (smc->mii_if.phy_id != -1) {
975 dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n", 975 netdev_dbg(dev, " MII transceiver at index %d, status %x\n",
976 smc->mii_if.phy_id, j); 976 smc->mii_if.phy_id, j);
977 } else { 977 } else {
978 printk(KERN_NOTICE " No MII transceivers found!\n"); 978 netdev_notice(dev, " No MII transceivers found!\n");
979 } 979 }
980 } 980 }
981 return 0; 981 return 0;
@@ -1081,10 +1081,10 @@ static void smc_dump(struct net_device *dev)
1081 save = inw(ioaddr + BANK_SELECT); 1081 save = inw(ioaddr + BANK_SELECT);
1082 for (w = 0; w < 4; w++) { 1082 for (w = 0; w < 4; w++) {
1083 SMC_SELECT_BANK(w); 1083 SMC_SELECT_BANK(w);
1084 printk(KERN_DEBUG "bank %d: ", w); 1084 netdev_printk(KERN_DEBUG, dev, "bank %d: ", w);
1085 for (i = 0; i < 14; i += 2) 1085 for (i = 0; i < 14; i += 2)
1086 printk(" %04x", inw(ioaddr + i)); 1086 pr_cont(" %04x", inw(ioaddr + i));
1087 printk("\n"); 1087 pr_cont("\n");
1088 } 1088 }
1089 outw(save, ioaddr + BANK_SELECT); 1089 outw(save, ioaddr + BANK_SELECT);
1090} 1090}
@@ -1106,7 +1106,7 @@ static int smc_open(struct net_device *dev)
1106 return -ENODEV; 1106 return -ENODEV;
1107 /* Physical device present signature. */ 1107 /* Physical device present signature. */
1108 if (check_sig(link) < 0) { 1108 if (check_sig(link) < 0) {
1109 printk("smc91c92_cs: Yikes! Bad chip signature!\n"); 1109 netdev_info(dev, "Yikes! Bad chip signature!\n");
1110 return -ENODEV; 1110 return -ENODEV;
1111 } 1111 }
1112 link->open++; 1112 link->open++;
@@ -1117,7 +1117,7 @@ static int smc_open(struct net_device *dev)
1117 1117
1118 smc_reset(dev); 1118 smc_reset(dev);
1119 init_timer(&smc->media); 1119 init_timer(&smc->media);
1120 smc->media.function = &media_check; 1120 smc->media.function = media_check;
1121 smc->media.data = (u_long) dev; 1121 smc->media.data = (u_long) dev;
1122 smc->media.expires = jiffies + HZ; 1122 smc->media.expires = jiffies + HZ;
1123 add_timer(&smc->media); 1123 add_timer(&smc->media);
@@ -1172,7 +1172,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
1172 u_char packet_no; 1172 u_char packet_no;
1173 1173
1174 if (!skb) { 1174 if (!skb) {
1175 printk(KERN_ERR "%s: In XMIT with no packet to send.\n", dev->name); 1175 netdev_err(dev, "In XMIT with no packet to send\n");
1176 return; 1176 return;
1177 } 1177 }
1178 1178
@@ -1180,8 +1180,8 @@ static void smc_hardware_send_packet(struct net_device * dev)
1180 packet_no = inw(ioaddr + PNR_ARR) >> 8; 1180 packet_no = inw(ioaddr + PNR_ARR) >> 8;
1181 if (packet_no & 0x80) { 1181 if (packet_no & 0x80) {
1182 /* If not, there is a hardware problem! Likely an ejected card. */ 1182 /* If not, there is a hardware problem! Likely an ejected card. */
1183 printk(KERN_WARNING "%s: 91c92 hardware Tx buffer allocation" 1183 netdev_warn(dev, "hardware Tx buffer allocation failed, status %#2.2x\n",
1184 " failed, status %#2.2x.\n", dev->name, packet_no); 1184 packet_no);
1185 dev_kfree_skb_irq(skb); 1185 dev_kfree_skb_irq(skb);
1186 smc->saved_skb = NULL; 1186 smc->saved_skb = NULL;
1187 netif_start_queue(dev); 1187 netif_start_queue(dev);
@@ -1200,8 +1200,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
1200 u_char *buf = skb->data; 1200 u_char *buf = skb->data;
1201 u_int length = skb->len; /* The chip will pad to ethernet min. */ 1201 u_int length = skb->len; /* The chip will pad to ethernet min. */
1202 1202
1203 pr_debug("%s: Trying to xmit packet of length %d.\n", 1203 netdev_dbg(dev, "Trying to xmit packet of length %d\n", length);
1204 dev->name, length);
1205 1204
1206 /* send the packet length: +6 for status word, length, and ctl */ 1205 /* send the packet length: +6 for status word, length, and ctl */
1207 outw(0, ioaddr + DATA_1); 1206 outw(0, ioaddr + DATA_1);
@@ -1233,9 +1232,8 @@ static void smc_tx_timeout(struct net_device *dev)
1233 struct smc_private *smc = netdev_priv(dev); 1232 struct smc_private *smc = netdev_priv(dev);
1234 unsigned int ioaddr = dev->base_addr; 1233 unsigned int ioaddr = dev->base_addr;
1235 1234
1236 printk(KERN_NOTICE "%s: SMC91c92 transmit timed out, " 1235 netdev_notice(dev, "transmit timed out, Tx_status %2.2x status %4.4x.\n",
1237 "Tx_status %2.2x status %4.4x.\n", 1236 inw(ioaddr)&0xff, inw(ioaddr + 2));
1238 dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
1239 dev->stats.tx_errors++; 1237 dev->stats.tx_errors++;
1240 smc_reset(dev); 1238 smc_reset(dev);
1241 dev->trans_start = jiffies; /* prevent tx timeout */ 1239 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1254,14 +1252,14 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
1254 1252
1255 netif_stop_queue(dev); 1253 netif_stop_queue(dev);
1256 1254
1257 pr_debug("%s: smc_start_xmit(length = %d) called," 1255 netdev_dbg(dev, "smc_start_xmit(length = %d) called, status %04x\n",
1258 " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2)); 1256 skb->len, inw(ioaddr + 2));
1259 1257
1260 if (smc->saved_skb) { 1258 if (smc->saved_skb) {
1261 /* THIS SHOULD NEVER HAPPEN. */ 1259 /* THIS SHOULD NEVER HAPPEN. */
1262 dev->stats.tx_aborted_errors++; 1260 dev->stats.tx_aborted_errors++;
1263 printk(KERN_DEBUG "%s: Internal error -- sent packet while busy.\n", 1261 netdev_printk(KERN_DEBUG, dev,
1264 dev->name); 1262 "Internal error -- sent packet while busy\n");
1265 return NETDEV_TX_BUSY; 1263 return NETDEV_TX_BUSY;
1266 } 1264 }
1267 smc->saved_skb = skb; 1265 smc->saved_skb = skb;
@@ -1269,7 +1267,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
1269 num_pages = skb->len >> 8; 1267 num_pages = skb->len >> 8;
1270 1268
1271 if (num_pages > 7) { 1269 if (num_pages > 7) {
1272 printk(KERN_ERR "%s: Far too big packet error.\n", dev->name); 1270 netdev_err(dev, "Far too big packet error: %d pages\n", num_pages);
1273 dev_kfree_skb (skb); 1271 dev_kfree_skb (skb);
1274 smc->saved_skb = NULL; 1272 smc->saved_skb = NULL;
1275 dev->stats.tx_dropped++; 1273 dev->stats.tx_dropped++;
@@ -1339,8 +1337,7 @@ static void smc_tx_err(struct net_device * dev)
1339 } 1337 }
1340 1338
1341 if (tx_status & TS_SUCCESS) { 1339 if (tx_status & TS_SUCCESS) {
1342 printk(KERN_NOTICE "%s: Successful packet caused error " 1340 netdev_notice(dev, "Successful packet caused error interrupt?\n");
1343 "interrupt?\n", dev->name);
1344 } 1341 }
1345 /* re-enable transmit */ 1342 /* re-enable transmit */
1346 SMC_SELECT_BANK(0); 1343 SMC_SELECT_BANK(0);
@@ -1530,8 +1527,7 @@ static void smc_rx(struct net_device *dev)
1530 /* Assertion: we are in Window 2. */ 1527 /* Assertion: we are in Window 2. */
1531 1528
1532 if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) { 1529 if (inw(ioaddr + FIFO_PORTS) & FP_RXEMPTY) {
1533 printk(KERN_ERR "%s: smc_rx() with nothing on Rx FIFO.\n", 1530 netdev_err(dev, "smc_rx() with nothing on Rx FIFO\n");
1534 dev->name);
1535 return; 1531 return;
1536 } 1532 }
1537 1533
@@ -1646,8 +1642,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map)
1646 else if (map->port > 2) 1642 else if (map->port > 2)
1647 return -EINVAL; 1643 return -EINVAL;
1648 dev->if_port = map->port; 1644 dev->if_port = map->port;
1649 printk(KERN_INFO "%s: switched to %s port\n", 1645 netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
1650 dev->name, if_names[dev->if_port]);
1651 smc_reset(dev); 1646 smc_reset(dev);
1652 } 1647 }
1653 return 0; 1648 return 0;
@@ -1798,7 +1793,7 @@ static void media_check(u_long arg)
1798 this, we can limp along even if the interrupt is blocked */ 1793 this, we can limp along even if the interrupt is blocked */
1799 if (smc->watchdog++ && ((i>>8) & i)) { 1794 if (smc->watchdog++ && ((i>>8) & i)) {
1800 if (!smc->fast_poll) 1795 if (!smc->fast_poll)
1801 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); 1796 netdev_info(dev, "interrupt(s) dropped!\n");
1802 local_irq_save(flags); 1797 local_irq_save(flags);
1803 smc_interrupt(dev->irq, dev); 1798 smc_interrupt(dev->irq, dev);
1804 local_irq_restore(flags); 1799 local_irq_restore(flags);
@@ -1822,7 +1817,7 @@ static void media_check(u_long arg)
1822 SMC_SELECT_BANK(3); 1817 SMC_SELECT_BANK(3);
1823 link = mdio_read(dev, smc->mii_if.phy_id, 1); 1818 link = mdio_read(dev, smc->mii_if.phy_id, 1);
1824 if (!link || (link == 0xffff)) { 1819 if (!link || (link == 0xffff)) {
1825 printk(KERN_INFO "%s: MII is missing!\n", dev->name); 1820 netdev_info(dev, "MII is missing!\n");
1826 smc->mii_if.phy_id = -1; 1821 smc->mii_if.phy_id = -1;
1827 goto reschedule; 1822 goto reschedule;
1828 } 1823 }
@@ -1830,15 +1825,13 @@ static void media_check(u_long arg)
1830 link &= 0x0004; 1825 link &= 0x0004;
1831 if (link != smc->link_status) { 1826 if (link != smc->link_status) {
1832 u_short p = mdio_read(dev, smc->mii_if.phy_id, 5); 1827 u_short p = mdio_read(dev, smc->mii_if.phy_id, 5);
1833 printk(KERN_INFO "%s: %s link beat\n", dev->name, 1828 netdev_info(dev, "%s link beat\n", link ? "found" : "lost");
1834 (link) ? "found" : "lost");
1835 smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40)) 1829 smc->duplex = (((p & 0x0100) || ((p & 0x1c0) == 0x40))
1836 ? TCR_FDUPLX : 0); 1830 ? TCR_FDUPLX : 0);
1837 if (link) { 1831 if (link) {
1838 printk(KERN_INFO "%s: autonegotiation complete: " 1832 netdev_info(dev, "autonegotiation complete: "
1839 "%sbaseT-%cD selected\n", dev->name, 1833 "%dbaseT-%cD selected\n",
1840 ((p & 0x0180) ? "100" : "10"), 1834 (p & 0x0180) ? 100 : 10, smc->duplex ? 'F' : 'H');
1841 (smc->duplex ? 'F' : 'H'));
1842 } 1835 }
1843 SMC_SELECT_BANK(0); 1836 SMC_SELECT_BANK(0);
1844 outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR); 1837 outw(inw(ioaddr + TCR) | smc->duplex, ioaddr + TCR);
@@ -1857,25 +1850,23 @@ static void media_check(u_long arg)
1857 if (media != smc->media_status) { 1850 if (media != smc->media_status) {
1858 if ((media & smc->media_status & 1) && 1851 if ((media & smc->media_status & 1) &&
1859 ((smc->media_status ^ media) & EPH_LINK_OK)) 1852 ((smc->media_status ^ media) & EPH_LINK_OK))
1860 printk(KERN_INFO "%s: %s link beat\n", dev->name, 1853 netdev_info(dev, "%s link beat\n",
1861 (smc->media_status & EPH_LINK_OK ? "lost" : "found")); 1854 smc->media_status & EPH_LINK_OK ? "lost" : "found");
1862 else if ((media & smc->media_status & 2) && 1855 else if ((media & smc->media_status & 2) &&
1863 ((smc->media_status ^ media) & EPH_16COL)) 1856 ((smc->media_status ^ media) & EPH_16COL))
1864 printk(KERN_INFO "%s: coax cable %s\n", dev->name, 1857 netdev_info(dev, "coax cable %s\n",
1865 (media & EPH_16COL ? "problem" : "ok")); 1858 media & EPH_16COL ? "problem" : "ok");
1866 if (dev->if_port == 0) { 1859 if (dev->if_port == 0) {
1867 if (media & 1) { 1860 if (media & 1) {
1868 if (media & EPH_LINK_OK) 1861 if (media & EPH_LINK_OK)
1869 printk(KERN_INFO "%s: flipped to 10baseT\n", 1862 netdev_info(dev, "flipped to 10baseT\n");
1870 dev->name);
1871 else 1863 else
1872 smc_set_xcvr(dev, 2); 1864 smc_set_xcvr(dev, 2);
1873 } else { 1865 } else {
1874 if (media & EPH_16COL) 1866 if (media & EPH_16COL)
1875 smc_set_xcvr(dev, 1); 1867 smc_set_xcvr(dev, 1);
1876 else 1868 else
1877 printk(KERN_INFO "%s: flipped to 10base2\n", 1869 netdev_info(dev, "flipped to 10base2\n");
1878 dev->name);
1879 } 1870 }
1880 } 1871 }
1881 smc->media_status = media; 1872 smc->media_status = media;
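
The smc91c92_cs conversion leans on the pr_fmt() mechanism: defining pr_fmt before the first #include makes every pr_info()/pr_notice()/pr_err() in the file carry the "smc91c92_cs: " prefix automatically, while pr_cont() appends to the previous line with no level or prefix. A sketch of the convention (dump_words() and its messages are illustrative only):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/kernel.h>
#include <linux/module.h>

static void dump_words(const u16 *regs, int n)
{
	int i;

	pr_info("registers:");			/* "smc91c92_cs: registers:" */
	for (i = 0; i < n; i++)
		pr_cont(" %04x", regs[i]);	/* continuation, no prefix */
	pr_cont("\n");
}
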
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index f5819526b5ee..d858b5e4c4a7 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -63,6 +63,8 @@
63 * OF THE POSSIBILITY OF SUCH DAMAGE. 63 * OF THE POSSIBILITY OF SUCH DAMAGE.
64 */ 64 */
65 65
66#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
67
66#include <linux/module.h> 68#include <linux/module.h>
67#include <linux/kernel.h> 69#include <linux/kernel.h>
68#include <linux/init.h> 70#include <linux/init.h>
@@ -210,13 +212,6 @@ enum xirc_cmd { /* Commands */
210 212
211static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; 213static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" };
212 214
213
214#define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: "
215#define KERR_XIRC KERN_ERR "xirc2ps_cs: "
216#define KWRN_XIRC KERN_WARNING "xirc2ps_cs: "
217#define KNOT_XIRC KERN_NOTICE "xirc2ps_cs: "
218#define KINF_XIRC KERN_INFO "xirc2ps_cs: "
219
220/* card types */ 215/* card types */
221#define XIR_UNKNOWN 0 /* unknown: not supported */ 216#define XIR_UNKNOWN 0 /* unknown: not supported */
222#define XIR_CE 1 /* (prodid 1) different hardware: not supported */ 217#define XIR_CE 1 /* (prodid 1) different hardware: not supported */
@@ -350,26 +345,26 @@ PrintRegisters(struct net_device *dev)
350 if (pc_debug > 1) { 345 if (pc_debug > 1) {
351 int i, page; 346 int i, page;
352 347
353 printk(KDBG_XIRC "Register common: "); 348 printk(KERN_DEBUG pr_fmt("Register common: "));
354 for (i = 0; i < 8; i++) 349 for (i = 0; i < 8; i++)
355 printk(" %2.2x", GetByte(i)); 350 pr_cont(" %2.2x", GetByte(i));
356 printk("\n"); 351 pr_cont("\n");
357 for (page = 0; page <= 8; page++) { 352 for (page = 0; page <= 8; page++) {
358 printk(KDBG_XIRC "Register page %2x: ", page); 353 printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
359 SelectPage(page); 354 SelectPage(page);
360 for (i = 8; i < 16; i++) 355 for (i = 8; i < 16; i++)
361 printk(" %2.2x", GetByte(i)); 356 pr_cont(" %2.2x", GetByte(i));
362 printk("\n"); 357 pr_cont("\n");
363 } 358 }
364 for (page=0x40 ; page <= 0x5f; page++) { 359 for (page=0x40 ; page <= 0x5f; page++) {
365 if (page == 0x43 || (page >= 0x46 && page <= 0x4f) || 360 if (page == 0x43 || (page >= 0x46 && page <= 0x4f) ||
366 (page >= 0x51 && page <=0x5e)) 361 (page >= 0x51 && page <=0x5e))
367 continue; 362 continue;
368 printk(KDBG_XIRC "Register page %2x: ", page); 363 printk(KERN_DEBUG pr_fmt("Register page %2x: "), page);
369 SelectPage(page); 364 SelectPage(page);
370 for (i = 8; i < 16; i++) 365 for (i = 8; i < 16; i++)
371 printk(" %2.2x", GetByte(i)); 366 pr_cont(" %2.2x", GetByte(i));
372 printk("\n"); 367 pr_cont("\n");
373 } 368 }
374 } 369 }
375} 370}
@@ -608,11 +603,11 @@ set_card_type(struct pcmcia_device *link)
608 local->modem = 0; 603 local->modem = 0;
609 local->card_type = XIR_UNKNOWN; 604 local->card_type = XIR_UNKNOWN;
610 if (!(prodid & 0x40)) { 605 if (!(prodid & 0x40)) {
611 printk(KNOT_XIRC "Ooops: Not a creditcard\n"); 606 pr_notice("Oops: Not a creditcard\n");
612 return 0; 607 return 0;
613 } 608 }
614 if (!(mediaid & 0x01)) { 609 if (!(mediaid & 0x01)) {
615 printk(KNOT_XIRC "Not an Ethernet card\n"); 610 pr_notice("Not an Ethernet card\n");
616 return 0; 611 return 0;
617 } 612 }
618 if (mediaid & 0x10) { 613 if (mediaid & 0x10) {
@@ -643,12 +638,11 @@ set_card_type(struct pcmcia_device *link)
643 } 638 }
644 } 639 }
645 if (local->card_type == XIR_CE || local->card_type == XIR_CEM) { 640 if (local->card_type == XIR_CE || local->card_type == XIR_CEM) {
646 printk(KNOT_XIRC "Sorry, this is an old CE card\n"); 641 pr_notice("Sorry, this is an old CE card\n");
647 return 0; 642 return 0;
648 } 643 }
649 if (local->card_type == XIR_UNKNOWN) 644 if (local->card_type == XIR_UNKNOWN)
650 printk(KNOT_XIRC "unknown card (mediaid=%02x prodid=%02x)\n", 645 pr_notice("unknown card (mediaid=%02x prodid=%02x)\n", mediaid, prodid);
651 mediaid, prodid);
652 646
653 return 1; 647 return 1;
654} 648}
@@ -748,7 +742,7 @@ xirc2ps_config(struct pcmcia_device * link)
748 742
749 /* Is this a valid card */ 743 /* Is this a valid card */
750 if (link->has_manf_id == 0) { 744 if (link->has_manf_id == 0) {
751 printk(KNOT_XIRC "manfid not found in CIS\n"); 745 pr_notice("manfid not found in CIS\n");
752 goto failure; 746 goto failure;
753 } 747 }
754 748
@@ -770,14 +764,14 @@ xirc2ps_config(struct pcmcia_device * link)
770 local->manf_str = "Toshiba"; 764 local->manf_str = "Toshiba";
771 break; 765 break;
772 default: 766 default:
773 printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n", 767 pr_notice("Unknown Card Manufacturer ID: 0x%04x\n",
774 (unsigned)link->manf_id); 768 (unsigned)link->manf_id);
775 goto failure; 769 goto failure;
776 } 770 }
777 dev_dbg(&link->dev, "found %s card\n", local->manf_str); 771 dev_dbg(&link->dev, "found %s card\n", local->manf_str);
778 772
779 if (!set_card_type(link)) { 773 if (!set_card_type(link)) {
780 printk(KNOT_XIRC "this card is not supported\n"); 774 pr_notice("this card is not supported\n");
781 goto failure; 775 goto failure;
782 } 776 }
783 777
@@ -803,7 +797,7 @@ xirc2ps_config(struct pcmcia_device * link)
803 err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev); 797 err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev);
804 798
805 if (err) { 799 if (err) {
806 printk(KNOT_XIRC "node-id not found in CIS\n"); 800 pr_notice("node-id not found in CIS\n");
807 goto failure; 801 goto failure;
808 } 802 }
809 803
@@ -838,7 +832,7 @@ xirc2ps_config(struct pcmcia_device * link)
838 * try to configure as Ethernet only. 832 * try to configure as Ethernet only.
839 * .... */ 833 * .... */
840 } 834 }
841 printk(KNOT_XIRC "no ports available\n"); 835 pr_notice("no ports available\n");
842 } else { 836 } else {
843 link->resource[0]->end = 16; 837 link->resource[0]->end = 16;
844 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { 838 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
@@ -911,24 +905,24 @@ xirc2ps_config(struct pcmcia_device * link)
911 #if 0 905 #if 0
912 { 906 {
913 u_char tmp; 907 u_char tmp;
914 printk(KERN_INFO "ECOR:"); 908 pr_info("ECOR:");
915 for (i=0; i < 7; i++) { 909 for (i=0; i < 7; i++) {
916 tmp = readb(local->dingo_ccr + i*2); 910 tmp = readb(local->dingo_ccr + i*2);
917 printk(" %02x", tmp); 911 pr_cont(" %02x", tmp);
918 } 912 }
919 printk("\n"); 913 pr_cont("\n");
920 printk(KERN_INFO "DCOR:"); 914 pr_info("DCOR:");
921 for (i=0; i < 4; i++) { 915 for (i=0; i < 4; i++) {
922 tmp = readb(local->dingo_ccr + 0x20 + i*2); 916 tmp = readb(local->dingo_ccr + 0x20 + i*2);
923 printk(" %02x", tmp); 917 pr_cont(" %02x", tmp);
924 } 918 }
925 printk("\n"); 919 pr_cont("\n");
926 printk(KERN_INFO "SCOR:"); 920 pr_info("SCOR:");
927 for (i=0; i < 10; i++) { 921 for (i=0; i < 10; i++) {
928 tmp = readb(local->dingo_ccr + 0x40 + i*2); 922 tmp = readb(local->dingo_ccr + 0x40 + i*2);
929 printk(" %02x", tmp); 923 pr_cont(" %02x", tmp);
930 } 924 }
931 printk("\n"); 925 pr_cont("\n");
932 } 926 }
933 #endif 927 #endif
934 928
@@ -947,7 +941,7 @@ xirc2ps_config(struct pcmcia_device * link)
947 (local->mohawk && if_port==4)) 941 (local->mohawk && if_port==4))
948 dev->if_port = if_port; 942 dev->if_port = if_port;
949 else 943 else
950 printk(KNOT_XIRC "invalid if_port requested\n"); 944 pr_notice("invalid if_port requested\n");
951 945
952 /* we can now register the device with the net subsystem */ 946 /* we can now register the device with the net subsystem */
953 dev->irq = link->irq; 947 dev->irq = link->irq;
@@ -959,14 +953,14 @@ xirc2ps_config(struct pcmcia_device * link)
959 SET_NETDEV_DEV(dev, &link->dev); 953 SET_NETDEV_DEV(dev, &link->dev);
960 954
961 if ((err=register_netdev(dev))) { 955 if ((err=register_netdev(dev))) {
962 printk(KNOT_XIRC "register_netdev() failed\n"); 956 pr_notice("register_netdev() failed\n");
963 goto config_error; 957 goto config_error;
964 } 958 }
965 959
966 /* give some infos about the hardware */ 960 /* give some infos about the hardware */
967 printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n", 961 netdev_info(dev, "%s: port %#3lx, irq %d, hwaddr %pM\n",
968 dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq, 962 local->manf_str, (u_long)dev->base_addr, (int)dev->irq,
969 dev->dev_addr); 963 dev->dev_addr);
970 964
971 return 0; 965 return 0;
972 966
@@ -1098,8 +1092,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
1098 1092
1099 skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */ 1093 skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */
1100 if (!skb) { 1094 if (!skb) {
1101 printk(KNOT_XIRC "low memory, packet dropped (size=%u)\n", 1095 pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
1102 pktlen);
1103 dev->stats.rx_dropped++; 1096 dev->stats.rx_dropped++;
1104 } else { /* okay get the packet */ 1097 } else { /* okay get the packet */
1105 skb_reserve(skb, 2); 1098 skb_reserve(skb, 2);
@@ -1268,7 +1261,7 @@ xirc_tx_timeout(struct net_device *dev)
1268{ 1261{
1269 local_info_t *lp = netdev_priv(dev); 1262 local_info_t *lp = netdev_priv(dev);
1270 dev->stats.tx_errors++; 1263 dev->stats.tx_errors++;
1271 printk(KERN_NOTICE "%s: transmit timed out\n", dev->name); 1264 netdev_notice(dev, "transmit timed out\n");
1272 schedule_work(&lp->tx_timeout_task); 1265 schedule_work(&lp->tx_timeout_task);
1273} 1266}
1274 1267
@@ -1435,8 +1428,7 @@ do_config(struct net_device *dev, struct ifmap *map)
1435 local->probe_port = 0; 1428 local->probe_port = 0;
1436 dev->if_port = map->port; 1429 dev->if_port = map->port;
1437 } 1430 }
1438 printk(KERN_INFO "%s: switching to %s port\n", 1431 netdev_info(dev, "switching to %s port\n", if_names[dev->if_port]);
1439 dev->name, if_names[dev->if_port]);
1440 do_reset(dev,1); /* not the fine way :-) */ 1432 do_reset(dev,1); /* not the fine way :-) */
1441 } 1433 }
1442 return 0; 1434 return 0;
@@ -1576,7 +1568,7 @@ do_reset(struct net_device *dev, int full)
1576 { 1568 {
1577 SelectPage(0); 1569 SelectPage(0);
1578 value = GetByte(XIRCREG_ESR); /* read the ESR */ 1570 value = GetByte(XIRCREG_ESR); /* read the ESR */
1579 printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value); 1571 pr_debug("%s: ESR is: %#02x\n", dev->name, value);
1580 } 1572 }
1581 #endif 1573 #endif
1582 1574
@@ -1626,13 +1618,12 @@ do_reset(struct net_device *dev, int full)
1626 1618
1627 if (full && local->mohawk && init_mii(dev)) { 1619 if (full && local->mohawk && init_mii(dev)) {
1628 if (dev->if_port == 4 || local->dingo || local->new_mii) { 1620 if (dev->if_port == 4 || local->dingo || local->new_mii) {
1629 printk(KERN_INFO "%s: MII selected\n", dev->name); 1621 netdev_info(dev, "MII selected\n");
1630 SelectPage(2); 1622 SelectPage(2);
1631 PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08); 1623 PutByte(XIRCREG2_MSR, GetByte(XIRCREG2_MSR) | 0x08);
1632 msleep(20); 1624 msleep(20);
1633 } else { 1625 } else {
1634 printk(KERN_INFO "%s: MII detected; using 10mbs\n", 1626 netdev_info(dev, "MII detected; using 10mbs\n");
1635 dev->name);
1636 SelectPage(0x42); 1627 SelectPage(0x42);
1637 if (dev->if_port == 2) /* enable 10Base2 */ 1628 if (dev->if_port == 2) /* enable 10Base2 */
1638 PutByte(XIRCREG42_SWC1, 0xC0); 1629 PutByte(XIRCREG42_SWC1, 0xC0);
@@ -1677,8 +1668,8 @@ do_reset(struct net_device *dev, int full)
1677 } 1668 }
1678 1669
1679 if (full) 1670 if (full)
1680 printk(KERN_INFO "%s: media %s, silicon revision %d\n", 1671 netdev_info(dev, "media %s, silicon revision %d\n",
1681 dev->name, if_names[dev->if_port], local->silicon); 1672 if_names[dev->if_port], local->silicon);
1682 /* We should switch back to page 0 to avoid a bug in revision 0 1673 /* We should switch back to page 0 to avoid a bug in revision 0
1683 * where regs with offset below 8 can't be read after an access 1674 * where regs with offset below 8 can't be read after an access
1684 * to the MAC registers */ 1675 * to the MAC registers */
@@ -1720,8 +1711,7 @@ init_mii(struct net_device *dev)
1720 control = mii_rd(ioaddr, 0, 0); 1711 control = mii_rd(ioaddr, 0, 0);
1721 1712
1722 if (control & 0x0400) { 1713 if (control & 0x0400) {
1723 printk(KERN_NOTICE "%s can't take PHY out of isolation mode\n", 1714 netdev_notice(dev, "can't take PHY out of isolation mode\n");
1724 dev->name);
1725 local->probe_port = 0; 1715 local->probe_port = 0;
1726 return 0; 1716 return 0;
1727 } 1717 }
@@ -1739,8 +1729,7 @@ init_mii(struct net_device *dev)
1739 } 1729 }
1740 1730
1741 if (!(status & 0x0020)) { 1731 if (!(status & 0x0020)) {
1742 printk(KERN_INFO "%s: autonegotiation failed;" 1732 netdev_info(dev, "autonegotiation failed; using 10mbs\n");
1743 " using 10mbs\n", dev->name);
1744 if (!local->new_mii) { 1733 if (!local->new_mii) {
1745 control = 0x0000; 1734 control = 0x0000;
1746 mii_wr(ioaddr, 0, 0, control, 16); 1735 mii_wr(ioaddr, 0, 0, control, 16);
@@ -1750,8 +1739,7 @@ init_mii(struct net_device *dev)
1750 } 1739 }
1751 } else { 1740 } else {
1752 linkpartner = mii_rd(ioaddr, 0, 5); 1741 linkpartner = mii_rd(ioaddr, 0, 5);
1753 printk(KERN_INFO "%s: MII link partner: %04x\n", 1742 netdev_info(dev, "MII link partner: %04x\n", linkpartner);
1754 dev->name, linkpartner);
1755 if (linkpartner & 0x0080) { 1743 if (linkpartner & 0x0080) {
1756 dev->if_port = 4; 1744 dev->if_port = 4;
1757 } else 1745 } else
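
In PrintRegisters() above, pr_debug() would be compiled out unless DEBUG is defined, so the dump keeps an explicit printk(KERN_DEBUG pr_fmt(...)), reusing the pr_fmt() prefix by hand, and appends the hex values with pr_cont(). One caveat with pr_cont() is that output from other CPUs can interleave mid-line; a sketch of an alternative that assembles the whole line first (not what the driver does):

static void dump_page(int page, const u8 *vals, int n)
{
	char line[3 * 16 + 1];	/* " xx" per byte, up to 16 bytes */
	int i, len = 0;

	for (i = 0; i < n && i < 16; i++)
		len += scnprintf(line + len, sizeof(line) - len,
				 " %2.2x", vals[i]);
	printk(KERN_DEBUG pr_fmt("Register page %2x:%s\n"), page, line);
}
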
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index c200c2821730..aee3bb0358bf 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -376,7 +376,7 @@ static void pcnet32_wio_reset(unsigned long addr)
376static int pcnet32_wio_check(unsigned long addr) 376static int pcnet32_wio_check(unsigned long addr)
377{ 377{
378 outw(88, addr + PCNET32_WIO_RAP); 378 outw(88, addr + PCNET32_WIO_RAP);
379 return (inw(addr + PCNET32_WIO_RAP) == 88); 379 return inw(addr + PCNET32_WIO_RAP) == 88;
380} 380}
381 381
382static struct pcnet32_access pcnet32_wio = { 382static struct pcnet32_access pcnet32_wio = {
@@ -431,7 +431,7 @@ static void pcnet32_dwio_reset(unsigned long addr)
431static int pcnet32_dwio_check(unsigned long addr) 431static int pcnet32_dwio_check(unsigned long addr)
432{ 432{
433 outl(88, addr + PCNET32_DWIO_RAP); 433 outl(88, addr + PCNET32_DWIO_RAP);
434 return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88); 434 return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
435} 435}
436 436
437static struct pcnet32_access pcnet32_dwio = { 437static struct pcnet32_access pcnet32_dwio = {
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index ec0349e84a8a..ca4df7f4cf21 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -995,8 +995,10 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
995static void 995static void
996plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth) 996plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
997{ 997{
998 const struct in_device *in_dev = dev->ip_ptr; 998 const struct in_device *in_dev;
999 999
1000 rcu_read_lock();
1001 in_dev = __in_dev_get_rcu(dev);
1000 if (in_dev) { 1002 if (in_dev) {
1001 /* Any address will do - we take the first */ 1003 /* Any address will do - we take the first */
1002 const struct in_ifaddr *ifa = in_dev->ifa_list; 1004 const struct in_ifaddr *ifa = in_dev->ifa_list;
@@ -1006,6 +1008,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
1006 memcpy(eth->h_dest+2, &ifa->ifa_address, 4); 1008 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1007 } 1009 }
1008 } 1010 }
1011 rcu_read_unlock();
1009} 1012}
1010 1013
1011static int 1014static int
@@ -1088,7 +1091,8 @@ plip_open(struct net_device *dev)
1088 when the device address isn't identical to the address of a 1091 when the device address isn't identical to the address of a
1089 received frame, the kernel incorrectly drops it). */ 1092 received frame, the kernel incorrectly drops it). */
1090 1093
1091 if ((in_dev=dev->ip_ptr) != NULL) { 1094 in_dev=__in_dev_get_rtnl(dev);
1095 if (in_dev) {
1092 /* Any address will do - we take the first. We already 1096 /* Any address will do - we take the first. We already
1093 have the first two bytes filled with 0xfc, from 1097 have the first two bytes filled with 0xfc, from
1094 plip_init_dev(). */ 1098 plip_init_dev(). */
@@ -1279,7 +1283,6 @@ static void plip_attach (struct parport *port)
1279 if (!nl->pardev) { 1283 if (!nl->pardev) {
1280 printk(KERN_ERR "%s: parport_register failed\n", name); 1284 printk(KERN_ERR "%s: parport_register failed\n", name);
1281 goto err_free_dev; 1285 goto err_free_dev;
1282 return;
1283 } 1286 }
1284 1287
1285 plip_init_netdev(dev); 1288 plip_init_netdev(dev);
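
The plip change closes an RCU hole: dev->ip_ptr is an RCU-protected pointer, so reading it raw races with address removal. On the frame-rewrite path the accessor is __in_dev_get_rcu() inside an rcu_read_lock() section; in plip_open(), which runs with the RTNL held, __in_dev_get_rtnl() suffices. A condensed sketch of the locked pattern (the helper itself is illustrative, not part of the driver):

#include <linux/inetdevice.h>
#include <linux/rcupdate.h>

/* Fetch the first IPv4 address of a device, or 0 if it has none. */
static __be32 first_ipv4_addr(struct net_device *dev)
{
	const struct in_device *in_dev;
	__be32 addr = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);	/* valid only inside this section */
	if (in_dev && in_dev->ifa_list)
		addr = in_dev->ifa_list->ifa_address;
	rcu_read_unlock();

	return addr;
}
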
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index c07de359dc07..d72fb0519a2a 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -1124,7 +1124,7 @@ static const struct proto_ops pppoe_ops = {
1124 .ioctl = pppox_ioctl, 1124 .ioctl = pppox_ioctl,
1125}; 1125};
1126 1126
1127static struct pppox_proto pppoe_proto = { 1127static const struct pppox_proto pppoe_proto = {
1128 .create = pppoe_create, 1128 .create = pppoe_create,
1129 .ioctl = pppoe_ioctl, 1129 .ioctl = pppoe_ioctl,
1130 .owner = THIS_MODULE, 1130 .owner = THIS_MODULE,
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index d4191ef9cad1..8c0d170dabcd 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -36,9 +36,9 @@
36 36
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38 38
39static struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1]; 39static const struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1];
40 40
41int register_pppox_proto(int proto_num, struct pppox_proto *pp) 41int register_pppox_proto(int proto_num, const struct pppox_proto *pp)
42{ 42{
43 if (proto_num < 0 || proto_num > PX_MAX_PROTO) 43 if (proto_num < 0 || proto_num > PX_MAX_PROTO)
44 return -EINVAL; 44 return -EINVAL;
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
new file mode 100644
index 000000000000..ccbc91326bfa
--- /dev/null
+++ b/drivers/net/pptp.c
@@ -0,0 +1,726 @@
1/*
2 * Point-to-Point Tunneling Protocol for Linux
3 *
4 * Authors: Dmitry Kozlov <xeb@mail.ru>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/string.h>
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/errno.h>
18#include <linux/netdevice.h>
19#include <linux/net.h>
20#include <linux/skbuff.h>
21#include <linux/vmalloc.h>
22#include <linux/init.h>
23#include <linux/ppp_channel.h>
24#include <linux/ppp_defs.h>
25#include <linux/if_pppox.h>
26#include <linux/if_ppp.h>
27#include <linux/notifier.h>
28#include <linux/file.h>
29#include <linux/in.h>
30#include <linux/ip.h>
31#include <linux/netfilter.h>
32#include <linux/netfilter_ipv4.h>
33#include <linux/version.h>
34#include <linux/rcupdate.h>
35#include <linux/spinlock.h>
36
37#include <net/sock.h>
38#include <net/protocol.h>
39#include <net/ip.h>
40#include <net/icmp.h>
41#include <net/route.h>
42#include <net/gre.h>
43
44#include <linux/uaccess.h>
45
46#define PPTP_DRIVER_VERSION "0.8.5"
47
48#define MAX_CALLID 65535
49
50static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
51static struct pppox_sock **callid_sock;
52
53static DEFINE_SPINLOCK(chan_lock);
54
55static struct proto pptp_sk_proto __read_mostly;
56static const struct ppp_channel_ops pptp_chan_ops;
57static const struct proto_ops pptp_ops;
58
59#define PPP_LCP_ECHOREQ 0x09
60#define PPP_LCP_ECHOREP 0x0A
61#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
62
63#define MISSING_WINDOW 20
64#define WRAPPED(curseq, lastseq)\
65 ((((curseq) & 0xffffff00) == 0) &&\
66 (((lastseq) & 0xffffff00) == 0xffffff00))
67
68#define PPTP_GRE_PROTO 0x880B
69#define PPTP_GRE_VER 0x1
70
71#define PPTP_GRE_FLAG_C 0x80
72#define PPTP_GRE_FLAG_R 0x40
73#define PPTP_GRE_FLAG_K 0x20
74#define PPTP_GRE_FLAG_S 0x10
75#define PPTP_GRE_FLAG_A 0x80
76
77#define PPTP_GRE_IS_C(f) ((f)&PPTP_GRE_FLAG_C)
78#define PPTP_GRE_IS_R(f) ((f)&PPTP_GRE_FLAG_R)
79#define PPTP_GRE_IS_K(f) ((f)&PPTP_GRE_FLAG_K)
80#define PPTP_GRE_IS_S(f) ((f)&PPTP_GRE_FLAG_S)
81#define PPTP_GRE_IS_A(f) ((f)&PPTP_GRE_FLAG_A)
82
83#define PPTP_HEADER_OVERHEAD (2+sizeof(struct pptp_gre_header))
84struct pptp_gre_header {
85 u8 flags;
86 u8 ver;
87 u16 protocol;
88 u16 payload_len;
89 u16 call_id;
90 u32 seq;
91 u32 ack;
92} __packed;
93
94static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
95{
96 struct pppox_sock *sock;
97 struct pptp_opt *opt;
98
99 rcu_read_lock();
100 sock = rcu_dereference(callid_sock[call_id]);
101 if (sock) {
102 opt = &sock->proto.pptp;
103 if (opt->dst_addr.sin_addr.s_addr != s_addr)
104 sock = NULL;
105 else
106 sock_hold(sk_pppox(sock));
107 }
108 rcu_read_unlock();
109
110 return sock;
111}
112
113static int lookup_chan_dst(u16 call_id, __be32 d_addr)
114{
115 struct pppox_sock *sock;
116 struct pptp_opt *opt;
117 int i;
118
119 rcu_read_lock();
120 for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID;
121 i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) {
122 sock = rcu_dereference(callid_sock[i]);
123 if (!sock)
124 continue;
125 opt = &sock->proto.pptp;
126 if (opt->dst_addr.call_id == call_id &&
127 opt->dst_addr.sin_addr.s_addr == d_addr)
128 break;
129 }
130 rcu_read_unlock();
131
132 return i < MAX_CALLID;
133}
134
135static int add_chan(struct pppox_sock *sock)
136{
137 static int call_id;
138
139 spin_lock(&chan_lock);
140 if (!sock->proto.pptp.src_addr.call_id) {
141 call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
142 if (call_id == MAX_CALLID) {
143 call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
144 if (call_id == MAX_CALLID)
145 goto out_err;
146 }
147 sock->proto.pptp.src_addr.call_id = call_id;
148 } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
149 goto out_err;
150
151 set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
152 rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
153 spin_unlock(&chan_lock);
154
155 return 0;
156
157out_err:
158 spin_unlock(&chan_lock);
159 return -1;
160}
161
162static void del_chan(struct pppox_sock *sock)
163{
164 spin_lock(&chan_lock);
165 clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
166 rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
167 spin_unlock(&chan_lock);
168 synchronize_rcu();
169}
170
171static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
172{
173 struct sock *sk = (struct sock *) chan->private;
174 struct pppox_sock *po = pppox_sk(sk);
175 struct pptp_opt *opt = &po->proto.pptp;
176 struct pptp_gre_header *hdr;
177 unsigned int header_len = sizeof(*hdr);
178 int err = 0;
179 int islcp;
180 int len;
181 unsigned char *data;
182 __u32 seq_recv;
183
184
185 struct rtable *rt;
186 struct net_device *tdev;
187 struct iphdr *iph;
188 int max_headroom;
189
190 if (sk_pppox(po)->sk_state & PPPOX_DEAD)
191 goto tx_error;
192
193 {
194 struct flowi fl = { .oif = 0,
195 .nl_u = {
196 .ip4_u = {
197 .daddr = opt->dst_addr.sin_addr.s_addr,
198 .saddr = opt->src_addr.sin_addr.s_addr,
199 .tos = RT_TOS(0) } },
200 .proto = IPPROTO_GRE };
201 err = ip_route_output_key(&init_net, &rt, &fl);
202 if (err)
203 goto tx_error;
204 }
205 tdev = rt->dst.dev;
206
207 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;
208
209 if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
210 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
211 if (!new_skb) {
212 ip_rt_put(rt);
213 goto tx_error;
214 }
215 if (skb->sk)
216 skb_set_owner_w(new_skb, skb->sk);
217 kfree_skb(skb);
218 skb = new_skb;
219 }
220
221 data = skb->data;
222 islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
223
224 /* compress protocol field */
225 if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
226 skb_pull(skb, 1);
227
228 /* Put in the address/control bytes if necessary */
229 if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
230 data = skb_push(skb, 2);
231 data[0] = PPP_ALLSTATIONS;
232 data[1] = PPP_UI;
233 }
234
235 len = skb->len;
236
237 seq_recv = opt->seq_recv;
238
239 if (opt->ack_sent == seq_recv)
240 header_len -= sizeof(hdr->ack);
241
242 /* Push down and install GRE header */
243 skb_push(skb, header_len);
244 hdr = (struct pptp_gre_header *)(skb->data);
245
246 hdr->flags = PPTP_GRE_FLAG_K;
247 hdr->ver = PPTP_GRE_VER;
248 hdr->protocol = htons(PPTP_GRE_PROTO);
249 hdr->call_id = htons(opt->dst_addr.call_id);
250
251 hdr->flags |= PPTP_GRE_FLAG_S;
252 hdr->seq = htonl(++opt->seq_sent);
253 if (opt->ack_sent != seq_recv) {
254 /* send ack with this message */
255 hdr->ver |= PPTP_GRE_FLAG_A;
256 hdr->ack = htonl(seq_recv);
257 opt->ack_sent = seq_recv;
258 }
259 hdr->payload_len = htons(len);
260
261 /* Push down and install the IP header. */
262
263 skb_reset_transport_header(skb);
264 skb_push(skb, sizeof(*iph));
265 skb_reset_network_header(skb);
266 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
267 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
268
269 iph = ip_hdr(skb);
270 iph->version = 4;
271 iph->ihl = sizeof(struct iphdr) >> 2;
272 if (ip_dont_fragment(sk, &rt->dst))
273 iph->frag_off = htons(IP_DF);
274 else
275 iph->frag_off = 0;
276 iph->protocol = IPPROTO_GRE;
277 iph->tos = 0;
278 iph->daddr = rt->rt_dst;
279 iph->saddr = rt->rt_src;
280 iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
281 iph->tot_len = htons(skb->len);
282
283 skb_dst_drop(skb);
284 skb_dst_set(skb, &rt->dst);
285
286 nf_reset(skb);
287
288 skb->ip_summed = CHECKSUM_NONE;
289 ip_select_ident(iph, &rt->dst, NULL);
290 ip_send_check(iph);
291
292 ip_local_out(skb);
293
294tx_error:
295 return 1;
296}
297
298static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
299{
300 struct pppox_sock *po = pppox_sk(sk);
301 struct pptp_opt *opt = &po->proto.pptp;
302 int headersize, payload_len, seq;
303 __u8 *payload;
304 struct pptp_gre_header *header;
305
306 if (!(sk->sk_state & PPPOX_CONNECTED)) {
307 if (sock_queue_rcv_skb(sk, skb))
308 goto drop;
309 return NET_RX_SUCCESS;
310 }
311
312 header = (struct pptp_gre_header *)(skb->data);
313
314 /* test if acknowledgement present */
315 if (PPTP_GRE_IS_A(header->ver)) {
316 __u32 ack = (PPTP_GRE_IS_S(header->flags)) ?
317 header->ack : header->seq; /* ack in different place if S = 0 */
318
319 ack = ntohl(ack);
320
321 if (ack > opt->ack_recv)
322 opt->ack_recv = ack;
323 /* also handle sequence number wrap-around */
324 if (WRAPPED(ack, opt->ack_recv))
325 opt->ack_recv = ack;
326 }
327
328 /* test if payload present */
329 if (!PPTP_GRE_IS_S(header->flags))
330 goto drop;
331
332 headersize = sizeof(*header);
333 payload_len = ntohs(header->payload_len);
334 seq = ntohl(header->seq);
335
336 /* no ack present? */
337 if (!PPTP_GRE_IS_A(header->ver))
338 headersize -= sizeof(header->ack);
339 /* check for incomplete packet (length smaller than expected) */
340 if (skb->len - headersize < payload_len)
341 goto drop;
342
343 payload = skb->data + headersize;
344 /* check for expected sequence number */
345 if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
346 if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
347 (PPP_PROTOCOL(payload) == PPP_LCP) &&
348 ((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
349 goto allow_packet;
350 } else {
351 opt->seq_recv = seq;
352allow_packet:
353 skb_pull(skb, headersize);
354
355 if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
356 /* chop off address/control */
357 if (skb->len < 3)
358 goto drop;
359 skb_pull(skb, 2);
360 }
361
362 if ((*skb->data) & 1) {
363 /* protocol is compressed */
364 skb_push(skb, 1)[0] = 0;
365 }
366
367 skb->ip_summed = CHECKSUM_NONE;
368 skb_set_network_header(skb, skb->head-skb->data);
369 ppp_input(&po->chan, skb);
370
371 return NET_RX_SUCCESS;
372 }
373drop:
374 kfree_skb(skb);
375 return NET_RX_DROP;
376}
377
378static int pptp_rcv(struct sk_buff *skb)
379{
380 struct pppox_sock *po;
381 struct pptp_gre_header *header;
382 struct iphdr *iph;
383
384 if (skb->pkt_type != PACKET_HOST)
385 goto drop;
386
387 if (!pskb_may_pull(skb, 12))
388 goto drop;
389
390 iph = ip_hdr(skb);
391
392 header = (struct pptp_gre_header *)skb->data;
393
394 if (ntohs(header->protocol) != PPTP_GRE_PROTO || /* PPTP-GRE protocol for PPTP */
395 PPTP_GRE_IS_C(header->flags) || /* flag C should be clear */
396 PPTP_GRE_IS_R(header->flags) || /* flag R should be clear */
397 !PPTP_GRE_IS_K(header->flags) || /* flag K should be set */
398 (header->flags&0xF) != 0) /* routing and recursion ctrl = 0 */
399 /* if invalid, discard this packet */
400 goto drop;
401
402 po = lookup_chan(htons(header->call_id), iph->saddr);
403 if (po) {
404 skb_dst_drop(skb);
405 nf_reset(skb);
406 return sk_receive_skb(sk_pppox(po), skb, 0);
407 }
408drop:
409 kfree_skb(skb);
410 return NET_RX_DROP;
411}
412
413static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
414 int sockaddr_len)
415{
416 struct sock *sk = sock->sk;
417 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
418 struct pppox_sock *po = pppox_sk(sk);
419 struct pptp_opt *opt = &po->proto.pptp;
420 int error = 0;
421
422 lock_sock(sk);
423
424 opt->src_addr = sp->sa_addr.pptp;
425 if (add_chan(po)) {
 426 /* keep the lock; release_sock() runs once below */
427 error = -EBUSY;
428 }
429
430 release_sock(sk);
431 return error;
432}
433
434static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
435 int sockaddr_len, int flags)
436{
437 struct sock *sk = sock->sk;
438 struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
439 struct pppox_sock *po = pppox_sk(sk);
440 struct pptp_opt *opt = &po->proto.pptp;
441 struct rtable *rt;
442 int error = 0;
443
444 if (sp->sa_protocol != PX_PROTO_PPTP)
445 return -EINVAL;
446
447 if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
448 return -EALREADY;
449
450 lock_sock(sk);
451 /* Check for already bound sockets */
452 if (sk->sk_state & PPPOX_CONNECTED) {
453 error = -EBUSY;
454 goto end;
455 }
456
457 /* Check for already disconnected sockets, on attempts to disconnect */
458 if (sk->sk_state & PPPOX_DEAD) {
459 error = -EALREADY;
460 goto end;
461 }
462
463 if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
464 error = -EINVAL;
465 goto end;
466 }
467
468 po->chan.private = sk;
469 po->chan.ops = &pptp_chan_ops;
470
471 {
472 struct flowi fl = {
473 .nl_u = {
474 .ip4_u = {
475 .daddr = opt->dst_addr.sin_addr.s_addr,
476 .saddr = opt->src_addr.sin_addr.s_addr,
477 .tos = RT_CONN_FLAGS(sk) } },
478 .proto = IPPROTO_GRE };
479 security_sk_classify_flow(sk, &fl);
480 if (ip_route_output_key(&init_net, &rt, &fl)) {
481 error = -EHOSTUNREACH;
482 goto end;
483 }
484 sk_setup_caps(sk, &rt->dst);
485 }
486 po->chan.mtu = dst_mtu(&rt->dst);
487 if (!po->chan.mtu)
488 po->chan.mtu = PPP_MTU;
489 ip_rt_put(rt);
490 po->chan.mtu -= PPTP_HEADER_OVERHEAD;
491
492 po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
493 error = ppp_register_channel(&po->chan);
494 if (error) {
495 pr_err("PPTP: failed to register PPP channel (%d)\n", error);
496 goto end;
497 }
498
499 opt->dst_addr = sp->sa_addr.pptp;
500 sk->sk_state = PPPOX_CONNECTED;
501
502 end:
503 release_sock(sk);
504 return error;
505}
506
507static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
508 int *usockaddr_len, int peer)
509{
510 int len = sizeof(struct sockaddr_pppox);
511 struct sockaddr_pppox sp;
512
513 sp.sa_family = AF_PPPOX;
514 sp.sa_protocol = PX_PROTO_PPTP;
515 sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
516
517 memcpy(uaddr, &sp, len);
518
519 *usockaddr_len = len;
520
521 return 0;
522}
523
524static int pptp_release(struct socket *sock)
525{
526 struct sock *sk = sock->sk;
527 struct pppox_sock *po;
528 struct pptp_opt *opt;
529 int error = 0;
530
531 if (!sk)
532 return 0;
533
534 lock_sock(sk);
535
536 if (sock_flag(sk, SOCK_DEAD)) {
537 release_sock(sk);
538 return -EBADF;
539 }
540
541 po = pppox_sk(sk);
542 opt = &po->proto.pptp;
543 del_chan(po);
544
545 pppox_unbind_sock(sk);
546 sk->sk_state = PPPOX_DEAD;
547
548 sock_orphan(sk);
549 sock->sk = NULL;
550
551 release_sock(sk);
552 sock_put(sk);
553
554 return error;
555}
556
557static void pptp_sock_destruct(struct sock *sk)
558{
559 if (!(sk->sk_state & PPPOX_DEAD)) {
560 del_chan(pppox_sk(sk));
561 pppox_unbind_sock(sk);
562 }
563 skb_queue_purge(&sk->sk_receive_queue);
564}
565
566static int pptp_create(struct net *net, struct socket *sock)
567{
568 int error = -ENOMEM;
569 struct sock *sk;
570 struct pppox_sock *po;
571 struct pptp_opt *opt;
572
573 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto);
574 if (!sk)
575 goto out;
576
577 sock_init_data(sock, sk);
578
579 sock->state = SS_UNCONNECTED;
580 sock->ops = &pptp_ops;
581
582 sk->sk_backlog_rcv = pptp_rcv_core;
583 sk->sk_state = PPPOX_NONE;
584 sk->sk_type = SOCK_STREAM;
585 sk->sk_family = PF_PPPOX;
586 sk->sk_protocol = PX_PROTO_PPTP;
587 sk->sk_destruct = pptp_sock_destruct;
588
589 po = pppox_sk(sk);
590 opt = &po->proto.pptp;
591
592 opt->seq_sent = 0; opt->seq_recv = 0;
593 opt->ack_recv = 0; opt->ack_sent = 0;
594
595 error = 0;
596out:
597 return error;
598}
599
600static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
601 unsigned long arg)
602{
603 struct sock *sk = (struct sock *) chan->private;
604 struct pppox_sock *po = pppox_sk(sk);
605 struct pptp_opt *opt = &po->proto.pptp;
606 void __user *argp = (void __user *)arg;
607 int __user *p = argp;
608 int err, val;
609
610 err = -EFAULT;
611 switch (cmd) {
612 case PPPIOCGFLAGS:
613 val = opt->ppp_flags;
614 if (put_user(val, p))
615 break;
616 err = 0;
617 break;
618 case PPPIOCSFLAGS:
619 if (get_user(val, p))
620 break;
621 opt->ppp_flags = val & ~SC_RCV_BITS;
622 err = 0;
623 break;
624 default:
625 err = -ENOTTY;
626 }
627
628 return err;
629}
630
631static const struct ppp_channel_ops pptp_chan_ops = {
632 .start_xmit = pptp_xmit,
633 .ioctl = pptp_ppp_ioctl,
634};
635
636static struct proto pptp_sk_proto __read_mostly = {
637 .name = "PPTP",
638 .owner = THIS_MODULE,
639 .obj_size = sizeof(struct pppox_sock),
640};
641
642static const struct proto_ops pptp_ops = {
643 .family = AF_PPPOX,
644 .owner = THIS_MODULE,
645 .release = pptp_release,
646 .bind = pptp_bind,
647 .connect = pptp_connect,
648 .socketpair = sock_no_socketpair,
649 .accept = sock_no_accept,
650 .getname = pptp_getname,
651 .poll = sock_no_poll,
652 .listen = sock_no_listen,
653 .shutdown = sock_no_shutdown,
654 .setsockopt = sock_no_setsockopt,
655 .getsockopt = sock_no_getsockopt,
656 .sendmsg = sock_no_sendmsg,
657 .recvmsg = sock_no_recvmsg,
658 .mmap = sock_no_mmap,
659 .ioctl = pppox_ioctl,
660};
661
662static const struct pppox_proto pppox_pptp_proto = {
663 .create = pptp_create,
664 .owner = THIS_MODULE,
665};
666
667static const struct gre_protocol gre_pptp_protocol = {
668 .handler = pptp_rcv,
669};
670
671static int __init pptp_init_module(void)
672{
673 int err = 0;
674 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
675
676 callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *),
677 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
678 if (!callid_sock) {
679 pr_err("PPTP: cann't allocate memory\n");
680 return -ENOMEM;
681 }
682
683 err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
684 if (err) {
685 pr_err("PPTP: can't add gre protocol\n");
686 goto out_mem_free;
687 }
688
689 err = proto_register(&pptp_sk_proto, 0);
690 if (err) {
691 pr_err("PPTP: can't register sk_proto\n");
692 goto out_gre_del_protocol;
693 }
694
695 err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
696 if (err) {
697 pr_err("PPTP: can't register pppox_proto\n");
698 goto out_unregister_sk_proto;
699 }
700
701 return 0;
702
703out_unregister_sk_proto:
704 proto_unregister(&pptp_sk_proto);
705out_gre_del_protocol:
706 gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
707out_mem_free:
708 vfree(callid_sock);
709
710 return err;
711}
712
713static void __exit pptp_exit_module(void)
714{
715 unregister_pppox_proto(PX_PROTO_PPTP);
716 proto_unregister(&pptp_sk_proto);
717 gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
718 vfree(callid_sock);
719}
720
721module_init(pptp_init_module);
722module_exit(pptp_exit_module);
723
724MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
725MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
726MODULE_LICENSE("GPL");
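Editor's note: pptp_init_module follows the usual register-in-order, unwind-in-reverse idiom — each failure jumps to a label that undoes only what already succeeded, and pptp_exit_module repeats the full teardown in the same reverse order. A minimal standalone model of the pattern, with stub registrations in place of the real gre/proto/pppox calls:

	#include <stdio.h>

	static int reg_a(void) { return 0; }   /* stand-in for gre_add_protocol */
	static int reg_b(void) { return 0; }   /* stand-in for proto_register */
	static void unreg_a(void) { }

	static int init_module_model(void)
	{
		int err;

		err = reg_a();
		if (err)
			goto out;
		err = reg_b();
		if (err)
			goto out_unreg_a;      /* undo only what succeeded, in reverse */
		return 0;

	out_unreg_a:
		unreg_a();
	out:
		return err;
	}

	int main(void)
	{
		return init_module_model();
	}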
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 87d6b8f36304..5526ab4895e6 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -956,9 +956,9 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
956 (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK))) 956 (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK)))
957 skb->ip_summed = CHECKSUM_UNNECESSARY; 957 skb->ip_summed = CHECKSUM_UNNECESSARY;
958 else 958 else
959 skb->ip_summed = CHECKSUM_NONE; 959 skb_checksum_none_assert(skb);
960 } else 960 } else
961 skb->ip_summed = CHECKSUM_NONE; 961 skb_checksum_none_assert(skb);
962 962
963 /* update netdevice statistics */ 963 /* update netdevice statistics */
964 netdev->stats.rx_packets++; 964 netdev->stats.rx_packets++;
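Editor's note: the skb_checksum_none_assert() conversion in this hunk is behavior-preserving — a freshly received skb should already have ip_summed == CHECKSUM_NONE, so the helper asserts that invariant instead of re-assigning it. The real helper lives in <linux/skbuff.h>; the standalone model below approximates its semantics with a plain assert:

	#include <assert.h>

	#define CHECKSUM_NONE        0
	#define CHECKSUM_UNNECESSARY 1

	struct sk_buff_model { unsigned char ip_summed; };

	/* Model of skb_checksum_none_assert(): documents that the skb arrives
	 * with no checksum state, rather than silently resetting it. */
	static void checksum_none_assert(struct sk_buff_model *skb)
	{
		assert(skb->ip_summed == CHECKSUM_NONE);
	}

	int main(void)
	{
		struct sk_buff_model skb = { CHECKSUM_NONE };

		checksum_none_assert(&skb);   /* passes: invariant holds */
		return 0;
	}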
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 43b8d7797f0a..4a624a29393f 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -85,12 +85,12 @@ static const int bitrate_list[] = {
85 */ 85 */
86static inline int wpa2_capable(void) 86static inline int wpa2_capable(void)
87{ 87{
88 return (0 <= ps3_compare_firmware_version(2, 0, 0)); 88 return 0 <= ps3_compare_firmware_version(2, 0, 0);
89} 89}
90 90
91static inline int precise_ie(void) 91static inline int precise_ie(void)
92{ 92{
93 return (0 <= ps3_compare_firmware_version(2, 2, 0)); 93 return 0 <= ps3_compare_firmware_version(2, 2, 0);
94} 94}
95/* 95/*
96 * post_eurus_cmd helpers 96 * post_eurus_cmd helpers
@@ -506,7 +506,7 @@ static size_t gelic_wl_synthesize_ie(u8 *buf,
506 start[1] = (buf - start - 2); 506 start[1] = (buf - start - 2);
507 507
508 pr_debug("%s: ->\n", __func__); 508 pr_debug("%s: ->\n", __func__);
509 return (buf - start); 509 return buf - start;
510} 510}
511 511
512struct ie_item { 512struct ie_item {
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 85eddda276bd..75c2ff99d66d 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -42,8 +42,6 @@
42#include <linux/types.h> 42#include <linux/types.h>
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44#include <asm/system.h> 44#include <asm/system.h>
45#include <linux/delay.h>
46#include <linux/dma-mapping.h>
47#include <asm/cacheflush.h> 45#include <asm/cacheflush.h>
48#include <linux/pxa168_eth.h> 46#include <linux/pxa168_eth.h>
49 47
@@ -850,7 +848,6 @@ static int rxq_process(struct net_device *dev, int budget)
850 skb->protocol = eth_type_trans(skb, dev); 848 skb->protocol = eth_type_trans(skb, dev);
851 netif_receive_skb(skb); 849 netif_receive_skb(skb);
852 } 850 }
853 dev->last_rx = jiffies;
854 } 851 }
855 /* Fill RX ring with skb's */ 852 /* Fill RX ring with skb's */
856 rxq_refill(dev); 853 rxq_refill(dev);
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 6168a130f33f..7496ed2c34ab 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2029,7 +2029,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2029 dma_unmap_len(lrg_buf_cb2, maplen), 2029 dma_unmap_len(lrg_buf_cb2, maplen),
2030 PCI_DMA_FROMDEVICE); 2030 PCI_DMA_FROMDEVICE);
2031 prefetch(skb->data); 2031 prefetch(skb->data);
2032 skb->ip_summed = CHECKSUM_NONE; 2032 skb_checksum_none_assert(skb);
2033 skb->protocol = eth_type_trans(skb, qdev->ndev); 2033 skb->protocol = eth_type_trans(skb, qdev->ndev);
2034 2034
2035 netif_receive_skb(skb); 2035 netif_receive_skb(skb);
@@ -2076,7 +2076,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2076 PCI_DMA_FROMDEVICE); 2076 PCI_DMA_FROMDEVICE);
2077 prefetch(skb2->data); 2077 prefetch(skb2->data);
2078 2078
2079 skb2->ip_summed = CHECKSUM_NONE; 2079 skb_checksum_none_assert(skb2);
2080 if (qdev->device_id == QL3022_DEVICE_ID) { 2080 if (qdev->device_id == QL3022_DEVICE_ID) {
2081 /* 2081 /*
2082 * Copy the ethhdr from first buffer to second. This 2082 * Copy the ethhdr from first buffer to second. This
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 970389331bbc..714ddf461d73 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,9 +51,11 @@
51 51
52#define _QLCNIC_LINUX_MAJOR 5 52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0 53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 7 54#define _QLCNIC_LINUX_SUBVERSION 10
55#define QLCNIC_LINUX_VERSIONID "5.0.7" 55#define QLCNIC_LINUX_VERSIONID "5.0.10"
56#define QLCNIC_DRV_IDC_VER 0x01 56#define QLCNIC_DRV_IDC_VER 0x01
57#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
58 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
57 59
58#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 60#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
59#define _major(v) (((v) >> 24) & 0xff) 61#define _major(v) (((v) >> 24) & 0xff)
@@ -148,6 +150,7 @@
148 150
149#define DEFAULT_RCV_DESCRIPTORS_1G 2048 151#define DEFAULT_RCV_DESCRIPTORS_1G 2048
150#define DEFAULT_RCV_DESCRIPTORS_10G 4096 152#define DEFAULT_RCV_DESCRIPTORS_10G 4096
153#define MAX_RDS_RINGS 2
151 154
152#define get_next_index(index, length) \ 155#define get_next_index(index, length) \
153 (((index) + 1) & ((length) - 1)) 156 (((index) + 1) & ((length) - 1))
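Editor's note: get_next_index() relies on the ring length being a power of two, so the bitwise AND wraps the incremented index without a modulo or a branch. A quick standalone check of that property:

	#include <assert.h>

	#define get_next_index(index, length) \
		(((index) + 1) & ((length) - 1))

	int main(void)
	{
		/* With length = 8 (a power of two), index 7 wraps back to 0. */
		assert(get_next_index(6, 8) == 7);
		assert(get_next_index(7, 8) == 0);
		return 0;
	}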
@@ -172,7 +175,7 @@
172 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)) 175 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
173 176
174#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \ 177#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
175 ((_desc)->flags_opcode = \ 178 ((_desc)->flags_opcode |= \
176 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))) 179 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
177 180
178#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \ 181#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
@@ -221,7 +224,8 @@ struct rcv_desc {
221#define QLCNIC_LRO_DESC 0x12 224#define QLCNIC_LRO_DESC 0x12
222 225
223/* for status field in status_desc */ 226/* for status field in status_desc */
224#define STATUS_CKSUM_OK (2) 227#define STATUS_CKSUM_LOOP 0
228#define STATUS_CKSUM_OK 2
225 229
226/* owner bits of status_desc */ 230/* owner bits of status_desc */
227#define STATUS_OWNER_HOST (0x1ULL << 56) 231#define STATUS_OWNER_HOST (0x1ULL << 56)
@@ -555,6 +559,8 @@ struct qlcnic_recv_context {
555#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026 559#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
556#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027 560#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
557#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028 561#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
562#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
563#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
558 564
559#define QLCNIC_RCODE_SUCCESS 0 565#define QLCNIC_RCODE_SUCCESS 0
560#define QLCNIC_RCODE_TIMEOUT 17 566#define QLCNIC_RCODE_TIMEOUT 17
@@ -717,6 +723,8 @@ struct qlcnic_cardrsp_tx_ctx {
717#define QLCNIC_MAC_NOOP 0 723#define QLCNIC_MAC_NOOP 0
718#define QLCNIC_MAC_ADD 1 724#define QLCNIC_MAC_ADD 1
719#define QLCNIC_MAC_DEL 2 725#define QLCNIC_MAC_DEL 2
726#define QLCNIC_MAC_VLAN_ADD 3
727#define QLCNIC_MAC_VLAN_DEL 4
720 728
721struct qlcnic_mac_list_s { 729struct qlcnic_mac_list_s {
722 struct list_head list; 730 struct list_head list;
@@ -893,9 +901,14 @@ struct qlcnic_mac_req {
893#define QLCNIC_MSI_ENABLED 0x02 901#define QLCNIC_MSI_ENABLED 0x02
894#define QLCNIC_MSIX_ENABLED 0x04 902#define QLCNIC_MSIX_ENABLED 0x04
895#define QLCNIC_LRO_ENABLED 0x08 903#define QLCNIC_LRO_ENABLED 0x08
904#define QLCNIC_LRO_DISABLED 0x00
896#define QLCNIC_BRIDGE_ENABLED 0X10 905#define QLCNIC_BRIDGE_ENABLED 0X10
897#define QLCNIC_DIAG_ENABLED 0x20 906#define QLCNIC_DIAG_ENABLED 0x20
898#define QLCNIC_ESWITCH_ENABLED 0x40 907#define QLCNIC_ESWITCH_ENABLED 0x40
908#define QLCNIC_ADAPTER_INITIALIZED 0x80
909#define QLCNIC_TAGGING_ENABLED 0x100
910#define QLCNIC_MACSPOOF 0x200
911#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
899#define QLCNIC_IS_MSI_FAMILY(adapter) \ 912#define QLCNIC_IS_MSI_FAMILY(adapter) \
900 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 913 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
901 914
@@ -916,6 +929,22 @@ struct qlcnic_mac_req {
916#define QLCNIC_INTERRUPT_TEST 1 929#define QLCNIC_INTERRUPT_TEST 1
917#define QLCNIC_LOOPBACK_TEST 2 930#define QLCNIC_LOOPBACK_TEST 2
918 931
932#define QLCNIC_FILTER_AGE 80
933#define QLCNIC_LB_MAX_FILTERS 64
934
935struct qlcnic_filter {
936 struct hlist_node fnode;
937 u8 faddr[ETH_ALEN];
938 u16 vlan_id;
939 unsigned long ftime;
940};
941
942struct qlcnic_filter_hash {
943 struct hlist_head *fhead;
944 u8 fnum;
945 u8 fmax;
946};
947
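Editor's note: qlcnic_filter_hash is a fixed-size table of hlist buckets (fmax slots, fnum live entries) keyed on a learned MAC plus VLAN id; the ftime field is what qlcnic_prune_lb_filters() later compares against QLCNIC_FILTER_AGE. The bucket selection below is purely illustrative — the header does not show the driver's actual hash function:

	#include <stdint.h>
	#include <stdio.h>

	#define ETH_ALEN 6

	/* Hypothetical bucket index: fold the MAC and vlan id into fmax slots.
	 * The real driver's hash is not visible in this header. */
	static unsigned int filter_bucket(const uint8_t faddr[ETH_ALEN],
					  uint16_t vlan_id, unsigned int fmax)
	{
		unsigned int i, h = vlan_id;

		for (i = 0; i < ETH_ALEN; i++)
			h = h * 31 + faddr[i];
		return h & (fmax - 1);        /* fmax assumed a power of two */
	}

	int main(void)
	{
		uint8_t mac[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

		printf("bucket=%u\n", filter_bucket(mac, 0, 64));
		return 0;
	}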
919struct qlcnic_adapter { 948struct qlcnic_adapter {
920 struct qlcnic_hardware_context ahw; 949 struct qlcnic_hardware_context ahw;
921 950
@@ -924,6 +953,7 @@ struct qlcnic_adapter {
924 struct list_head mac_list; 953 struct list_head mac_list;
925 954
926 spinlock_t tx_clean_lock; 955 spinlock_t tx_clean_lock;
956 spinlock_t mac_learn_lock;
927 957
928 u16 num_txd; 958 u16 num_txd;
929 u16 num_rxd; 959 u16 num_rxd;
@@ -931,7 +961,6 @@ struct qlcnic_adapter {
931 961
932 u8 max_rds_rings; 962 u8 max_rds_rings;
933 u8 max_sds_rings; 963 u8 max_sds_rings;
934 u8 driver_mismatch;
935 u8 msix_supported; 964 u8 msix_supported;
936 u8 rx_csum; 965 u8 rx_csum;
937 u8 portnum; 966 u8 portnum;
@@ -961,6 +990,7 @@ struct qlcnic_adapter {
961 u16 max_tx_ques; 990 u16 max_tx_ques;
962 u16 max_rx_ques; 991 u16 max_rx_ques;
963 u16 max_mtu; 992 u16 max_mtu;
993 u16 pvid;
964 994
965 u32 fw_hal_version; 995 u32 fw_hal_version;
966 u32 capabilities; 996 u32 capabilities;
@@ -969,7 +999,7 @@ struct qlcnic_adapter {
969 u32 temp; 999 u32 temp;
970 1000
971 u32 int_vec_bit; 1001 u32 int_vec_bit;
972 u32 heartbit; 1002 u32 heartbeat;
973 1003
974 u8 max_mac_filters; 1004 u8 max_mac_filters;
975 u8 dev_state; 1005 u8 dev_state;
@@ -983,6 +1013,7 @@ struct qlcnic_adapter {
983 1013
984 u64 dev_rst_time; 1014 u64 dev_rst_time;
985 1015
1016 struct vlan_group *vlgrp;
986 struct qlcnic_npar_info *npars; 1017 struct qlcnic_npar_info *npars;
987 struct qlcnic_eswitch *eswitch; 1018 struct qlcnic_eswitch *eswitch;
988 struct qlcnic_nic_template *nic_ops; 1019 struct qlcnic_nic_template *nic_ops;
@@ -1003,6 +1034,8 @@ struct qlcnic_adapter {
1003 1034
1004 struct qlcnic_nic_intr_coalesce coal; 1035 struct qlcnic_nic_intr_coalesce coal;
1005 1036
1037 struct qlcnic_filter_hash fhash;
1038
1006 unsigned long state; 1039 unsigned long state;
1007 __le32 file_prd_off; /*File fw product offset*/ 1040 __le32 file_prd_off; /*File fw product offset*/
1008 u32 fw_version; 1041 u32 fw_version;
@@ -1042,7 +1075,7 @@ struct qlcnic_pci_info {
1042}; 1075};
1043 1076
1044struct qlcnic_npar_info { 1077struct qlcnic_npar_info {
1045 u16 vlan_id; 1078 u16 pvid;
1046 u16 min_bw; 1079 u16 min_bw;
1047 u16 max_bw; 1080 u16 max_bw;
1048 u8 phy_port; 1081 u8 phy_port;
@@ -1050,11 +1083,13 @@ struct qlcnic_npar_info {
1050 u8 active; 1083 u8 active;
1051 u8 enable_pm; 1084 u8 enable_pm;
1052 u8 dest_npar; 1085 u8 dest_npar;
1053 u8 host_vlan_tag;
1054 u8 promisc_mode;
1055 u8 discard_tagged; 1086 u8 discard_tagged;
1056 u8 mac_learning; 1087 u8 mac_override;
1088 u8 mac_anti_spoof;
1089 u8 promisc_mode;
1090 u8 offload_flags;
1057}; 1091};
1092
1058struct qlcnic_eswitch { 1093struct qlcnic_eswitch {
1059 u8 port; 1094 u8 port;
1060 u8 active_vports; 1095 u8 active_vports;
@@ -1086,7 +1121,6 @@ struct qlcnic_eswitch {
1086#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW) 1121#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
1087#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES) 1122#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
1088#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES) 1123#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
1089#define IS_VALID_MODE(mode) (mode == 0 || mode == 1)
1090 1124
1091struct qlcnic_pci_func_cfg { 1125struct qlcnic_pci_func_cfg {
1092 u16 func_type; 1126 u16 func_type;
@@ -1118,12 +1152,41 @@ struct qlcnic_pm_func_cfg {
1118 1152
1119struct qlcnic_esw_func_cfg { 1153struct qlcnic_esw_func_cfg {
1120 u16 vlan_id; 1154 u16 vlan_id;
1155 u8 op_mode;
1156 u8 op_type;
1121 u8 pci_func; 1157 u8 pci_func;
1122 u8 host_vlan_tag; 1158 u8 host_vlan_tag;
1123 u8 promisc_mode; 1159 u8 promisc_mode;
1124 u8 discard_tagged; 1160 u8 discard_tagged;
1125 u8 mac_learning; 1161 u8 mac_override;
1126 u8 reserved; 1162 u8 mac_anti_spoof;
1163 u8 offload_flags;
1164 u8 reserved[5];
1165};
1166
1167#define QLCNIC_STATS_VERSION 1
1168#define QLCNIC_STATS_PORT 1
1169#define QLCNIC_STATS_ESWITCH 2
1170#define QLCNIC_QUERY_RX_COUNTER 0
1171#define QLCNIC_QUERY_TX_COUNTER 1
1172struct __qlcnic_esw_statistics {
1173 __le16 context_id;
1174 __le16 version;
1175 __le16 size;
1176 __le16 unused;
1177 __le64 unicast_frames;
1178 __le64 multicast_frames;
1179 __le64 broadcast_frames;
1180 __le64 dropped_frames;
1181 __le64 errors;
1182 __le64 local_frames;
1183 __le64 numbytes;
1184 __le64 rsvd[3];
1185};
1186
1187struct qlcnic_esw_statistics {
1188 struct __qlcnic_esw_statistics rx;
1189 struct __qlcnic_esw_statistics tx;
1127}; 1190};
1128 1191
1129int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val); 1192int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
@@ -1171,6 +1234,8 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
1171int qlcnic_get_board_info(struct qlcnic_adapter *adapter); 1234int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
1172int qlcnic_wol_supported(struct qlcnic_adapter *adapter); 1235int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1173int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate); 1236int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
1237void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
1238void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
1174 1239
1175/* Functions from qlcnic_init.c */ 1240/* Functions from qlcnic_init.c */
1176int qlcnic_load_firmware(struct qlcnic_adapter *adapter); 1241int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
@@ -1199,7 +1264,7 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
1199void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter); 1264void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
1200void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter); 1265void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
1201 1266
1202int qlcnic_init_firmware(struct qlcnic_adapter *adapter); 1267int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
1203void qlcnic_watchdog_task(struct work_struct *work); 1268void qlcnic_watchdog_task(struct work_struct *work);
1204void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid, 1269void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1205 struct qlcnic_host_rds_ring *rds_ring); 1270 struct qlcnic_host_rds_ring *rds_ring);
@@ -1220,7 +1285,6 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
1220int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); 1285int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1221void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, 1286void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1222 struct qlcnic_host_tx_ring *tx_ring); 1287 struct qlcnic_host_tx_ring *tx_ring);
1223int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac);
1224void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter); 1288void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
1225int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter); 1289int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
1226void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); 1290void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
@@ -1249,9 +1313,16 @@ int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8,
1249int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8, 1313int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8,
1250 struct qlcnic_eswitch *); 1314 struct qlcnic_eswitch *);
1251int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8); 1315int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8);
1252int qlcnic_config_switch_port(struct qlcnic_adapter *, u8, int, u8, u8, 1316int qlcnic_config_switch_port(struct qlcnic_adapter *,
1253 u8, u8, u16); 1317 struct qlcnic_esw_func_cfg *);
1318int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
1319 struct qlcnic_esw_func_cfg *);
1254int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8); 1320int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
1321int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
1322 struct __qlcnic_esw_statistics *);
1323int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
1324 struct __qlcnic_esw_statistics *);
1325int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
1255extern int qlcnic_config_tso; 1326extern int qlcnic_config_tso;
1256 1327
1257/* 1328/*
@@ -1280,6 +1351,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
1280 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"}, 1351 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
1281 {0x1077, 0x8020, 0x1077, 0x20f, 1352 {0x1077, 0x8020, 0x1077, 0x20f,
1282 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"}, 1353 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1354 {0x1077, 0x8020, 0x103c, 0x3733,
1355 "NC523SFP 10Gb 2-port Flex-10 Server Adapter"},
1283 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"}, 1356 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1284}; 1357};
1285 1358
@@ -1298,7 +1371,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1298extern const struct ethtool_ops qlcnic_ethtool_ops; 1371extern const struct ethtool_ops qlcnic_ethtool_ops;
1299 1372
1300struct qlcnic_nic_template { 1373struct qlcnic_nic_template {
1301 int (*get_mac_addr) (struct qlcnic_adapter *, u8*);
1302 int (*config_bridged_mode) (struct qlcnic_adapter *, u32); 1374 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1303 int (*config_led) (struct qlcnic_adapter *, u32, u32); 1375 int (*config_led) (struct qlcnic_adapter *, u32, u32);
1304 int (*start_firmware) (struct qlcnic_adapter *); 1376 int (*start_firmware) (struct qlcnic_adapter *);
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index cc5d861d9a12..95a821e0b66f 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -813,9 +813,8 @@ int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port,
813 arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET); 813 arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
814 814
815 eswitch->port = arg1 & 0xf; 815 eswitch->port = arg1 & 0xf;
816 eswitch->active_vports = LSB(arg2); 816 eswitch->max_ucast_filters = LSW(arg2);
817 eswitch->max_ucast_filters = MSB(arg2); 817 eswitch->max_active_vlans = MSW(arg2) & 0xfff;
818 eswitch->max_active_vlans = LSB(MSW(arg2));
819 if (arg1 & BIT_6) 818 if (arg1 & BIT_6)
820 eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING; 819 eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
821 if (arg1 & BIT_7) 820 if (arg1 & BIT_7)
@@ -943,43 +942,271 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
943 return err; 942 return err;
944} 943}
945 944
946/* Configure eSwitch port */ 945int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
947int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, u8 id, 946 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
948 int vlan_tagging, u8 discard_tagged, u8 promsc_mode, 947
949 u8 mac_learn, u8 pci_func, u16 vlan_id) 948 size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
949 struct __qlcnic_esw_statistics *stats;
950 dma_addr_t stats_dma_t;
951 void *stats_addr;
952 u32 arg1;
953 int err;
954
955 if (esw_stats == NULL)
956 return -EINVAL;
957
958 if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
959 func != adapter->ahw.pci_func) {
960 dev_err(&adapter->pdev->dev,
961 "Not privilege to query stats for func=%d", func);
962 return -EIO;
963 }
964
965 stats_addr = pci_alloc_consistent(adapter->pdev, stats_size,
966 &stats_dma_t);
967 if (!stats_addr) {
968 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
969 return -ENOMEM;
970 }
971 memset(stats_addr, 0, stats_size);
972
973 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
974 arg1 |= rx_tx << 15 | stats_size << 16;
975
976 err = qlcnic_issue_cmd(adapter,
977 adapter->ahw.pci_func,
978 adapter->fw_hal_version,
979 arg1,
980 MSD(stats_dma_t),
981 LSD(stats_dma_t),
982 QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
983
984 if (!err) {
985 stats = (struct __qlcnic_esw_statistics *)stats_addr;
986 esw_stats->context_id = le16_to_cpu(stats->context_id);
987 esw_stats->version = le16_to_cpu(stats->version);
988 esw_stats->size = le16_to_cpu(stats->size);
989 esw_stats->multicast_frames =
990 le64_to_cpu(stats->multicast_frames);
991 esw_stats->broadcast_frames =
992 le64_to_cpu(stats->broadcast_frames);
993 esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
994 esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
995 esw_stats->local_frames = le64_to_cpu(stats->local_frames);
996 esw_stats->errors = le64_to_cpu(stats->errors);
997 esw_stats->numbytes = le64_to_cpu(stats->numbytes);
998 }
999
1000 pci_free_consistent(adapter->pdev, stats_size, stats_addr,
1001 stats_dma_t);
1002 return err;
1003}
1004
1005int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
1006 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
1007
1008 struct __qlcnic_esw_statistics port_stats;
1009 u8 i;
1010 int ret = -EIO;
1011
1012 if (esw_stats == NULL)
1013 return -EINVAL;
1014 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
1015 return -EIO;
1016 if (adapter->npars == NULL)
1017 return -EIO;
1018
1019 memset(esw_stats, 0, sizeof(struct __qlcnic_esw_statistics));
1020 esw_stats->context_id = eswitch;
1021
1022 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
1023 if (adapter->npars[i].phy_port != eswitch)
1024 continue;
1025
1026 memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
1027 if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
1028 continue;
1029
1030 esw_stats->size = port_stats.size;
1031 esw_stats->version = port_stats.version;
1032 esw_stats->unicast_frames += port_stats.unicast_frames;
1033 esw_stats->multicast_frames += port_stats.multicast_frames;
1034 esw_stats->broadcast_frames += port_stats.broadcast_frames;
1035 esw_stats->dropped_frames += port_stats.dropped_frames;
1036 esw_stats->errors += port_stats.errors;
1037 esw_stats->local_frames += port_stats.local_frames;
1038 esw_stats->numbytes += port_stats.numbytes;
1039
1040 ret = 0;
1041 }
1042 return ret;
1043}
1044
1045int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
1046 const u8 port, const u8 rx_tx)
950{ 1047{
951 int err = -EIO; 1048
952 u32 arg1; 1049 u32 arg1;
953 struct qlcnic_eswitch *eswitch;
954 1050
955 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 1051 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
956 return err; 1052 return -EIO;
957 1053
958 eswitch = &adapter->eswitch[id]; 1054 if (func_esw == QLCNIC_STATS_PORT) {
959 if (!(eswitch->flags & QLCNIC_SWITCH_ENABLE)) 1055 if (port >= QLCNIC_MAX_PCI_FUNC)
1056 goto err_ret;
1057 } else if (func_esw == QLCNIC_STATS_ESWITCH) {
1058 if (port >= QLCNIC_NIU_MAX_XG_PORTS)
1059 goto err_ret;
1060 } else {
1061 goto err_ret;
1062 }
1063
1064 if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
1065 goto err_ret;
1066
1067 arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
1068 arg1 |= BIT_14 | rx_tx << 15;
1069
1070 return qlcnic_issue_cmd(adapter,
1071 adapter->ahw.pci_func,
1072 adapter->fw_hal_version,
1073 arg1,
1074 0,
1075 0,
1076 QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);
1077
1078err_ret:
1079 dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
1080 "rx_ctx=%d\n", func_esw, port, rx_tx);
1081 return -EIO;
1082}
1083
1084static int
1085__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1086 u32 *arg1, u32 *arg2)
1087{
1088 int err = -EIO;
1089 u8 pci_func;
1090 pci_func = (*arg1 >> 8);
1091 err = qlcnic_issue_cmd(adapter,
1092 adapter->ahw.pci_func,
1093 adapter->fw_hal_version,
1094 *arg1,
1095 0,
1096 0,
1097 QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG);
1098
1099 if (err == QLCNIC_RCODE_SUCCESS) {
1100 *arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
1101 *arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
1102 dev_info(&adapter->pdev->dev,
1103 "eSwitch port config for pci func %d\n", pci_func);
1104 } else {
1105 dev_err(&adapter->pdev->dev,
1106 "Failed to get eswitch port config for pci func %d\n",
1107 pci_func);
1108 }
1109 return err;
1110}
1111/* Configure eSwitch port
1112 * op_mode = 0 for setting default port behavior
1113 * op_mode = 1 for setting vlan id
1114 * op_mode = 2 for deleting vlan id
1115 * op_type = 0 for vlan_id
1116 * op_type = 1 for port vlan_id
1117 */
1118int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1119 struct qlcnic_esw_func_cfg *esw_cfg)
1120{
1121 int err = -EIO;
1122 u32 arg1, arg2 = 0;
1123 u8 pci_func;
1124
1125 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
960 return err; 1126 return err;
1127 pci_func = esw_cfg->pci_func;
1128 arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
1129 arg1 |= (pci_func << 8);
961 1130
962 arg1 = eswitch->port | (discard_tagged ? BIT_4 : 0); 1131 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
963 arg1 |= (promsc_mode ? BIT_6 : 0) | (mac_learn ? BIT_7 : 0); 1132 return err;
964 arg1 |= pci_func << 8; 1133 arg1 &= ~(0x0ff << 8);
965 if (vlan_tagging) 1134 arg1 |= (pci_func << 8);
966 arg1 |= BIT_5 | (vlan_id << 16); 1135 arg1 &= ~(BIT_2 | BIT_3);
1136 switch (esw_cfg->op_mode) {
1137 case QLCNIC_PORT_DEFAULTS:
1138 arg1 |= (BIT_4 | BIT_6 | BIT_7);
1139 arg2 |= (BIT_0 | BIT_1);
1140 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1141 arg2 |= (BIT_2 | BIT_3);
1142 if (!(esw_cfg->discard_tagged))
1143 arg1 &= ~BIT_4;
1144 if (!(esw_cfg->promisc_mode))
1145 arg1 &= ~BIT_6;
1146 if (!(esw_cfg->mac_override))
1147 arg1 &= ~BIT_7;
1148 if (!(esw_cfg->mac_anti_spoof))
1149 arg2 &= ~BIT_0;
1150 if (!(esw_cfg->offload_flags & BIT_0))
1151 arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
1152 if (!(esw_cfg->offload_flags & BIT_1))
1153 arg2 &= ~BIT_2;
1154 if (!(esw_cfg->offload_flags & BIT_2))
1155 arg2 &= ~BIT_3;
1156 break;
1157 case QLCNIC_ADD_VLAN:
1158 arg1 |= (BIT_2 | BIT_5);
1159 arg1 |= (esw_cfg->vlan_id << 16);
1160 break;
1161 case QLCNIC_DEL_VLAN:
1162 arg1 |= (BIT_3 | BIT_5);
1163 arg1 &= ~(0x0ffff << 16);
1164 break;
1165 default:
1166 return err;
1167 }
967 1168
968 err = qlcnic_issue_cmd(adapter, 1169 err = qlcnic_issue_cmd(adapter,
969 adapter->ahw.pci_func, 1170 adapter->ahw.pci_func,
970 adapter->fw_hal_version, 1171 adapter->fw_hal_version,
971 arg1, 1172 arg1,
972 0, 1173 arg2,
973 0, 1174 0,
974 QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH); 1175 QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);
975 1176
976 if (err != QLCNIC_RCODE_SUCCESS) { 1177 if (err != QLCNIC_RCODE_SUCCESS) {
977 dev_err(&adapter->pdev->dev, 1178 dev_err(&adapter->pdev->dev,
978 "Failed to configure eswitch port%d\n", eswitch->port); 1179 "Failed to configure eswitch pci func %d\n", pci_func);
979 } else { 1180 } else {
980 dev_info(&adapter->pdev->dev, 1181 dev_info(&adapter->pdev->dev,
981 "Configured eSwitch for port %d\n", eswitch->port); 1182 "Configured eSwitch for pci func %d\n", pci_func);
982 } 1183 }
983 1184
984 return err; 1185 return err;
985} 1186}
1187
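Editor's note: callers of qlcnic_config_switch_port() now pass a single qlcnic_esw_func_cfg instead of seven scalar arguments, and op_mode selects which bit groups of arg1/arg2 are touched. The standalone model below reproduces only the QLCNIC_ADD_VLAN encoding seen above; the real function additionally reads back the current port config and masks fields before applying the op:

	#include <stdint.h>
	#include <stdio.h>

	#define BIT_2 (1u << 2)
	#define BIT_5 (1u << 5)

	/* Model of the QLCNIC_ADD_VLAN encoding: phy port in bit 0,
	 * pci func in bits 8-15, vlan id in bits 16-31. */
	static uint32_t encode_add_vlan(uint8_t phy_port, uint8_t pci_func,
					uint16_t vlan_id)
	{
		uint32_t arg1 = phy_port & 1u;

		arg1 |= (uint32_t)pci_func << 8;
		arg1 |= BIT_2 | BIT_5;             /* add-vlan op + host vlan tag */
		arg1 |= (uint32_t)vlan_id << 16;
		return arg1;
	}

	int main(void)
	{
		printf("arg1=0x%08x\n", encode_add_vlan(0, 2, 100));
		return 0;
	}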
1188int
1189qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1190 struct qlcnic_esw_func_cfg *esw_cfg)
1191{
1192 u32 arg1, arg2;
1193 u8 phy_port;
1194 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
1195 phy_port = adapter->npars[esw_cfg->pci_func].phy_port;
1196 else
1197 phy_port = adapter->physical_port;
1198 arg1 = phy_port;
1199 arg1 |= (esw_cfg->pci_func << 8);
1200 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
1201 return -EIO;
1202
1203 esw_cfg->discard_tagged = !!(arg1 & BIT_4);
1204 esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
1205 esw_cfg->promisc_mode = !!(arg1 & BIT_6);
1206 esw_cfg->mac_override = !!(arg1 & BIT_7);
1207 esw_cfg->vlan_id = LSW(arg1 >> 16);
1208 esw_cfg->mac_anti_spoof = (arg2 & 0x1);
1209 esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);
1210
1211 return 0;
1212}
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 9328d59e21e0..cb9463bd6b1e 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -99,7 +99,7 @@ static const u32 diag_registers[] = {
99 CRB_XG_STATE_P3, 99 CRB_XG_STATE_P3,
100 CRB_FW_CAPABILITIES_1, 100 CRB_FW_CAPABILITIES_1,
101 ISR_INT_STATE_REG, 101 ISR_INT_STATE_REG,
102 QLCNIC_CRB_DEV_REF_COUNT, 102 QLCNIC_CRB_DRV_ACTIVE,
103 QLCNIC_CRB_DEV_STATE, 103 QLCNIC_CRB_DEV_STATE,
104 QLCNIC_CRB_DRV_STATE, 104 QLCNIC_CRB_DRV_STATE,
105 QLCNIC_CRB_DRV_SCRATCH, 105 QLCNIC_CRB_DRV_SCRATCH,
@@ -115,9 +115,13 @@ static const u32 diag_registers[] = {
115 -1 115 -1
116}; 116};
117 117
118#define QLCNIC_MGMT_API_VERSION 2
119#define QLCNIC_DEV_INFO_SIZE 1
120#define QLCNIC_ETHTOOL_REGS_VER 2
118static int qlcnic_get_regs_len(struct net_device *dev) 121static int qlcnic_get_regs_len(struct net_device *dev)
119{ 122{
120 return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN; 123 return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
124 QLCNIC_DEV_INFO_SIZE + 1;
121} 125}
122 126
123static int qlcnic_get_eeprom_len(struct net_device *dev) 127static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -342,10 +346,13 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
342 int ring, i = 0; 346 int ring, i = 0;
343 347
344 memset(p, 0, qlcnic_get_regs_len(dev)); 348 memset(p, 0, qlcnic_get_regs_len(dev));
345 regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | 349 regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
346 (adapter->pdev)->device; 350 (adapter->ahw.revision_id << 16) | (adapter->pdev)->device;
347 351
348 for (i = 0; diag_registers[i] != -1; i++) 352 regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
353 regs_buff[1] = QLCNIC_MGMT_API_VERSION;
354
355 for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[i] != -1; i++)
349 regs_buff[i] = QLCRD32(adapter, diag_registers[i]); 356 regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
350 357
351 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) 358 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
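Editor's note: the regs dump is now self-describing — regs_buff[0] carries a 0xcafe marker plus the device-info word count, regs_buff[1] the management API version, and regs->version packs the ethtool layout version, ASIC revision, and PCI device id into one word. A standalone pack/unpack check of that version encoding:

	#include <assert.h>
	#include <stdint.h>

	#define QLCNIC_ETHTOOL_REGS_VER 2

	static uint32_t pack_regs_version(uint8_t revision_id, uint16_t pci_device)
	{
		return ((uint32_t)QLCNIC_ETHTOOL_REGS_VER << 24) |
		       ((uint32_t)revision_id << 16) | pci_device;
	}

	int main(void)
	{
		uint32_t v = pack_regs_version(0x42, 0x8020);  /* cLOM8214 device id */

		assert((v >> 24) == QLCNIC_ETHTOOL_REGS_VER);
		assert(((v >> 16) & 0xff) == 0x42);
		assert((v & 0xffff) == 0x8020);
		return 0;
	}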
@@ -747,6 +754,14 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
747{ 754{
748 memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN); 755 memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
749 756
757 data[0] = qlcnic_reg_test(dev);
758 if (data[0])
759 eth_test->flags |= ETH_TEST_FL_FAILED;
760
761 data[1] = (u64) qlcnic_test_link(dev);
762 if (data[1])
763 eth_test->flags |= ETH_TEST_FL_FAILED;
764
750 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 765 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
751 data[2] = qlcnic_irq_test(dev); 766 data[2] = qlcnic_irq_test(dev);
752 if (data[2]) 767 if (data[2])
@@ -757,15 +772,6 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
757 eth_test->flags |= ETH_TEST_FL_FAILED; 772 eth_test->flags |= ETH_TEST_FL_FAILED;
758 773
759 } 774 }
760
761 data[0] = qlcnic_reg_test(dev);
762 if (data[0])
763 eth_test->flags |= ETH_TEST_FL_FAILED;
764
765 /* link test */
766 data[1] = (u64) qlcnic_test_link(dev);
767 if (data[1])
768 eth_test->flags |= ETH_TEST_FL_FAILED;
769} 775}
770 776
771static void 777static void
@@ -805,6 +811,20 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
805 } 811 }
806} 812}
807 813
814static int qlcnic_set_tx_csum(struct net_device *dev, u32 data)
815{
816 struct qlcnic_adapter *adapter = netdev_priv(dev);
817
818 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
819 return -EOPNOTSUPP;
820 if (data)
821 dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
822 else
823 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
824
825 return 0;
826
827}
808static u32 qlcnic_get_tx_csum(struct net_device *dev) 828static u32 qlcnic_get_tx_csum(struct net_device *dev)
809{ 829{
810 return dev->features & NETIF_F_IP_CSUM; 830 return dev->features & NETIF_F_IP_CSUM;
@@ -819,7 +839,23 @@ static u32 qlcnic_get_rx_csum(struct net_device *dev)
819static int qlcnic_set_rx_csum(struct net_device *dev, u32 data) 839static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
820{ 840{
821 struct qlcnic_adapter *adapter = netdev_priv(dev); 841 struct qlcnic_adapter *adapter = netdev_priv(dev);
842
843 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
844 return -EOPNOTSUPP;
845 if (!!data) {
846 adapter->rx_csum = !!data;
847 return 0;
848 }
849
850 if (adapter->flags & QLCNIC_LRO_ENABLED) {
851 if (qlcnic_config_hw_lro(adapter, QLCNIC_LRO_DISABLED))
852 return -EIO;
853
854 dev->features &= ~NETIF_F_LRO;
855 qlcnic_send_lro_cleanup(adapter);
856 }
822 adapter->rx_csum = !!data; 857 adapter->rx_csum = !!data;
858 dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
823 return 0; 859 return 0;
824} 860}
825 861
@@ -1002,6 +1038,15 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
1002 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)) 1038 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
1003 return -EINVAL; 1039 return -EINVAL;
1004 1040
1041 if (!adapter->rx_csum) {
1042 dev_info(&adapter->pdev->dev, "rx csum is off, "
1043 "cannot toggle lro\n");
1044 return -EINVAL;
1045 }
1046
1047 if ((data & ETH_FLAG_LRO) && (adapter->flags & QLCNIC_LRO_ENABLED))
1048 return 0;
1049
1005 if (data & ETH_FLAG_LRO) { 1050 if (data & ETH_FLAG_LRO) {
1006 hw_lro = QLCNIC_LRO_ENABLED; 1051 hw_lro = QLCNIC_LRO_ENABLED;
1007 netdev->features |= NETIF_F_LRO; 1052 netdev->features |= NETIF_F_LRO;
@@ -1048,7 +1093,7 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
1048 .get_pauseparam = qlcnic_get_pauseparam, 1093 .get_pauseparam = qlcnic_get_pauseparam,
1049 .set_pauseparam = qlcnic_set_pauseparam, 1094 .set_pauseparam = qlcnic_set_pauseparam,
1050 .get_tx_csum = qlcnic_get_tx_csum, 1095 .get_tx_csum = qlcnic_get_tx_csum,
1051 .set_tx_csum = ethtool_op_set_tx_csum, 1096 .set_tx_csum = qlcnic_set_tx_csum,
1052 .set_sg = ethtool_op_set_sg, 1097 .set_sg = ethtool_op_set_sg,
1053 .get_tso = qlcnic_get_tso, 1098 .get_tso = qlcnic_get_tso,
1054 .set_tso = qlcnic_set_tso, 1099 .set_tso = qlcnic_set_tso,
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 15fc32070be3..716203e41dc7 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -698,7 +698,7 @@ enum {
698#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0)) 698#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
699#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8)) 699#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
700#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac)) 700#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
701#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138)) 701#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138))
702#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140)) 702#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
703 703
704#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) 704#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
@@ -718,8 +718,9 @@ enum {
718#define QLCNIC_DEV_FAILED 0x6 718#define QLCNIC_DEV_FAILED 0x6
719#define QLCNIC_DEV_QUISCENT 0x7 719#define QLCNIC_DEV_QUISCENT 0x7
720 720
721#define QLCNIC_DEV_NPAR_NOT_RDY 0 721#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */
722#define QLCNIC_DEV_NPAR_RDY 1 722#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
723#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
723 724
724#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4))) 725#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4)))
725#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) 726#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
@@ -744,6 +745,15 @@ enum {
744#define FW_POLL_DELAY (1 * HZ) 745#define FW_POLL_DELAY (1 * HZ)
745#define FW_FAIL_THRESH 2 746#define FW_FAIL_THRESH 2
746 747
748#define QLCNIC_RESET_TIMEOUT_SECS 10
749#define QLCNIC_INIT_TIMEOUT_SECS 30
750#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000
751#define QLCNIC_RCVPEG_CHECK_DELAY 10
752#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
753#define QLCNIC_CMDPEG_CHECK_DELAY 500
754#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
755#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
756
747#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) 757#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
748#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) 758#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
749 759
@@ -770,6 +780,7 @@ struct qlcnic_legacy_intr_set {
770#define QLCNIC_DRV_OP_MODE 0x1b2170 780#define QLCNIC_DRV_OP_MODE 0x1b2170
771#define QLCNIC_MSIX_BASE 0x132110 781#define QLCNIC_MSIX_BASE 0x132110
772#define QLCNIC_MAX_PCI_FUNC 8 782#define QLCNIC_MAX_PCI_FUNC 8
783#define QLCNIC_MAX_VLAN_FILTERS 64
773 784
774/* PCI function operational mode */ 785/* PCI function operational mode */
775enum { 786enum {
@@ -778,6 +789,12 @@ enum {
778 QLCNIC_NON_PRIV_FUNC = 2 789 QLCNIC_NON_PRIV_FUNC = 2
779}; 790};
780 791
792enum {
793 QLCNIC_PORT_DEFAULTS = 0,
794 QLCNIC_ADD_VLAN = 1,
795 QLCNIC_DEL_VLAN = 2
796};
797
781#define QLC_DEV_DRV_DEFAULT 0x11111111 798#define QLC_DEV_DRV_DEFAULT 0x11111111
782 799
783#define LSB(x) ((uint8_t)(x)) 800#define LSB(x) ((uint8_t)(x))
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e08c8b0556a4..c198df90ff3c 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -297,8 +297,8 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
297 break; 297 break;
298 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) { 298 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
299 dev_err(&adapter->pdev->dev, 299 dev_err(&adapter->pdev->dev,
300 "Failed to acquire sem=%d lock;reg_id=%d\n", 300 "Failed to acquire sem=%d lock; holdby=%d\n",
301 sem, id_reg); 301 sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
302 return -EIO; 302 return -EIO;
303 } 303 }
304 msleep(1); 304 msleep(1);
@@ -375,7 +375,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
375 375
376static int 376static int
377qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, 377qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
378 unsigned op) 378 u16 vlan_id, unsigned op)
379{ 379{
380 struct qlcnic_nic_req req; 380 struct qlcnic_nic_req req;
381 struct qlcnic_mac_req *mac_req; 381 struct qlcnic_mac_req *mac_req;
@@ -391,6 +391,8 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
391 mac_req->op = op; 391 mac_req->op = op;
392 memcpy(mac_req->mac_addr, addr, 6); 392 memcpy(mac_req->mac_addr, addr, 6);
393 393
394 req.words[1] = cpu_to_le64(vlan_id);
395
394 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 396 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
395} 397}
396 398
@@ -415,7 +417,7 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
415 memcpy(cur->mac_addr, addr, ETH_ALEN); 417 memcpy(cur->mac_addr, addr, ETH_ALEN);
416 418
417 if (qlcnic_sre_macaddr_change(adapter, 419 if (qlcnic_sre_macaddr_change(adapter,
418 cur->mac_addr, QLCNIC_MAC_ADD)) { 420 cur->mac_addr, 0, QLCNIC_MAC_ADD)) {
419 kfree(cur); 421 kfree(cur);
420 return -EIO; 422 return -EIO;
421 } 423 }
@@ -485,12 +487,63 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
485 while (!list_empty(head)) { 487 while (!list_empty(head)) {
486 cur = list_entry(head->next, struct qlcnic_mac_list_s, list); 488 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
487 qlcnic_sre_macaddr_change(adapter, 489 qlcnic_sre_macaddr_change(adapter,
488 cur->mac_addr, QLCNIC_MAC_DEL); 490 cur->mac_addr, 0, QLCNIC_MAC_DEL);
489 list_del(&cur->list); 491 list_del(&cur->list);
490 kfree(cur); 492 kfree(cur);
491 } 493 }
492} 494}
493 495
496void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
497{
498 struct qlcnic_filter *tmp_fil;
499 struct hlist_node *tmp_hnode, *n;
500 struct hlist_head *head;
501 int i;
502
503 for (i = 0; i < adapter->fhash.fmax; i++) {
504 head = &(adapter->fhash.fhead[i]);
505
506 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
507 {
508 if (jiffies >
509 (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
510 qlcnic_sre_macaddr_change(adapter,
511 tmp_fil->faddr, tmp_fil->vlan_id,
512 tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
513 QLCNIC_MAC_DEL);
514 spin_lock_bh(&adapter->mac_learn_lock);
515 adapter->fhash.fnum--;
516 hlist_del(&tmp_fil->fnode);
517 spin_unlock_bh(&adapter->mac_learn_lock);
518 kfree(tmp_fil);
519 }
520 }
521 }
522}
523
524void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
525{
526 struct qlcnic_filter *tmp_fil;
527 struct hlist_node *tmp_hnode, *n;
528 struct hlist_head *head;
529 int i;
530
531 for (i = 0; i < adapter->fhash.fmax; i++) {
532 head = &(adapter->fhash.fhead[i]);
533
534 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
535 qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
536 tmp_fil->vlan_id, tmp_fil->vlan_id ?
537 QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
538 spin_lock_bh(&adapter->mac_learn_lock);
539 adapter->fhash.fnum--;
540 hlist_del(&tmp_fil->fnode);
541 spin_unlock_bh(&adapter->mac_learn_lock);
542 kfree(tmp_fil);
543 }
544 }
545}
546
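Editor's note: both filter walkers above use hlist_for_each_entry_safe(), which caches the next pointer before the loop body runs, so hlist_del() on the current node cannot derail the traversal. A standalone model of the same safe-removal pattern on a singly linked list:

	#include <stdio.h>
	#include <stdlib.h>

	struct node { int key; struct node *next; };

	/* Delete every node matching key; 'next' is saved before the current
	 * node is freed, mirroring what hlist_for_each_entry_safe() guarantees. */
	static void delete_matching(struct node **head, int key)
	{
		struct node *cur, *next, **link = head;

		for (cur = *head; cur; cur = next) {
			next = cur->next;            /* cache before any removal */
			if (cur->key == key) {
				*link = next;
				free(cur);
			} else {
				link = &cur->next;
			}
		}
	}

	int main(void)
	{
		struct node *c = malloc(sizeof(*c)), *b = malloc(sizeof(*b));
		struct node *a = malloc(sizeof(*a));

		a->key = 1; a->next = b;
		b->key = 2; b->next = c;
		c->key = 1; c->next = NULL;
		delete_matching(&a, 1);              /* leaves only the key-2 node */
		printf("head key=%d\n", a ? a->key : -1);
		return 0;
	}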
494#define QLCNIC_CONFIG_INTR_COALESCE 3 547#define QLCNIC_CONFIG_INTR_COALESCE 3
495 548
496/* 549/*
@@ -715,19 +768,6 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
715 return rc; 768 return rc;
716} 769}
717 770
718int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u8 *mac)
719{
720 u32 crbaddr;
721 int pci_func = adapter->ahw.pci_func;
722
723 crbaddr = CRB_MAC_BLOCK_START +
724 (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
725
726 qlcnic_fetch_mac(adapter, crbaddr, crbaddr+4, pci_func & 1, mac);
727
728 return 0;
729}
730
731/* 771/*
732 * Changes the CRB window to the specified window. 772 * Changes the CRB window to the specified window.
733 */ 773 */
@@ -1245,4 +1285,5 @@ void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
1245 mode = VPORT_MISS_MODE_ACCEPT_MULTI; 1285 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1246 1286
1247 qlcnic_nic_set_promisc(adapter, mode); 1287 qlcnic_nic_set_promisc(adapter, mode);
1288 msleep(1000);
1248} 1289}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 2c7cf0b64811..5c33d15c874a 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -25,6 +25,7 @@
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/if_vlan.h>
28#include "qlcnic.h" 29#include "qlcnic.h"
29 30
30struct crb_addr_pair { 31struct crb_addr_pair {
@@ -45,6 +46,9 @@ static void
45qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter, 46qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
46 struct qlcnic_host_rds_ring *rds_ring); 47 struct qlcnic_host_rds_ring *rds_ring);
47 48
49static int
50qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter);
51
48static void crb_addr_transform_setup(void) 52static void crb_addr_transform_setup(void)
49{ 53{
50 crb_addr_transform(XDMA); 54 crb_addr_transform(XDMA);
@@ -136,8 +140,6 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
136 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 140 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
137 rds_ring = &recv_ctx->rds_rings[ring]; 141 rds_ring = &recv_ctx->rds_rings[ring];
138 142
139 spin_lock(&rds_ring->lock);
140
141 INIT_LIST_HEAD(&rds_ring->free_list); 143 INIT_LIST_HEAD(&rds_ring->free_list);
142 144
143 rx_buf = rds_ring->rx_buf_arr; 145 rx_buf = rds_ring->rx_buf_arr;
@@ -146,8 +148,6 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
146 &rds_ring->free_list); 148 &rds_ring->free_list);
147 rx_buf++; 149 rx_buf++;
148 } 150 }
149
150 spin_unlock(&rds_ring->lock);
151 } 151 }
152} 152}
153 153
@@ -439,11 +439,14 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
439 u32 off; 439 u32 off;
440 struct pci_dev *pdev = adapter->pdev; 440 struct pci_dev *pdev = adapter->pdev;
441 441
442 /* resetall */ 442 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
443 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
444
443 qlcnic_rom_lock(adapter); 445 qlcnic_rom_lock(adapter);
444 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff); 446 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
445 qlcnic_rom_unlock(adapter); 447 qlcnic_rom_unlock(adapter);
446 448
449 /* Init HW CRB block */
447 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) || 450 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
448 qlcnic_rom_fast_read(adapter, 4, &n) != 0) { 451 qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
449 dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n); 452 dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
@@ -524,13 +527,10 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
524 } 527 }
525 kfree(buf); 528 kfree(buf);
526 529
527 /* p2dn replyCount */ 530 /* Initialize protocol process engine */
528 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e); 531 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
529 /* disable_peg_cache 0 & 1*/
530 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8); 532 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
531 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8); 533 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
532
533 /* peg_clr_all */
534 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0); 534 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
535 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0); 535 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
536 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0); 536 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
@@ -539,9 +539,87 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
539 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0); 539 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
540 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0); 540 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
541 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); 541 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
542 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
543 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
544 msleep(1);
545 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
546 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
542 return 0; 547 return 0;
543} 548}
544 549
550static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
551{
552 u32 val;
553 int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
554
555 do {
556 val = QLCRD32(adapter, CRB_CMDPEG_STATE);
557
558 switch (val) {
559 case PHAN_INITIALIZE_COMPLETE:
560 case PHAN_INITIALIZE_ACK:
561 return 0;
562 case PHAN_INITIALIZE_FAILED:
563 goto out_err;
564 default:
565 break;
566 }
567
568 msleep(QLCNIC_CMDPEG_CHECK_DELAY);
569
570 } while (--retries);
571
572 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
573
574out_err:
575 dev_err(&adapter->pdev->dev, "Command Peg initialization not "
576 "complete, state: 0x%x.\n", val);
577 return -EIO;
578}
579
580static int
581qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
582{
583 u32 val;
584 int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
585
586 do {
587 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
588
589 if (val == PHAN_PEG_RCV_INITIALIZED)
590 return 0;
591
592 msleep(QLCNIC_RCVPEG_CHECK_DELAY);
593
594 } while (--retries);
595
596 if (!retries) {
597 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
598 "complete, state: 0x%x.\n", val);
599 return -EIO;
600 }
601
602 return 0;
603}
604
605int
606qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
607{
608 int err;
609
610 err = qlcnic_cmd_peg_ready(adapter);
611 if (err)
612 return err;
613
614 err = qlcnic_receive_peg_ready(adapter);
615 if (err)
616 return err;
617
618 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
619
620 return err;
621}
622
545int 623int
546qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { 624qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
547 625
@@ -557,12 +635,12 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
557 } 635 }
558 adapter->physical_port = (val >> 2); 636 adapter->physical_port = (val >> 2);
559 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) 637 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
560 timeo = 30; 638 timeo = QLCNIC_INIT_TIMEOUT_SECS;
561 639
562 adapter->dev_init_timeo = timeo; 640 adapter->dev_init_timeo = timeo;
563 641
564 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo)) 642 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
565 timeo = 10; 643 timeo = QLCNIC_RESET_TIMEOUT_SECS;
566 644
567 adapter->reset_ack_timeo = timeo; 645 adapter->reset_ack_timeo = timeo;
568 646
@@ -906,54 +984,47 @@ qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
906 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); 984 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
907} 985}
908 986
909int 987static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
910qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
911{ 988{
912 u32 count, old_count; 989 if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
913 u32 val, version, major, minor, build; 990 dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
914 int i, timeout;
915
916 if (adapter->need_fw_reset)
917 return 1;
918 991
919 /* last attempt had failed */ 992 qlcnic_pcie_sem_unlock(adapter, 2);
920 if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) 993}
921 return 1;
922 994
923 old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); 995static int
996qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
997{
998 u32 heartbeat, ret = -EIO;
999 int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
924 1000
925 for (i = 0; i < 10; i++) { 1001 adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
926 1002
927 timeout = msleep_interruptible(200); 1003 do {
928 if (timeout) { 1004 msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
929 QLCWR32(adapter, CRB_CMDPEG_STATE, 1005 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
930 PHAN_INITIALIZE_FAILED); 1006 if (heartbeat != adapter->heartbeat) {
931 return -EINTR; 1007 ret = QLCNIC_RCODE_SUCCESS;
1008 break;
932 } 1009 }
1010 } while (--retries);
933 1011
934 count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); 1012 return ret;
935 if (count != old_count) 1013}
936 break;
937 }
938 1014
939 /* firmware is dead */ 1015int
940 if (count == old_count) 1016qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
1017{
1018 if (qlcnic_check_fw_heartbeat(adapter)) {
1019 qlcnic_rom_lock_recovery(adapter);
941 return 1; 1020 return 1;
1021 }
942 1022
943 /* check if we have got newer or different file firmware */ 1023 if (adapter->need_fw_reset)
944 if (adapter->fw) { 1024 return 1;
945
946 val = qlcnic_get_fw_version(adapter);
947
948 version = QLCNIC_DECODE_VERSION(val);
949
950 major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
951 minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
952 build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
953 1025
954 if (version > QLCNIC_VERSION_CODE(major, minor, build)) 1026 if (adapter->fw)
955 return 1; 1027 return 1;
956 }
957 1028
958 return 0; 1029 return 0;
959} 1030}
@@ -1089,18 +1160,6 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1089 return -EINVAL; 1160 return -EINVAL;
1090 } 1161 }
1091 1162
1092 /* check if flashed firmware is newer */
1093 if (qlcnic_rom_fast_read(adapter,
1094 QLCNIC_FW_VERSION_OFFSET, (int *)&val))
1095 return -EIO;
1096
1097 val = QLCNIC_DECODE_VERSION(val);
1098 if (val > ver) {
1099 dev_info(&pdev->dev, "%s: firmware is older than flash\n",
1100 fw_name[fw_type]);
1101 return -EINVAL;
1102 }
1103
1104 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC); 1163 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
1105 return 0; 1164 return 0;
1106} 1165}
@@ -1162,78 +1221,6 @@ qlcnic_release_firmware(struct qlcnic_adapter *adapter)
1162 adapter->fw = NULL; 1221 adapter->fw = NULL;
1163} 1222}
1164 1223
1165static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
1166{
1167 u32 val;
1168 int retries = 60;
1169
1170 do {
1171 val = QLCRD32(adapter, CRB_CMDPEG_STATE);
1172
1173 switch (val) {
1174 case PHAN_INITIALIZE_COMPLETE:
1175 case PHAN_INITIALIZE_ACK:
1176 return 0;
1177 case PHAN_INITIALIZE_FAILED:
1178 goto out_err;
1179 default:
1180 break;
1181 }
1182
1183 msleep(500);
1184
1185 } while (--retries);
1186
1187 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1188
1189out_err:
1190 dev_err(&adapter->pdev->dev, "Command Peg initialization not "
1191 "complete, state: 0x%x.\n", val);
1192 return -EIO;
1193}
1194
1195static int
1196qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
1197{
1198 u32 val;
1199 int retries = 2000;
1200
1201 do {
1202 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
1203
1204 if (val == PHAN_PEG_RCV_INITIALIZED)
1205 return 0;
1206
1207 msleep(10);
1208
1209 } while (--retries);
1210
1211 if (!retries) {
1212 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
1213 "complete, state: 0x%x.\n", val);
1214 return -EIO;
1215 }
1216
1217 return 0;
1218}
1219
1220int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
1221{
1222 int err;
1223
1224 err = qlcnic_cmd_peg_ready(adapter);
1225 if (err)
1226 return err;
1227
1228 err = qlcnic_receive_peg_ready(adapter);
1229 if (err)
1230 return err;
1231
1232 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
1233
1234 return err;
1235}
1236
1237static void 1224static void
1238qlcnic_handle_linkevent(struct qlcnic_adapter *adapter, 1225qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
1239 struct qlcnic_fw_msg *msg) 1226 struct qlcnic_fw_msg *msg)
@@ -1351,11 +1338,12 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1351 1338
1352 skb = buffer->skb; 1339 skb = buffer->skb;
1353 1340
1354 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) { 1341 if (likely(adapter->rx_csum && (cksum == STATUS_CKSUM_OK ||
1342 cksum == STATUS_CKSUM_LOOP))) {
1355 adapter->stats.csummed++; 1343 adapter->stats.csummed++;
1356 skb->ip_summed = CHECKSUM_UNNECESSARY; 1344 skb->ip_summed = CHECKSUM_UNNECESSARY;
1357 } else { 1345 } else {
1358 skb->ip_summed = CHECKSUM_NONE; 1346 skb_checksum_none_assert(skb);
1359 } 1347 }
1360 1348
1361 skb->dev = adapter->netdev; 1349 skb->dev = adapter->netdev;
@@ -1365,6 +1353,31 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1365 return skb; 1353 return skb;
1366} 1354}
1367 1355
1356static int
1357qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
1358 u16 *vlan_tag)
1359{
1360 struct ethhdr *eth_hdr;
1361
1362 if (!__vlan_get_tag(skb, vlan_tag)) {
1363 eth_hdr = (struct ethhdr *) skb->data;
1364 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
1365 skb_pull(skb, VLAN_HLEN);
1366 }
1367 if (!adapter->pvid)
1368 return 0;
1369
1370 if (*vlan_tag == adapter->pvid) {
1371 /* Outer vlan tag. Packet should follow non-vlan path */
1372 *vlan_tag = 0xffff;
1373 return 0;
1374 }
1375 if (adapter->flags & QLCNIC_TAGGING_ENABLED)
1376 return 0;
1377
1378 return -EINVAL;
1379}
1380
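Editor's note: qlcnic_check_rx_tagging() strips a VLAN header in place — __vlan_get_tag() returns 0 when an 802.1Q tag is present, the two MAC addresses are slid forward by VLAN_HLEN, and skb_pull() drops the now-dead 4 bytes; the PVID checks then decide whether the frame proceeds untagged (vid forced to 0xffff) or is rejected. A standalone model of the untagging step on a raw Ethernet buffer:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN    6
	#define VLAN_HLEN   4
	#define ETH_P_8021Q 0x8100

	/* Model of the in-place untag: slides the MAC addresses over the 4-byte
	 * 802.1Q header and advances the frame start, as the driver does with
	 * memmove() + skb_pull(). Returns the vlan id, or -1 if untagged. */
	static int vlan_untag(uint8_t **frame, size_t *len)
	{
		uint8_t *p = *frame;
		uint16_t proto = (p[12] << 8) | p[13];
		int tci;

		if (proto != ETH_P_8021Q)
			return -1;
		tci = (p[14] << 8) | p[15];
		memmove(p + VLAN_HLEN, p, ETH_ALEN * 2);  /* slide MACs over the tag */
		*frame = p + VLAN_HLEN;                   /* model of skb_pull() */
		*len -= VLAN_HLEN;
		return tci & 0x0fff;                      /* drop priority bits */
	}

	int main(void)
	{
		uint8_t buf[64] = { [12] = 0x81, [13] = 0x00, [15] = 100 };
		uint8_t *frame = buf;
		size_t len = sizeof(buf);

		printf("vid=%d\n", vlan_untag(&frame, &len));  /* prints vid=100 */
		return 0;
	}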
1368static struct qlcnic_rx_buffer * 1381static struct qlcnic_rx_buffer *
1369qlcnic_process_rcv(struct qlcnic_adapter *adapter, 1382qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1370 struct qlcnic_host_sds_ring *sds_ring, 1383 struct qlcnic_host_sds_ring *sds_ring,
@@ -1376,6 +1389,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1376 struct sk_buff *skb; 1389 struct sk_buff *skb;
1377 struct qlcnic_host_rds_ring *rds_ring; 1390 struct qlcnic_host_rds_ring *rds_ring;
1378 int index, length, cksum, pkt_offset; 1391 int index, length, cksum, pkt_offset;
1392 u16 vid = 0xffff;
1379 1393
1380 if (unlikely(ring >= adapter->max_rds_rings)) 1394 if (unlikely(ring >= adapter->max_rds_rings))
1381 return NULL; 1395 return NULL;
@@ -1404,9 +1418,18 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1404 if (pkt_offset) 1418 if (pkt_offset)
1405 skb_pull(skb, pkt_offset); 1419 skb_pull(skb, pkt_offset);
1406 1420
1421 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1422 adapter->stats.rxdropped++;
1423 dev_kfree_skb(skb);
1424 return buffer;
1425 }
1426
1407 skb->protocol = eth_type_trans(skb, netdev); 1427 skb->protocol = eth_type_trans(skb, netdev);
1408 1428
1409 napi_gro_receive(&sds_ring->napi, skb); 1429 if ((vid != 0xffff) && adapter->vlgrp)
1430 vlan_gro_receive(&sds_ring->napi, adapter->vlgrp, vid, skb);
1431 else
1432 napi_gro_receive(&sds_ring->napi, skb);
1410 1433
1411 adapter->stats.rx_pkts++; 1434 adapter->stats.rx_pkts++;
1412 adapter->stats.rxbytes += length; 1435 adapter->stats.rxbytes += length;
@@ -1435,6 +1458,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1435 int index; 1458 int index;
1436 u16 lro_length, length, data_offset; 1459 u16 lro_length, length, data_offset;
1437 u32 seq_number; 1460 u32 seq_number;
1461 u16 vid = 0xffff;
1438 1462
1439 if (unlikely(ring > adapter->max_rds_rings)) 1463 if (unlikely(ring > adapter->max_rds_rings))
1440 return NULL; 1464 return NULL;
@@ -1466,6 +1490,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1466 skb_put(skb, lro_length + data_offset); 1490 skb_put(skb, lro_length + data_offset);
1467 1491
1468 skb_pull(skb, l2_hdr_offset); 1492 skb_pull(skb, l2_hdr_offset);
1493
1494 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1495 adapter->stats.rxdropped++;
1496 dev_kfree_skb(skb);
1497 return buffer;
1498 }
1499
1469 skb->protocol = eth_type_trans(skb, netdev); 1500 skb->protocol = eth_type_trans(skb, netdev);
1470 1501
1471 iph = (struct iphdr *)skb->data; 1502 iph = (struct iphdr *)skb->data;
@@ -1480,7 +1511,10 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1480 1511
1481 length = skb->len; 1512 length = skb->len;
1482 1513
1483 netif_receive_skb(skb); 1514 if ((vid != 0xffff) && adapter->vlgrp)
1515 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vid);
1516 else
1517 netif_receive_skb(skb);
1484 1518
1485 adapter->stats.lro_pkts++; 1519 adapter->stats.lro_pkts++;
1486 adapter->stats.lrobytes += length; 1520 adapter->stats.lrobytes += length;
@@ -1584,8 +1618,6 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1584 int producer, count = 0; 1618 int producer, count = 0;
1585 struct list_head *head; 1619 struct list_head *head;
1586 1620
1587 spin_lock(&rds_ring->lock);
1588
1589 producer = rds_ring->producer; 1621 producer = rds_ring->producer;
1590 1622
1591 head = &rds_ring->free_list; 1623 head = &rds_ring->free_list;
@@ -1615,7 +1647,6 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1615 writel((producer-1) & (rds_ring->num_desc-1), 1647 writel((producer-1) & (rds_ring->num_desc-1),
1616 rds_ring->crb_rcv_producer); 1648 rds_ring->crb_rcv_producer);
1617 } 1649 }
1618 spin_unlock(&rds_ring->lock);
1619} 1650}
1620 1651
1621static void 1652static void
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 66eea5972020..a3d7705a2dda 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -50,6 +50,10 @@ static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
50/* Default to restricted 1G auto-neg mode */ 50/* Default to restricted 1G auto-neg mode */
51static int wol_port_mode = 5; 51static int wol_port_mode = 5;
52 52
53static int qlcnic_mac_learn;
54module_param(qlcnic_mac_learn, int, 0644);
55MODULE_PARM_DESC(qlcnic_mac_learn, "MAC filter (0=disabled, 1=enabled)");
56
53static int use_msi = 1; 57static int use_msi = 1;
54module_param(use_msi, int, 0644); 58module_param(use_msi, int, 0644);
55MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); 59MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
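
Worth flagging while we are in this hunk: the pre-existing use_msi description string in the context lines above is missing its closing parenthesis. It appears unchanged on both sides of the diff, so the fix belongs in a separate patch:

	MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
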
@@ -94,7 +98,7 @@ static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
94static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); 98static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
95 99
96static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); 100static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
97static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter); 101static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
98static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); 102static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
99 103
100static irqreturn_t qlcnic_tmp_intr(int irq, void *data); 104static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
@@ -103,13 +107,17 @@ static irqreturn_t qlcnic_msi_intr(int irq, void *data);
103static irqreturn_t qlcnic_msix_intr(int irq, void *data); 107static irqreturn_t qlcnic_msix_intr(int irq, void *data);
104 108
105static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); 109static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
106static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long); 110static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
107static int qlcnic_start_firmware(struct qlcnic_adapter *); 111static int qlcnic_start_firmware(struct qlcnic_adapter *);
108 112
113static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
114static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
109static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); 115static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
110static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); 116static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
111static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); 117static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
112static int qlcnicvf_start_firmware(struct qlcnic_adapter *); 118static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
119static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
120 struct qlcnic_esw_func_cfg *);
113/* PCI Device ID Table */ 121/* PCI Device ID Table */
114#define ENTRY(device) \ 122#define ENTRY(device) \
115 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ 123 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -164,7 +172,7 @@ qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
164 172
165 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); 173 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
166 174
167 return (recv_ctx->sds_rings == NULL); 175 return recv_ctx->sds_rings == NULL;
168} 176}
169 177
170static void 178static void
@@ -320,7 +328,7 @@ qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
320 struct net_device *netdev = adapter->netdev; 328 struct net_device *netdev = adapter->netdev;
321 struct pci_dev *pdev = adapter->pdev; 329 struct pci_dev *pdev = adapter->pdev;
322 330
323 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0) 331 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
324 return -EIO; 332 return -EIO;
325 333
326 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); 334 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
@@ -341,6 +349,9 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
341 struct qlcnic_adapter *adapter = netdev_priv(netdev); 349 struct qlcnic_adapter *adapter = netdev_priv(netdev);
342 struct sockaddr *addr = p; 350 struct sockaddr *addr = p;
343 351
352 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
353 return -EOPNOTSUPP;
354
344 if (!is_valid_ether_addr(addr->sa_data)) 355 if (!is_valid_ether_addr(addr->sa_data))
345 return -EINVAL; 356 return -EINVAL;
346 357
@@ -360,6 +371,13 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
360 return 0; 371 return 0;
361} 372}
362 373
374static void qlcnic_vlan_rx_register(struct net_device *netdev,
375 struct vlan_group *grp)
376{
377 struct qlcnic_adapter *adapter = netdev_priv(netdev);
378 adapter->vlgrp = grp;
379}
380
363static const struct net_device_ops qlcnic_netdev_ops = { 381static const struct net_device_ops qlcnic_netdev_ops = {
364 .ndo_open = qlcnic_open, 382 .ndo_open = qlcnic_open,
365 .ndo_stop = qlcnic_close, 383 .ndo_stop = qlcnic_close,
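
The new ndo_vlan_rx_register hook only caches the group pointer. In this kernel generation the VLAN core hands in the group when the first VLAN device is created and, if I recall correctly (treat as an assumption), NULL again once the last one goes away, so every consumer must tolerate adapter->vlgrp == NULL. The receive paths added earlier already follow that pattern:

	/* Dispatch used by qlcnic_process_rcv()/qlcnic_process_lro():
	 * fall back to the untagged path when no group is registered
	 * or no tag was recovered (vid == 0xffff). */
	if (vid != 0xffff && adapter->vlgrp)
		vlan_gro_receive(&sds_ring->napi, adapter->vlgrp, vid, skb);
	else
		napi_gro_receive(&sds_ring->napi, skb);
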
@@ -370,20 +388,19 @@ static const struct net_device_ops qlcnic_netdev_ops = {
370 .ndo_set_mac_address = qlcnic_set_mac, 388 .ndo_set_mac_address = qlcnic_set_mac,
371 .ndo_change_mtu = qlcnic_change_mtu, 389 .ndo_change_mtu = qlcnic_change_mtu,
372 .ndo_tx_timeout = qlcnic_tx_timeout, 390 .ndo_tx_timeout = qlcnic_tx_timeout,
391 .ndo_vlan_rx_register = qlcnic_vlan_rx_register,
373#ifdef CONFIG_NET_POLL_CONTROLLER 392#ifdef CONFIG_NET_POLL_CONTROLLER
374 .ndo_poll_controller = qlcnic_poll_controller, 393 .ndo_poll_controller = qlcnic_poll_controller,
375#endif 394#endif
376}; 395};
377 396
378static struct qlcnic_nic_template qlcnic_ops = { 397static struct qlcnic_nic_template qlcnic_ops = {
379 .get_mac_addr = qlcnic_get_mac_address,
380 .config_bridged_mode = qlcnic_config_bridged_mode, 398 .config_bridged_mode = qlcnic_config_bridged_mode,
381 .config_led = qlcnic_config_led, 399 .config_led = qlcnic_config_led,
382 .start_firmware = qlcnic_start_firmware 400 .start_firmware = qlcnic_start_firmware
383}; 401};
384 402
385static struct qlcnic_nic_template qlcnic_vf_ops = { 403static struct qlcnic_nic_template qlcnic_vf_ops = {
386 .get_mac_addr = qlcnic_get_mac_address,
387 .config_bridged_mode = qlcnicvf_config_bridged_mode, 404 .config_bridged_mode = qlcnicvf_config_bridged_mode,
388 .config_led = qlcnicvf_config_led, 405 .config_led = qlcnicvf_config_led,
389 .start_firmware = qlcnicvf_start_firmware 406 .start_firmware = qlcnicvf_start_firmware
@@ -474,7 +491,7 @@ static int
474qlcnic_init_pci_info(struct qlcnic_adapter *adapter) 491qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
475{ 492{
476 struct qlcnic_pci_info *pci_info; 493 struct qlcnic_pci_info *pci_info;
477 int i, ret = 0, err; 494 int i, ret = 0;
478 u8 pfn; 495 u8 pfn;
479 496
480 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); 497 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
@@ -484,14 +501,14 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
484 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) * 501 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
485 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL); 502 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
486 if (!adapter->npars) { 503 if (!adapter->npars) {
487 err = -ENOMEM; 504 ret = -ENOMEM;
488 goto err_pci_info; 505 goto err_pci_info;
489 } 506 }
490 507
491 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) * 508 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
492 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL); 509 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
493 if (!adapter->eswitch) { 510 if (!adapter->eswitch) {
494 err = -ENOMEM; 511 ret = -ENOMEM;
495 goto err_npars; 512 goto err_npars;
496 } 513 }
497 514
@@ -506,7 +523,6 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
506 adapter->npars[pfn].active = pci_info[i].active; 523 adapter->npars[pfn].active = pci_info[i].active;
507 adapter->npars[pfn].type = pci_info[i].type; 524 adapter->npars[pfn].type = pci_info[i].type;
508 adapter->npars[pfn].phy_port = pci_info[i].default_port; 525 adapter->npars[pfn].phy_port = pci_info[i].default_port;
509 adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
510 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw; 526 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
511 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw; 527 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
512 } 528 }
@@ -539,12 +555,10 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
539 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; 555 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
540 556
541 /* If other drivers are not in use set their privilege level */ 557 /* If other drivers are not in use set their privilege level */
542 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 558 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
543 ret = qlcnic_api_lock(adapter); 559 ret = qlcnic_api_lock(adapter);
544 if (ret) 560 if (ret)
545 goto err_lock; 561 goto err_lock;
546 if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
547 goto err_npar;
548 562
549 if (qlcnic_config_npars) { 563 if (qlcnic_config_npars) {
550 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 564 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
@@ -562,18 +576,16 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
562 adapter->ahw.pci_func)); 576 adapter->ahw.pci_func));
563 } 577 }
564 writel(data, priv_op); 578 writel(data, priv_op);
565err_npar:
566 qlcnic_api_unlock(adapter); 579 qlcnic_api_unlock(adapter);
567err_lock: 580err_lock:
568 return ret; 581 return ret;
569} 582}
570 583
571static u32 584static void
572qlcnic_get_driver_mode(struct qlcnic_adapter *adapter) 585qlcnic_check_vf(struct qlcnic_adapter *adapter)
573{ 586{
574 void __iomem *msix_base_addr; 587 void __iomem *msix_base_addr;
575 void __iomem *priv_op; 588 void __iomem *priv_op;
576 struct qlcnic_info nic_info;
577 u32 func; 589 u32 func;
578 u32 msix_base; 590 u32 msix_base;
579 u32 op_mode, priv_level; 591 u32 op_mode, priv_level;
@@ -588,20 +600,6 @@ qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
588 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE; 600 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
589 adapter->ahw.pci_func = func; 601 adapter->ahw.pci_func = func;
590 602
591 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
592 adapter->capabilities = nic_info.capabilities;
593
594 if (adapter->capabilities & BIT_6)
595 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
596 else
597 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
598 }
599
600 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
601 adapter->nic_ops = &qlcnic_ops;
602 return adapter->fw_hal_version;
603 }
604
605 /* Determine function privilege level */ 603 /* Determine function privilege level */
606 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE; 604 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
607 op_mode = readl(priv_op); 605 op_mode = readl(priv_op);
@@ -610,37 +608,14 @@ qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
610 else 608 else
611 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func); 609 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
612 610
613 switch (priv_level) { 611 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
614 case QLCNIC_MGMT_FUNC:
615 adapter->op_mode = QLCNIC_MGMT_FUNC;
616 adapter->nic_ops = &qlcnic_ops;
617 qlcnic_init_pci_info(adapter);
618 /* Set privilege level for other functions */
619 qlcnic_set_function_modes(adapter);
620 dev_info(&adapter->pdev->dev,
621 "HAL Version: %d, Management function\n",
622 adapter->fw_hal_version);
623 break;
624 case QLCNIC_PRIV_FUNC:
625 adapter->op_mode = QLCNIC_PRIV_FUNC;
626 dev_info(&adapter->pdev->dev,
627 "HAL Version: %d, Privileged function\n",
628 adapter->fw_hal_version);
629 adapter->nic_ops = &qlcnic_ops;
630 break;
631 case QLCNIC_NON_PRIV_FUNC:
632 adapter->op_mode = QLCNIC_NON_PRIV_FUNC; 612 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
633 dev_info(&adapter->pdev->dev, 613 dev_info(&adapter->pdev->dev,
634 "HAL Version: %d Non Privileged function\n", 614 "HAL Version: %d Non Privileged function\n",
635 adapter->fw_hal_version); 615 adapter->fw_hal_version);
636 adapter->nic_ops = &qlcnic_vf_ops; 616 adapter->nic_ops = &qlcnic_vf_ops;
637 break; 617 } else
638 default: 618 adapter->nic_ops = &qlcnic_ops;
639 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
640 priv_level);
641 return 0;
642 }
643 return adapter->fw_hal_version;
644} 619}
645 620
646static int 621static int
@@ -673,10 +648,7 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
673 adapter->ahw.pci_base0 = mem_ptr0; 648 adapter->ahw.pci_base0 = mem_ptr0;
674 adapter->ahw.pci_len0 = pci_len0; 649 adapter->ahw.pci_len0 = pci_len0;
675 650
676 if (!qlcnic_get_driver_mode(adapter)) { 651 qlcnic_check_vf(adapter);
677 iounmap(adapter->ahw.pci_base0);
678 return -EIO;
679 }
680 652
681 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter, 653 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
682 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func))); 654 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
@@ -711,25 +683,7 @@ static void
711qlcnic_check_options(struct qlcnic_adapter *adapter) 683qlcnic_check_options(struct qlcnic_adapter *adapter)
712{ 684{
713 u32 fw_major, fw_minor, fw_build; 685 u32 fw_major, fw_minor, fw_build;
714 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
715 char serial_num[32];
716 int i, offset, val;
717 int *ptr32;
718 struct pci_dev *pdev = adapter->pdev; 686 struct pci_dev *pdev = adapter->pdev;
719 struct qlcnic_info nic_info;
720 adapter->driver_mismatch = 0;
721
722 ptr32 = (int *)&serial_num;
723 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
724 for (i = 0; i < 8; i++) {
725 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
726 dev_err(&pdev->dev, "error reading board info\n");
727 adapter->driver_mismatch = 1;
728 return;
729 }
730 ptr32[i] = cpu_to_le32(val);
731 offset += sizeof(u32);
732 }
733 687
734 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR); 688 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
735 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR); 689 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
@@ -737,14 +691,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
737 691
738 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); 692 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
739 693
740 if (adapter->portnum == 0) {
741 get_brd_name(adapter, brd_name);
742
743 pr_info("%s: %s Board Chip rev 0x%x\n",
744 module_name(THIS_MODULE),
745 brd_name, adapter->ahw.revision_id);
746 }
747
748 dev_info(&pdev->dev, "firmware v%d.%d.%d\n", 694 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
749 fw_major, fw_minor, fw_build); 695 fw_major, fw_minor, fw_build);
750 696
@@ -758,109 +704,333 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
758 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G; 704 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
759 } 705 }
760 706
761 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
762 adapter->physical_port = nic_info.phys_port;
763 adapter->switch_mode = nic_info.switch_mode;
764 adapter->max_tx_ques = nic_info.max_tx_ques;
765 adapter->max_rx_ques = nic_info.max_rx_ques;
766 adapter->capabilities = nic_info.capabilities;
767 adapter->max_mac_filters = nic_info.max_mac_filters;
768 adapter->max_mtu = nic_info.max_mtu;
769 }
770
771 adapter->msix_supported = !!use_msi_x; 707 adapter->msix_supported = !!use_msi_x;
772 adapter->rss_supported = !!use_msi_x; 708 adapter->rss_supported = !!use_msi_x;
773 709
774 adapter->num_txd = MAX_CMD_DESCRIPTORS; 710 adapter->num_txd = MAX_CMD_DESCRIPTORS;
775 711
776 adapter->max_rds_rings = 2; 712 adapter->max_rds_rings = MAX_RDS_RINGS;
713}
714
715static int
716qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
717{
718 int err;
719 struct qlcnic_info nic_info;
720
721 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
722 if (err)
723 return err;
724
725 adapter->physical_port = nic_info.phys_port;
726 adapter->switch_mode = nic_info.switch_mode;
727 adapter->max_tx_ques = nic_info.max_tx_ques;
728 adapter->max_rx_ques = nic_info.max_rx_ques;
729 adapter->capabilities = nic_info.capabilities;
730 adapter->max_mac_filters = nic_info.max_mac_filters;
731 adapter->max_mtu = nic_info.max_mtu;
732
733 if (adapter->capabilities & BIT_6)
734 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
735 else
736 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
737
738 return err;
739}
740
741static void
742qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
743 struct qlcnic_esw_func_cfg *esw_cfg)
744{
745 if (esw_cfg->discard_tagged)
746 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
747 else
748 adapter->flags |= QLCNIC_TAGGING_ENABLED;
749
750 if (esw_cfg->vlan_id)
751 adapter->pvid = esw_cfg->vlan_id;
752 else
753 adapter->pvid = 0;
754}
755
756static void
757qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
758 struct qlcnic_esw_func_cfg *esw_cfg)
759{
760 adapter->flags &= ~QLCNIC_MACSPOOF;
761 adapter->flags &= ~QLCNIC_MAC_OVERRIDE_DISABLED;
762
763 if (esw_cfg->mac_anti_spoof)
764 adapter->flags |= QLCNIC_MACSPOOF;
765
766 if (!esw_cfg->mac_override)
767 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
768
769 qlcnic_set_netdev_features(adapter, esw_cfg);
770}
771
772static int
773qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
774{
775 struct qlcnic_esw_func_cfg esw_cfg;
776
777 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
778 return 0;
779
780 esw_cfg.pci_func = adapter->ahw.pci_func;
781 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
782 return -EIO;
783 qlcnic_set_vlan_config(adapter, &esw_cfg);
784 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
785
786 return 0;
787}
788
789static void
790qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
791 struct qlcnic_esw_func_cfg *esw_cfg)
792{
793 struct net_device *netdev = adapter->netdev;
794 unsigned long features, vlan_features;
795
796 features = (NETIF_F_SG | NETIF_F_IP_CSUM |
797 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
798 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
799 NETIF_F_IPV6_CSUM);
800
801 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
802 features |= (NETIF_F_TSO | NETIF_F_TSO6);
803 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
804 }
805 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
806 features |= NETIF_F_LRO;
807
808 if (esw_cfg->offload_flags & BIT_0) {
809 netdev->features |= features;
810 adapter->rx_csum = 1;
811 if (!(esw_cfg->offload_flags & BIT_1))
812 netdev->features &= ~NETIF_F_TSO;
813 if (!(esw_cfg->offload_flags & BIT_2))
814 netdev->features &= ~NETIF_F_TSO6;
815 } else {
816 netdev->features &= ~features;
817 adapter->rx_csum = 0;
818 }
819
820 netdev->vlan_features = (features & vlan_features);
821}
822
823static int
824qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
825{
826 void __iomem *priv_op;
827 u32 op_mode, priv_level;
828 int err = 0;
829
830 err = qlcnic_initialize_nic(adapter);
831 if (err)
832 return err;
833
834 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
835 return 0;
836
837 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
838 op_mode = readl(priv_op);
839 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
840
841 if (op_mode == QLC_DEV_DRV_DEFAULT)
842 priv_level = QLCNIC_MGMT_FUNC;
843 else
844 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
845
846 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
847 if (priv_level == QLCNIC_MGMT_FUNC) {
848 adapter->op_mode = QLCNIC_MGMT_FUNC;
849 err = qlcnic_init_pci_info(adapter);
850 if (err)
851 return err;
852 /* Set privilege level for other functions */
853 qlcnic_set_function_modes(adapter);
854 dev_info(&adapter->pdev->dev,
855 "HAL Version: %d, Management function\n",
856 adapter->fw_hal_version);
857 } else if (priv_level == QLCNIC_PRIV_FUNC) {
858 adapter->op_mode = QLCNIC_PRIV_FUNC;
859 dev_info(&adapter->pdev->dev,
860 "HAL Version: %d, Privileged function\n",
861 adapter->fw_hal_version);
862 }
863 }
864
865 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
866
867 return err;
868}
869
870static int
871qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
872{
873 struct qlcnic_esw_func_cfg esw_cfg;
874 struct qlcnic_npar_info *npar;
875 u8 i;
876
877 if (adapter->need_fw_reset)
878 return 0;
879
880 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
881 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
882 continue;
883 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
884 esw_cfg.pci_func = i;
885 esw_cfg.offload_flags = BIT_0;
886 esw_cfg.mac_override = BIT_0;
887 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
888 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
889 if (qlcnic_config_switch_port(adapter, &esw_cfg))
890 return -EIO;
891 npar = &adapter->npars[i];
892 npar->pvid = esw_cfg.vlan_id;
893 npar->mac_override = esw_cfg.mac_override;
894 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
895 npar->discard_tagged = esw_cfg.discard_tagged;
896 npar->promisc_mode = esw_cfg.promisc_mode;
897 npar->offload_flags = esw_cfg.offload_flags;
898 }
899
900 return 0;
901}
902
903static int
904qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
905 struct qlcnic_npar_info *npar, int pci_func)
906{
907 struct qlcnic_esw_func_cfg esw_cfg;
908 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
909 esw_cfg.pci_func = pci_func;
910 esw_cfg.vlan_id = npar->pvid;
911 esw_cfg.mac_override = npar->mac_override;
912 esw_cfg.discard_tagged = npar->discard_tagged;
913 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
914 esw_cfg.offload_flags = npar->offload_flags;
915 esw_cfg.promisc_mode = npar->promisc_mode;
916 if (qlcnic_config_switch_port(adapter, &esw_cfg))
917 return -EIO;
918
919 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
920 if (qlcnic_config_switch_port(adapter, &esw_cfg))
921 return -EIO;
922
923 return 0;
777} 924}
778 925
779static int 926static int
780qlcnic_reset_npar_config(struct qlcnic_adapter *adapter) 927qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
781{ 928{
782 int i, err = 0; 929 int i, err;
783 struct qlcnic_npar_info *npar; 930 struct qlcnic_npar_info *npar;
784 struct qlcnic_info nic_info; 931 struct qlcnic_info nic_info;
785 932
786 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || 933 if (!adapter->need_fw_reset)
787 !adapter->need_fw_reset)
788 return 0; 934 return 0;
789 935
790 if (adapter->op_mode == QLCNIC_MGMT_FUNC) { 936 /* Set the NPAR config data after FW reset */
791 /* Set the NPAR config data after FW reset */ 937 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
792 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 938 npar = &adapter->npars[i];
793 npar = &adapter->npars[i]; 939 if (npar->type != QLCNIC_TYPE_NIC)
794 if (npar->type != QLCNIC_TYPE_NIC) 940 continue;
795 continue; 941 err = qlcnic_get_nic_info(adapter, &nic_info, i);
796 err = qlcnic_get_nic_info(adapter, &nic_info, i); 942 if (err)
797 if (err) 943 return err;
798 goto err_out; 944 nic_info.min_tx_bw = npar->min_bw;
799 nic_info.min_tx_bw = npar->min_bw; 945 nic_info.max_tx_bw = npar->max_bw;
800 nic_info.max_tx_bw = npar->max_bw; 946 err = qlcnic_set_nic_info(adapter, &nic_info);
801 err = qlcnic_set_nic_info(adapter, &nic_info); 947 if (err)
948 return err;
949
950 if (npar->enable_pm) {
951 err = qlcnic_config_port_mirroring(adapter,
952 npar->dest_npar, 1, i);
802 if (err) 953 if (err)
803 goto err_out; 954 return err;
955 }
956 err = qlcnic_reset_eswitch_config(adapter, npar, i);
957 if (err)
958 return err;
959 }
960 return 0;
961}
804 962
805 if (npar->enable_pm) { 963static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
806 err = qlcnic_config_port_mirroring(adapter, 964{
807 npar->dest_npar, 1, i); 965 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
808 if (err) 966 u32 npar_state;
809 goto err_out;
810 967
811 } 968 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
812 npar->mac_learning = DEFAULT_MAC_LEARN; 969 return 0;
813 npar->host_vlan_tag = 0; 970
814 npar->promisc_mode = 0; 971 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
815 npar->discard_tagged = 0; 972 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
816 npar->vlan_id = 0; 973 msleep(1000);
817 } 974 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
818 } 975 }
819err_out: 976 if (!npar_opt_timeo) {
977 dev_err(&adapter->pdev->dev,
978 "Waiting for NPAR state to opertional timeout\n");
979 return -EIO;
980 }
981 return 0;
982}
983
984static int
985qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
986{
987 int err;
988
989 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
990 adapter->op_mode != QLCNIC_MGMT_FUNC)
991 return 0;
992
993 err = qlcnic_set_default_offload_settings(adapter);
994 if (err)
995 return err;
996
997 err = qlcnic_reset_npar_config(adapter);
998 if (err)
999 return err;
1000
1001 qlcnic_dev_set_npar_ready(adapter);
1002
820 return err; 1003 return err;
821} 1004}
822 1005
823static int 1006static int
824qlcnic_start_firmware(struct qlcnic_adapter *adapter) 1007qlcnic_start_firmware(struct qlcnic_adapter *adapter)
825{ 1008{
826 int val, err, first_boot; 1009 int err;
827 1010
828 err = qlcnic_can_start_firmware(adapter); 1011 err = qlcnic_can_start_firmware(adapter);
829 if (err < 0) 1012 if (err < 0)
830 return err; 1013 return err;
831 else if (!err) 1014 else if (!err)
832 goto wait_init; 1015 goto check_fw_status;
833
834 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
835 if (first_boot == 0x55555555)
836 /* This is the first boot after power up */
837 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
838 1016
839 if (load_fw_file) 1017 if (load_fw_file)
840 qlcnic_request_firmware(adapter); 1018 qlcnic_request_firmware(adapter);
841 else { 1019 else {
842 if (qlcnic_check_flash_fw_ver(adapter)) 1020 err = qlcnic_check_flash_fw_ver(adapter);
1021 if (err)
843 goto err_out; 1022 goto err_out;
844 1023
845 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE; 1024 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
846 } 1025 }
847 1026
848 err = qlcnic_need_fw_reset(adapter); 1027 err = qlcnic_need_fw_reset(adapter);
849 if (err < 0)
850 goto err_out;
851 if (err == 0) 1028 if (err == 0)
852 goto wait_init; 1029 goto check_fw_status;
853
854 if (first_boot != 0x55555555) {
855 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
856 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
857 qlcnic_pinit_from_rom(adapter);
858 msleep(1);
859 }
860
861 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
862 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
863 1030
1031 err = qlcnic_pinit_from_rom(adapter);
1032 if (err)
1033 goto err_out;
864 qlcnic_set_port_mode(adapter); 1034 qlcnic_set_port_mode(adapter);
865 1035
866 err = qlcnic_load_firmware(adapter); 1036 err = qlcnic_load_firmware(adapter);
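
To summarize the eswitch plumbing added in the hunk above: qlcnic_set_eswitch_port_config() fetches the per-function config, qlcnic_set_vlan_config() derives adapter->pvid and the tagging flag, and qlcnic_set_netdev_features() maps esw_cfg->offload_flags onto netdev features. A hedged decode of those bits as the new code consumes them (the bit meanings are inferred from this patch, not documented in it):

	/* BIT_0: offloads enabled at all, BIT_1: keep TSO,
	 * BIT_2: keep TSO6 (inferred from qlcnic_set_netdev_features). */
	static unsigned long qlcnic_offload_to_features(u8 offload_flags,
							unsigned long features)
	{
		if (!(offload_flags & BIT_0))
			return 0;		/* everything off */
		if (!(offload_flags & BIT_1))
			features &= ~NETIF_F_TSO;
		if (!(offload_flags & BIT_2))
			features &= ~NETIF_F_TSO6;
		return features;
	}
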
@@ -868,26 +1038,27 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
868 goto err_out; 1038 goto err_out;
869 1039
870 qlcnic_release_firmware(adapter); 1040 qlcnic_release_firmware(adapter);
1041 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
871 1042
872 val = (_QLCNIC_LINUX_MAJOR << 16) 1043check_fw_status:
873 | ((_QLCNIC_LINUX_MINOR << 8)) 1044 err = qlcnic_check_fw_status(adapter);
874 | (_QLCNIC_LINUX_SUBVERSION);
875 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
876
877wait_init:
878 /* Handshake with the card before we register the devices. */
879 err = qlcnic_init_firmware(adapter);
880 if (err) 1045 if (err)
881 goto err_out; 1046 goto err_out;
882 1047
883 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); 1048 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
884 qlcnic_idc_debug_info(adapter, 1); 1049 qlcnic_idc_debug_info(adapter, 1);
885 1050
886 qlcnic_check_options(adapter); 1051 err = qlcnic_check_eswitch_mode(adapter);
887 if (qlcnic_reset_npar_config(adapter)) 1052 if (err) {
1053 dev_err(&adapter->pdev->dev,
1054 "Memory allocation failed for eswitch\n");
1055 goto err_out;
1056 }
1057 err = qlcnic_set_mgmt_operations(adapter);
1058 if (err)
888 goto err_out; 1059 goto err_out;
889 qlcnic_dev_set_npar_ready(adapter);
890 1060
1061 qlcnic_check_options(adapter);
891 adapter->need_fw_reset = 0; 1062 adapter->need_fw_reset = 0;
892 1063
893 qlcnic_release_firmware(adapter); 1064 qlcnic_release_firmware(adapter);
@@ -896,6 +1067,7 @@ wait_init:
896err_out: 1067err_out:
897 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED); 1068 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
898 dev_err(&adapter->pdev->dev, "Device state set to failed\n"); 1069 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
1070
899 qlcnic_release_firmware(adapter); 1071 qlcnic_release_firmware(adapter);
900 return err; 1072 return err;
901} 1073}
@@ -979,6 +1151,8 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
979 1151
980 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) 1152 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
981 return 0; 1153 return 0;
1154 if (qlcnic_set_eswitch_port_config(adapter))
1155 return -EIO;
982 1156
983 if (qlcnic_fw_create_ctx(adapter)) 1157 if (qlcnic_fw_create_ctx(adapter))
984 return -EIO; 1158 return -EIO;
@@ -998,7 +1172,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
998 1172
999 qlcnic_config_intr_coalesce(adapter); 1173 qlcnic_config_intr_coalesce(adapter);
1000 1174
1001 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 1175 if (netdev->features & NETIF_F_LRO)
1002 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED); 1176 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1003 1177
1004 qlcnic_napi_enable(adapter); 1178 qlcnic_napi_enable(adapter);
@@ -1041,6 +1215,9 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1041 1215
1042 qlcnic_free_mac_list(adapter); 1216 qlcnic_free_mac_list(adapter);
1043 1217
1218 if (adapter->fhash.fnum)
1219 qlcnic_delete_lb_filters(adapter);
1220
1044 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE); 1221 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1045 1222
1046 qlcnic_napi_disable(adapter); 1223 qlcnic_napi_disable(adapter);
@@ -1277,7 +1454,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1277 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); 1454 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1278 1455
1279 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | 1456 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1280 NETIF_F_IPV6_CSUM | NETIF_F_GRO); 1457 NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
1281 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | 1458 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1282 NETIF_F_IPV6_CSUM); 1459 NETIF_F_IPV6_CSUM);
1283 1460
@@ -1296,12 +1473,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1296 1473
1297 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 1474 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1298 netdev->features |= NETIF_F_LRO; 1475 netdev->features |= NETIF_F_LRO;
1299
1300 netdev->irq = adapter->msix_entries[0].vector; 1476 netdev->irq = adapter->msix_entries[0].vector;
1301 1477
1302 if (qlcnic_read_mac_addr(adapter))
1303 dev_warn(&pdev->dev, "failed to read mac addr\n");
1304
1305 netif_carrier_off(netdev); 1478 netif_carrier_off(netdev);
1306 netif_stop_queue(netdev); 1479 netif_stop_queue(netdev);
1307 1480
@@ -1338,6 +1511,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1338 int err; 1511 int err;
1339 uint8_t revision_id; 1512 uint8_t revision_id;
1340 uint8_t pci_using_dac; 1513 uint8_t pci_using_dac;
1514 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
1341 1515
1342 err = pci_enable_device(pdev); 1516 err = pci_enable_device(pdev);
1343 if (err) 1517 if (err)
@@ -1395,10 +1569,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1395 goto err_out_iounmap; 1569 goto err_out_iounmap;
1396 } 1570 }
1397 1571
1398 if (qlcnic_read_mac_addr(adapter)) 1572 err = qlcnic_setup_idc_param(adapter);
1399 dev_warn(&pdev->dev, "failed to read mac addr\n"); 1573 if (err)
1400
1401 if (qlcnic_setup_idc_param(adapter))
1402 goto err_out_iounmap; 1574 goto err_out_iounmap;
1403 1575
1404 err = adapter->nic_ops->start_firmware(adapter); 1576 err = adapter->nic_ops->start_firmware(adapter);
@@ -1407,6 +1579,17 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1407 goto err_out_decr_ref; 1579 goto err_out_decr_ref;
1408 } 1580 }
1409 1581
1582 if (qlcnic_read_mac_addr(adapter))
1583 dev_warn(&pdev->dev, "failed to read mac addr\n");
1584
1585 if (adapter->portnum == 0) {
1586 get_brd_name(adapter, brd_name);
1587
1588 pr_info("%s: %s Board Chip rev 0x%x\n",
1589 module_name(THIS_MODULE),
1590 brd_name, adapter->ahw.revision_id);
1591 }
1592
1410 qlcnic_clear_stats(adapter); 1593 qlcnic_clear_stats(adapter);
1411 1594
1412 qlcnic_setup_intr(adapter); 1595 qlcnic_setup_intr(adapter);
@@ -1430,6 +1613,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1430 break; 1613 break;
1431 } 1614 }
1432 1615
1616 qlcnic_alloc_lb_filters_mem(adapter);
1433 qlcnic_create_diag_entries(adapter); 1617 qlcnic_create_diag_entries(adapter);
1434 1618
1435 return 0; 1619 return 0;
@@ -1438,7 +1622,7 @@ err_out_disable_msi:
1438 qlcnic_teardown_intr(adapter); 1622 qlcnic_teardown_intr(adapter);
1439 1623
1440err_out_decr_ref: 1624err_out_decr_ref:
1441 qlcnic_clr_all_drv_state(adapter); 1625 qlcnic_clr_all_drv_state(adapter, 0);
1442 1626
1443err_out_iounmap: 1627err_out_iounmap:
1444 qlcnic_cleanup_pci_map(adapter); 1628 qlcnic_cleanup_pci_map(adapter);
@@ -1477,10 +1661,12 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
1477 if (adapter->eswitch != NULL) 1661 if (adapter->eswitch != NULL)
1478 kfree(adapter->eswitch); 1662 kfree(adapter->eswitch);
1479 1663
1480 qlcnic_clr_all_drv_state(adapter); 1664 qlcnic_clr_all_drv_state(adapter, 0);
1481 1665
1482 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1666 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1483 1667
1668 qlcnic_free_lb_filters_mem(adapter);
1669
1484 qlcnic_teardown_intr(adapter); 1670 qlcnic_teardown_intr(adapter);
1485 1671
1486 qlcnic_remove_diag_entries(adapter); 1672 qlcnic_remove_diag_entries(adapter);
@@ -1509,7 +1695,7 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
1509 if (netif_running(netdev)) 1695 if (netif_running(netdev))
1510 qlcnic_down(adapter, netdev); 1696 qlcnic_down(adapter, netdev);
1511 1697
1512 qlcnic_clr_all_drv_state(adapter); 1698 qlcnic_clr_all_drv_state(adapter, 0);
1513 1699
1514 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1700 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1515 1701
@@ -1573,7 +1759,7 @@ qlcnic_resume(struct pci_dev *pdev)
1573 if (err) 1759 if (err)
1574 goto done; 1760 goto done;
1575 1761
1576 qlcnic_config_indev_addr(netdev, NETDEV_UP); 1762 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1577 } 1763 }
1578done: 1764done:
1579 netif_device_attach(netdev); 1765 netif_device_attach(netdev);
@@ -1587,9 +1773,6 @@ static int qlcnic_open(struct net_device *netdev)
1587 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1773 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1588 int err; 1774 int err;
1589 1775
1590 if (adapter->driver_mismatch)
1591 return -EIO;
1592
1593 err = qlcnic_attach(adapter); 1776 err = qlcnic_attach(adapter);
1594 if (err) 1777 if (err)
1595 return err; 1778 return err;
@@ -1619,6 +1802,119 @@ static int qlcnic_close(struct net_device *netdev)
1619} 1802}
1620 1803
1621static void 1804static void
1805qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1806{
1807 void *head;
1808 int i;
1809
1810 if (!qlcnic_mac_learn)
1811 return;
1812
1813 spin_lock_init(&adapter->mac_learn_lock);
1814
1815 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1816 GFP_KERNEL);
1817 if (!head)
1818 return;
1819
1820 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1821 adapter->fhash.fhead = (struct hlist_head *)head;
1822
1823 for (i = 0; i < adapter->fhash.fmax; i++)
1824 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1825}
1826
1827static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1828{
1829 if (adapter->fhash.fmax && adapter->fhash.fhead)
1830 kfree(adapter->fhash.fhead);
1831
1832 adapter->fhash.fhead = NULL;
1833 adapter->fhash.fmax = 0;
1834}
1835
1836static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
1837 u64 uaddr, u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
1838{
1839 struct cmd_desc_type0 *hwdesc;
1840 struct qlcnic_nic_req *req;
1841 struct qlcnic_mac_req *mac_req;
1842 u32 producer;
1843 u64 word;
1844
1845 producer = tx_ring->producer;
1846 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1847
1848 req = (struct qlcnic_nic_req *)hwdesc;
1849 memset(req, 0, sizeof(struct qlcnic_nic_req));
1850 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1851
1852 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1853 req->req_hdr = cpu_to_le64(word);
1854
1855 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
1856 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
1857 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1858
1859 req->words[1] = cpu_to_le64(vlan_id);
1860
1861 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1862}
1863
1864#define QLCNIC_MAC_HASH(MAC)\
1865 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1866
1867static void
1868qlcnic_send_filter(struct qlcnic_adapter *adapter,
1869 struct qlcnic_host_tx_ring *tx_ring,
1870 struct cmd_desc_type0 *first_desc,
1871 struct sk_buff *skb)
1872{
1873 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1874 struct qlcnic_filter *fil, *tmp_fil;
1875 struct hlist_node *tmp_hnode, *n;
1876 struct hlist_head *head;
1877 u64 src_addr = 0;
1878 u16 vlan_id = 0;
1879 u8 hindex;
1880
1881 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1882 return;
1883
1884 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1885 return;
1886
1887 /* Only NPAR-capable devices support VLAN-based learning */
1888 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1889 vlan_id = first_desc->vlan_TCI;
1890 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1891 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1892 head = &(adapter->fhash.fhead[hindex]);
1893
1894 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
1895 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1896 tmp_fil->vlan_id == vlan_id) {
1897 tmp_fil->ftime = jiffies;
1898 return;
1899 }
1900 }
1901
1902 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1903 if (!fil)
1904 return;
1905
1906 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
1907
1908 fil->ftime = jiffies;
1909 fil->vlan_id = vlan_id;
1910 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1911 spin_lock(&adapter->mac_learn_lock);
1912 hlist_add_head(&(fil->fnode), head);
1913 adapter->fhash.fnum++;
1914 spin_unlock(&adapter->mac_learn_lock);
1915}
1916
1917static void
1622qlcnic_tso_check(struct net_device *netdev, 1918qlcnic_tso_check(struct net_device *netdev,
1623 struct qlcnic_host_tx_ring *tx_ring, 1919 struct qlcnic_host_tx_ring *tx_ring,
1624 struct cmd_desc_type0 *first_desc, 1920 struct cmd_desc_type0 *first_desc,
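
A quick sanity check of QLCNIC_MAC_HASH(): with the MAC copied little-endian into a u64, the macro takes bits 16-18 as the low three index bits and bits 40-42 as the high three, i.e. a 6-bit bucket index built from the third and sixth octets of the address. The `& (QLCNIC_LB_MAX_FILTERS - 1)` mask in qlcnic_send_filter() therefore only matters if the table is smaller than 64 entries (the constant's value is not shown in this patch). Equivalent form:

	/* Same value as QLCNIC_MAC_HASH(mac), written with shifts. */
	static inline u8 mac_hash_sketch(u64 mac)
	{
		return ((mac >> 16) & 0x7) | (((mac >> 40) & 0x7) << 3);
	}

Also note the lookup walk runs without mac_learn_lock; only the insertion takes it, which is presumably safe as long as learning is driven from a single TX context.
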
@@ -1626,26 +1922,13 @@ qlcnic_tso_check(struct net_device *netdev,
1626{ 1922{
1627 u8 opcode = TX_ETHER_PKT; 1923 u8 opcode = TX_ETHER_PKT;
1628 __be16 protocol = skb->protocol; 1924 __be16 protocol = skb->protocol;
1629 u16 flags = 0, vid = 0; 1925 u16 flags = 0;
1630 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0; 1926 int copied, offset, copy_len, hdr_len = 0, tso = 0;
1631 struct cmd_desc_type0 *hwdesc; 1927 struct cmd_desc_type0 *hwdesc;
1632 struct vlan_ethhdr *vh; 1928 struct vlan_ethhdr *vh;
1633 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1929 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1634 u32 producer = tx_ring->producer; 1930 u32 producer = tx_ring->producer;
1635 1931 int vlan_oob = first_desc->flags_opcode & cpu_to_le16(FLAGS_VLAN_OOB);
1636 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1637
1638 vh = (struct vlan_ethhdr *)skb->data;
1639 protocol = vh->h_vlan_encapsulated_proto;
1640 flags = FLAGS_VLAN_TAGGED;
1641
1642 } else if (vlan_tx_tag_present(skb)) {
1643
1644 flags = FLAGS_VLAN_OOB;
1645 vid = vlan_tx_tag_get(skb);
1646 qlcnic_set_tx_vlan_tci(first_desc, vid);
1647 vlan_oob = 1;
1648 }
1649 1932
1650 if (*(skb->data) & BIT_0) { 1933 if (*(skb->data) & BIT_0) {
1651 flags |= BIT_0; 1934 flags |= BIT_0;
@@ -1716,7 +1999,7 @@ qlcnic_tso_check(struct net_device *netdev,
1716 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2); 1999 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1717 skb_copy_from_linear_data(skb, vh, 12); 2000 skb_copy_from_linear_data(skb, vh, 12);
1718 vh->h_vlan_proto = htons(ETH_P_8021Q); 2001 vh->h_vlan_proto = htons(ETH_P_8021Q);
1719 vh->h_vlan_TCI = htons(vid); 2002 vh->h_vlan_TCI = htons(first_desc->vlan_TCI);
1720 skb_copy_from_linear_data_offset(skb, 12, 2003 skb_copy_from_linear_data_offset(skb, 12,
1721 (char *)vh + 16, copy_len - 16); 2004 (char *)vh + 16, copy_len - 16);
1722 2005
@@ -1796,11 +2079,47 @@ out_err:
1796 return -ENOMEM; 2079 return -ENOMEM;
1797} 2080}
1798 2081
2082static int
2083qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
2084 struct sk_buff *skb,
2085 struct cmd_desc_type0 *first_desc)
2086{
2087 u8 opcode = 0;
2088 u16 flags = 0;
2089 __be16 protocol = skb->protocol;
2090 struct vlan_ethhdr *vh;
2091
2092 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
2093 vh = (struct vlan_ethhdr *)skb->data;
2094 protocol = vh->h_vlan_encapsulated_proto;
2095 flags = FLAGS_VLAN_TAGGED;
2096 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
2097 } else if (vlan_tx_tag_present(skb)) {
2098 flags = FLAGS_VLAN_OOB;
2099 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
2100 }
2101 if (unlikely(adapter->pvid)) {
2102 if (first_desc->vlan_TCI &&
2103 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2104 return -EIO;
2105 if (first_desc->vlan_TCI &&
2106 (adapter->flags & QLCNIC_TAGGING_ENABLED))
2107 goto set_flags;
2108
2109 flags = FLAGS_VLAN_OOB;
2110 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
2111 }
2112set_flags:
2113 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2114 return 0;
2115}
2116
1799static inline void 2117static inline void
1800qlcnic_clear_cmddesc(u64 *desc) 2118qlcnic_clear_cmddesc(u64 *desc)
1801{ 2119{
1802 desc[0] = 0ULL; 2120 desc[0] = 0ULL;
1803 desc[2] = 0ULL; 2121 desc[2] = 0ULL;
2122 desc[7] = 0ULL;
1804} 2123}
1805 2124
1806netdev_tx_t 2125netdev_tx_t
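
Decision table for the new qlcnic_check_tx_tagging() (reviewer's summary; the `goto set_flags` is a no-op jump since the label immediately follows the if block):

	/*
	 * frame tag       pvid set  TAGGING_ENABLED  outcome
	 * --------------  --------  ---------------  --------------------------
	 * in-band 802.1Q  -         -                FLAGS_VLAN_TAGGED, TCI
	 *                                            from the header
	 * OOB (skb tag)   -         -                FLAGS_VLAN_OOB, TCI from
	 *                                            vlan_tx_tag_get()
	 * tagged          yes       no               -EIO, caller drops packet
	 * tagged          yes       yes              tag kept as-is
	 * untagged        yes       -                FLAGS_VLAN_OOB with the
	 *                                            port pvid inserted
	 */

Note that opcode stays 0 here; the real opcode (TSO, checksum offload, etc.) appears to still be chosen later in qlcnic_tso_check().
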
@@ -1812,6 +2131,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1812 struct qlcnic_skb_frag *buffrag; 2131 struct qlcnic_skb_frag *buffrag;
1813 struct cmd_desc_type0 *hwdesc, *first_desc; 2132 struct cmd_desc_type0 *hwdesc, *first_desc;
1814 struct pci_dev *pdev; 2133 struct pci_dev *pdev;
2134 struct ethhdr *phdr;
1815 int i, k; 2135 int i, k;
1816 2136
1817 u32 producer; 2137 u32 producer;
@@ -1823,6 +2143,13 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1823 return NETDEV_TX_BUSY; 2143 return NETDEV_TX_BUSY;
1824 } 2144 }
1825 2145
2146 if (adapter->flags & QLCNIC_MACSPOOF) {
2147 phdr = (struct ethhdr *)skb->data;
2148 if (compare_ether_addr(phdr->h_source,
2149 adapter->mac_addr))
2150 goto drop_packet;
2151 }
2152
1826 frag_count = skb_shinfo(skb)->nr_frags + 1; 2153 frag_count = skb_shinfo(skb)->nr_frags + 1;
1827 2154
1828 /* 4 fragments per cmd des */ 2155 /* 4 fragments per cmd des */
@@ -1844,6 +2171,12 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1844 2171
1845 pdev = adapter->pdev; 2172 pdev = adapter->pdev;
1846 2173
2174 first_desc = hwdesc = &tx_ring->desc_head[producer];
2175 qlcnic_clear_cmddesc((u64 *)hwdesc);
2176
2177 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
2178 goto drop_packet;
2179
1847 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) { 2180 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1848 adapter->stats.tx_dma_map_error++; 2181 adapter->stats.tx_dma_map_error++;
1849 goto drop_packet; 2182 goto drop_packet;
@@ -1852,9 +2185,6 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1852 pbuf->skb = skb; 2185 pbuf->skb = skb;
1853 pbuf->frag_count = frag_count; 2186 pbuf->frag_count = frag_count;
1854 2187
1855 first_desc = hwdesc = &tx_ring->desc_head[producer];
1856 qlcnic_clear_cmddesc((u64 *)hwdesc);
1857
1858 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len); 2188 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1859 qlcnic_set_tx_port(first_desc, adapter->portnum); 2189 qlcnic_set_tx_port(first_desc, adapter->portnum);
1860 2190
@@ -1893,6 +2223,9 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1893 2223
1894 qlcnic_tso_check(netdev, tx_ring, first_desc, skb); 2224 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1895 2225
2226 if (qlcnic_mac_learn)
2227 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2228
1896 qlcnic_update_cmd_producer(adapter, tx_ring); 2229 qlcnic_update_cmd_producer(adapter, tx_ring);
1897 2230
1898 adapter->stats.txbytes += skb->len; 2231 adapter->stats.txbytes += skb->len;
@@ -1947,14 +2280,14 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1947 struct net_device *netdev = adapter->netdev; 2280 struct net_device *netdev = adapter->netdev;
1948 2281
1949 if (adapter->ahw.linkup && !linkup) { 2282 if (adapter->ahw.linkup && !linkup) {
1950 dev_info(&netdev->dev, "NIC Link is down\n"); 2283 netdev_info(netdev, "NIC Link is down\n");
1951 adapter->ahw.linkup = 0; 2284 adapter->ahw.linkup = 0;
1952 if (netif_running(netdev)) { 2285 if (netif_running(netdev)) {
1953 netif_carrier_off(netdev); 2286 netif_carrier_off(netdev);
1954 netif_stop_queue(netdev); 2287 netif_stop_queue(netdev);
1955 } 2288 }
1956 } else if (!adapter->ahw.linkup && linkup) { 2289 } else if (!adapter->ahw.linkup && linkup) {
1957 dev_info(&netdev->dev, "NIC Link is up\n"); 2290 netdev_info(netdev, "NIC Link is up\n");
1958 adapter->ahw.linkup = 1; 2291 adapter->ahw.linkup = 1;
1959 if (netif_running(netdev)) { 2292 if (netif_running(netdev)) {
1960 netif_carrier_on(netdev); 2293 netif_carrier_on(netdev);
@@ -2258,18 +2591,22 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2258} 2591}
2259 2592
2260static void 2593static void
2261qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter) 2594qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
2262{ 2595{
2263 u32 val; 2596 u32 val;
2264 2597
2265 if (qlcnic_api_lock(adapter)) 2598 if (qlcnic_api_lock(adapter))
2266 goto err; 2599 goto err;
2267 2600
2268 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 2601 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2269 QLC_DEV_CLR_REF_CNT(val, adapter->portnum); 2602 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2270 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val); 2603 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
2271 2604
2272 if (!(val & 0x11111111)) 2605 if (failed) {
2606 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2607 dev_info(&adapter->pdev->dev,
2608 "Device state set to FAILED. Please reboot\n");
2609 } else if (!(val & 0x11111111))
2273 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD); 2610 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2274 2611
2275 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 2612 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
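
The register rename (QLCNIC_CRB_DEV_REF_COUNT to QLCNIC_CRB_DRV_ACTIVE) makes the semantics clearer: the register packs one 4-bit slot per PCI function, and QLC_DEV_SET_REF_CNT/QLC_DEV_CLR_REF_CNT (definitions not shown in this patch) set and clear bit 0 of a function's nibble, which is why `!(val & 0x11111111)` above means no function holds a reference and the device may drop back to QLCNIC_DEV_COLD. Hedged equivalents of the two macros:

	/* Assumed semantics, consistent with the
	 * "1 << (portnum * 4)" test in qlcnic_can_start_firmware(). */
	#define DRV_ACTIVE_SET(val, fn)	((val) |  (1U << ((fn) * 4)))
	#define DRV_ACTIVE_CLR(val, fn)	((val) & ~(1U << ((fn) * 4)))
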
@@ -2290,7 +2627,7 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2290 int act, state; 2627 int act, state;
2291 2628
2292 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 2629 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2293 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 2630 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2294 2631
2295 if (((state & 0x11111111) == (act & 0x11111111)) || 2632 if (((state & 0x11111111) == (act & 0x11111111)) ||
2296 ((act & 0x11111111) == ((state >> 1) & 0x11111111))) 2633 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
@@ -2325,10 +2662,10 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2325 if (qlcnic_api_lock(adapter)) 2662 if (qlcnic_api_lock(adapter))
2326 return -1; 2663 return -1;
2327 2664
2328 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 2665 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2329 if (!(val & (1 << (portnum * 4)))) { 2666 if (!(val & (1 << (portnum * 4)))) {
2330 QLC_DEV_SET_REF_CNT(val, portnum); 2667 QLC_DEV_SET_REF_CNT(val, portnum);
2331 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val); 2668 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
2332 } 2669 }
2333 2670
2334 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2671 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
@@ -2403,7 +2740,7 @@ qlcnic_fwinit_work(struct work_struct *work)
2403{ 2740{
2404 struct qlcnic_adapter *adapter = container_of(work, 2741 struct qlcnic_adapter *adapter = container_of(work,
2405 struct qlcnic_adapter, fw_work.work); 2742 struct qlcnic_adapter, fw_work.work);
2406 u32 dev_state = 0xf, npar_state; 2743 u32 dev_state = 0xf;
2407 2744
2408 if (qlcnic_api_lock(adapter)) 2745 if (qlcnic_api_lock(adapter))
2409 goto err_ret; 2746 goto err_ret;
@@ -2417,16 +2754,8 @@ qlcnic_fwinit_work(struct work_struct *work)
2417 } 2754 }
2418 2755
2419 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { 2756 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2420 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); 2757 qlcnic_api_unlock(adapter);
2421 if (npar_state == QLCNIC_DEV_NPAR_RDY) { 2758 goto wait_npar;
2422 qlcnic_api_unlock(adapter);
2423 goto wait_npar;
2424 } else {
2425 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2426 FW_POLL_DELAY);
2427 qlcnic_api_unlock(adapter);
2428 return;
2429 }
2430 } 2759 }
2431 2760
2432 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) { 2761 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
@@ -2463,6 +2792,7 @@ skip_ack_check:
2463 2792
2464 if (!adapter->nic_ops->start_firmware(adapter)) { 2793 if (!adapter->nic_ops->start_firmware(adapter)) {
2465 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2794 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2795 adapter->fw_wait_cnt = 0;
2466 return; 2796 return;
2467 } 2797 }
2468 goto err_ret; 2798 goto err_ret;
@@ -2475,27 +2805,25 @@ wait_npar:
2475 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state); 2805 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2476 2806
2477 switch (dev_state) { 2807 switch (dev_state) {
2478 case QLCNIC_DEV_QUISCENT: 2808 case QLCNIC_DEV_READY:
2479 case QLCNIC_DEV_NEED_QUISCENT:
2480 case QLCNIC_DEV_NEED_RESET:
2481 qlcnic_schedule_work(adapter,
2482 qlcnic_fwinit_work, FW_POLL_DELAY);
2483 return;
2484 case QLCNIC_DEV_FAILED:
2485 break;
2486
2487 default:
2488 if (!adapter->nic_ops->start_firmware(adapter)) { 2809 if (!adapter->nic_ops->start_firmware(adapter)) {
2489 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2810 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2811 adapter->fw_wait_cnt = 0;
2490 return; 2812 return;
2491 } 2813 }
2814 case QLCNIC_DEV_FAILED:
2815 break;
2816 default:
2817 qlcnic_schedule_work(adapter,
2818 qlcnic_fwinit_work, FW_POLL_DELAY);
2819 return;
2492 } 2820 }
2493 2821
2494err_ret: 2822err_ret:
2495 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u " 2823 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2496 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt); 2824 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
2497 netif_device_attach(adapter->netdev); 2825 netif_device_attach(adapter->netdev);
2498 qlcnic_clr_all_drv_state(adapter); 2826 qlcnic_clr_all_drv_state(adapter, 0);
2499} 2827}
2500 2828
2501static void 2829static void
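
The reworked wait_npar switch inverts the old logic: READY is now the only state that attempts a firmware start, and everything except FAILED re-arms the poll. The fall-through from QLCNIC_DEV_READY into QLCNIC_DEV_FAILED when start_firmware() fails looks deliberate, since both paths must end at err_ret. Control-flow sketch (helper names here are descriptive stand-ins, not driver symbols):

	switch (dev_state) {
	case QLCNIC_DEV_READY:
		if (!start_firmware(adapter)) {	/* success: go attach */
			schedule_attach_work(adapter);
			return;
		}
		/* fall through on failure */
	case QLCNIC_DEV_FAILED:
		break;				/* reaches err_ret */
	default:
		reschedule_fwinit_poll(adapter);	/* keep waiting */
		return;
	}
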
@@ -2531,8 +2859,23 @@ err_ret:
2531 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n", 2859 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2532 status, adapter->temp); 2860 status, adapter->temp);
2533 netif_device_attach(netdev); 2861 netif_device_attach(netdev);
2534 qlcnic_clr_all_drv_state(adapter); 2862 qlcnic_clr_all_drv_state(adapter, 1);
2863}
2864
2865/* Transition NPAR state to non-operational */
2866static void
2867qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2868{
2869 u32 state;
2870
2871 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2872 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2873 return;
2535 2874
2875 if (qlcnic_api_lock(adapter))
2876 return;
2877 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2878 qlcnic_api_unlock(adapter);
2536} 2879}
2537 2880
2538/* Transition to RESET state from READY state only */ 2881/* Transition to RESET state from READY state only */
@@ -2553,6 +2896,7 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2553 qlcnic_idc_debug_info(adapter, 0); 2896 qlcnic_idc_debug_info(adapter, 0);
2554 } 2897 }
2555 2898
2899 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2556 qlcnic_api_unlock(adapter); 2900 qlcnic_api_unlock(adapter);
2557} 2901}
2558 2902
@@ -2560,21 +2904,11 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2560static void 2904static void
2561qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter) 2905qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2562{ 2906{
2563 u32 state;
2564
2565 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
2566 adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
2567 return;
2568 if (qlcnic_api_lock(adapter)) 2907 if (qlcnic_api_lock(adapter))
2569 return; 2908 return;
2570 2909
2571 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); 2910 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2572 2911 QLCDB(adapter, DRV, "NPAR operational state set\n");
2573 if (state != QLCNIC_DEV_NPAR_RDY) {
2574 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
2575 QLCNIC_DEV_NPAR_RDY);
2576 QLCDB(adapter, DRV, "NPAR READY state set\n");
2577 }
2578 2912
2579 qlcnic_api_unlock(adapter); 2913 qlcnic_api_unlock(adapter);
2580} 2914}
@@ -2605,12 +2939,26 @@ qlcnic_attach_work(struct work_struct *work)
2605 struct qlcnic_adapter *adapter = container_of(work, 2939 struct qlcnic_adapter *adapter = container_of(work,
2606 struct qlcnic_adapter, fw_work.work); 2940 struct qlcnic_adapter, fw_work.work);
2607 struct net_device *netdev = adapter->netdev; 2941 struct net_device *netdev = adapter->netdev;
2942 u32 npar_state;
2608 2943
2944 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2945 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2946 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2947 qlcnic_clr_all_drv_state(adapter, 0);
2948 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2949 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2950 FW_POLL_DELAY);
2951 else
2952 goto attach;
2953 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2954 return;
2955 }
2956attach:
2609 if (netif_running(netdev)) { 2957 if (netif_running(netdev)) {
2610 if (qlcnic_up(adapter, netdev)) 2958 if (qlcnic_up(adapter, netdev))
2611 goto done; 2959 goto done;
2612 2960
2613 qlcnic_config_indev_addr(netdev, NETDEV_UP); 2961 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2614 } 2962 }
2615 2963
2616done: 2964done:
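The gate added at the top of qlcnic_attach_work() makes non-management functions wait until the NPAR reports operational before attaching, with fw_wait_cnt bounding the wait. A standalone sketch of that poll-until-timeout shape (the constants and the state source below are stand-ins, not driver values):

#include <stdio.h>

#define NPAR_OPER	1
#define OPER_TIMEO	10	/* stand-in for QLCNIC_DEV_NPAR_OPER_TIMEO */

/* stub: pretend the state flips to operational on the third poll */
static int read_npar_state(int poll)
{
	return poll >= 3 ? NPAR_OPER : 0;
}

int main(void)
{
	int fw_wait_cnt = 0;

	for (;;) {
		if (fw_wait_cnt++ > OPER_TIMEO) {
			puts("timed out: clear driver state and bail");
			return 1;
		}
		if (read_npar_state(fw_wait_cnt) == NPAR_OPER)
			break;
		/* the driver reschedules itself with FW_POLL_DELAY here
		 * rather than busy-looping */
	}
	puts("NPAR operational: proceed to attach");
	return 0;
}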
@@ -2626,7 +2974,7 @@ done:
2626static int 2974static int
2627qlcnic_check_health(struct qlcnic_adapter *adapter) 2975qlcnic_check_health(struct qlcnic_adapter *adapter)
2628{ 2976{
2629 u32 state = 0, heartbit; 2977 u32 state = 0, heartbeat;
2630 struct net_device *netdev = adapter->netdev; 2978 struct net_device *netdev = adapter->netdev;
2631 2979
2632 if (qlcnic_check_temp(adapter)) 2980 if (qlcnic_check_temp(adapter))
@@ -2636,12 +2984,15 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2636 qlcnic_dev_request_reset(adapter); 2984 qlcnic_dev_request_reset(adapter);
2637 2985
2638 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2986 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2639 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT) 2987 if (state == QLCNIC_DEV_NEED_RESET ||
2988 state == QLCNIC_DEV_NEED_QUISCENT) {
2989 qlcnic_set_npar_non_operational(adapter);
2640 adapter->need_fw_reset = 1; 2990 adapter->need_fw_reset = 1;
2991 }
2641 2992
2642 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); 2993 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2643 if (heartbit != adapter->heartbit) { 2994 if (heartbeat != adapter->heartbeat) {
2644 adapter->heartbit = heartbit; 2995 adapter->heartbeat = heartbeat;
2645 adapter->fw_fail_cnt = 0; 2996 adapter->fw_fail_cnt = 0;
2646 if (adapter->need_fw_reset) 2997 if (adapter->need_fw_reset)
2647 goto detach; 2998 goto detach;
@@ -2692,6 +3043,9 @@ qlcnic_fw_poll_work(struct work_struct *work)
2692 if (qlcnic_check_health(adapter)) 3043 if (qlcnic_check_health(adapter))
2693 return; 3044 return;
2694 3045
3046 if (adapter->fhash.fnum)
3047 qlcnic_prune_lb_filters(adapter);
3048
2695reschedule: 3049reschedule:
2696 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); 3050 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2697} 3051}
@@ -2738,7 +3092,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
2738 if (qlcnic_api_lock(adapter)) 3092 if (qlcnic_api_lock(adapter))
2739 return -EINVAL; 3093 return -EINVAL;
2740 3094
2741 if (first_func) { 3095 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
2742 adapter->need_fw_reset = 1; 3096 adapter->need_fw_reset = 1;
2743 set_bit(__QLCNIC_START_FW, &adapter->state); 3097 set_bit(__QLCNIC_START_FW, &adapter->state);
2744 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); 3098 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
@@ -2756,7 +3110,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
2756 if (netif_running(netdev)) { 3110 if (netif_running(netdev)) {
2757 err = qlcnic_attach(adapter); 3111 err = qlcnic_attach(adapter);
2758 if (err) { 3112 if (err) {
2759 qlcnic_clr_all_drv_state(adapter); 3113 qlcnic_clr_all_drv_state(adapter, 1);
2760 clear_bit(__QLCNIC_AER, &adapter->state); 3114 clear_bit(__QLCNIC_AER, &adapter->state);
2761 netif_device_attach(netdev); 3115 netif_device_attach(netdev);
2762 return err; 3116 return err;
@@ -2766,7 +3120,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
2766 if (err) 3120 if (err)
2767 goto done; 3121 goto done;
2768 3122
2769 qlcnic_config_indev_addr(netdev, NETDEV_UP); 3123 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2770 } 3124 }
2771 done: 3125 done:
2772 netif_device_attach(netdev); 3126 netif_device_attach(netdev);
@@ -2822,7 +3176,6 @@ static void qlcnic_io_resume(struct pci_dev *pdev)
2822 FW_POLL_DELAY); 3176 FW_POLL_DELAY);
2823} 3177}
2824 3178
2825
2826static int 3179static int
2827qlcnicvf_start_firmware(struct qlcnic_adapter *adapter) 3180qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2828{ 3181{
@@ -2832,8 +3185,20 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2832 if (err) 3185 if (err)
2833 return err; 3186 return err;
2834 3187
3188 err = qlcnic_check_npar_opertional(adapter);
3189 if (err)
3190 return err;
3191
3192 err = qlcnic_initialize_nic(adapter);
3193 if (err)
3194 return err;
3195
2835 qlcnic_check_options(adapter); 3196 qlcnic_check_options(adapter);
2836 3197
3198 err = qlcnic_set_eswitch_port_config(adapter);
3199 if (err)
3200 return err;
3201
2837 adapter->need_fw_reset = 0; 3202 adapter->need_fw_reset = 0;
2838 3203
2839 return err; 3204 return err;
@@ -3093,9 +3458,6 @@ validate_pm_config(struct qlcnic_adapter *adapter,
3093 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC) 3458 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3094 return QL_STATUS_INVALID_PARAM; 3459 return QL_STATUS_INVALID_PARAM;
3095 3460
3096 if (!IS_VALID_MODE(pm_cfg[i].action))
3097 return QL_STATUS_INVALID_PARAM;
3098
3099 s_esw_id = adapter->npars[src_pci_func].phy_port; 3461 s_esw_id = adapter->npars[src_pci_func].phy_port;
3100 d_esw_id = adapter->npars[dest_pci_func].phy_port; 3462 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3101 3463
@@ -3129,7 +3491,7 @@ qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3129 return ret; 3491 return ret;
3130 for (i = 0; i < count; i++) { 3492 for (i = 0; i < count; i++) {
3131 pci_func = pm_cfg[i].pci_func; 3493 pci_func = pm_cfg[i].pci_func;
3132 action = pm_cfg[i].action; 3494 action = !!pm_cfg[i].action;
3133 id = adapter->npars[pci_func].phy_port; 3495 id = adapter->npars[pci_func].phy_port;
3134 ret = qlcnic_config_port_mirroring(adapter, id, 3496 ret = qlcnic_config_port_mirroring(adapter, id,
3135 action, pci_func); 3497 action, pci_func);
@@ -3140,7 +3502,7 @@ qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3140 for (i = 0; i < count; i++) { 3502 for (i = 0; i < count; i++) {
3141 pci_func = pm_cfg[i].pci_func; 3503 pci_func = pm_cfg[i].pci_func;
3142 id = adapter->npars[pci_func].phy_port; 3504 id = adapter->npars[pci_func].phy_port;
3143 adapter->npars[pci_func].enable_pm = pm_cfg[i].action; 3505 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
3144 adapter->npars[pci_func].dest_npar = id; 3506 adapter->npars[pci_func].dest_npar = id;
3145 } 3507 }
3146 return size; 3508 return size;
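The two hunks above replace a raw copy of the user-supplied action with !!pm_cfg[i].action, which is also why the IS_VALID_MODE() check could be dropped from validate_pm_config(). A quick standalone illustration of the idiom, which collapses any non-zero value to 1 before it reaches the firmware call or the cached enable_pm flag:

#include <stdio.h>

int main(void)
{
	unsigned int raw[] = { 0, 1, 2, 0x80 };

	for (unsigned int i = 0; i < 4; i++)
		printf("%#4x -> %d\n", raw[i], !!raw[i]);
	/* prints 0, 1, 1, 1: only a clean boolean survives */
	return 0;
}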
@@ -3172,30 +3534,45 @@ qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3172 3534
3173static int 3535static int
3174validate_esw_config(struct qlcnic_adapter *adapter, 3536validate_esw_config(struct qlcnic_adapter *adapter,
3175 struct qlcnic_esw_func_cfg *esw_cfg, int count) 3537 struct qlcnic_esw_func_cfg *esw_cfg, int count)
3176{ 3538{
3539 u32 op_mode;
3177 u8 pci_func; 3540 u8 pci_func;
3178 int i; 3541 int i;
3179 3542
3543 op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
3544
3180 for (i = 0; i < count; i++) { 3545 for (i = 0; i < count; i++) {
3181 pci_func = esw_cfg[i].pci_func; 3546 pci_func = esw_cfg[i].pci_func;
3182 if (pci_func >= QLCNIC_MAX_PCI_FUNC) 3547 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3183 return QL_STATUS_INVALID_PARAM; 3548 return QL_STATUS_INVALID_PARAM;
3184 3549
3185 if (adapter->npars[i].type != QLCNIC_TYPE_NIC) 3550 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3186 return QL_STATUS_INVALID_PARAM; 3551 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3552 return QL_STATUS_INVALID_PARAM;
3187 3553
3188 if (esw_cfg->host_vlan_tag == 1) 3554 switch (esw_cfg[i].op_mode) {
3555 case QLCNIC_PORT_DEFAULTS:
3556 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
3557 QLCNIC_NON_PRIV_FUNC) {
3558 esw_cfg[i].mac_anti_spoof = 0;
3559 esw_cfg[i].mac_override = 1;
3560 }
3561 break;
3562 case QLCNIC_ADD_VLAN:
3189 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id)) 3563 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3190 return QL_STATUS_INVALID_PARAM; 3564 return QL_STATUS_INVALID_PARAM;
3191 3565 if (!esw_cfg[i].op_type)
3192 if (!IS_VALID_MODE(esw_cfg[i].promisc_mode) 3566 return QL_STATUS_INVALID_PARAM;
3193 || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag) 3567 break;
3194 || !IS_VALID_MODE(esw_cfg[i].mac_learning) 3568 case QLCNIC_DEL_VLAN:
3195 || !IS_VALID_MODE(esw_cfg[i].discard_tagged)) 3569 if (!esw_cfg[i].op_type)
3570 return QL_STATUS_INVALID_PARAM;
3571 break;
3572 default:
3196 return QL_STATUS_INVALID_PARAM; 3573 return QL_STATUS_INVALID_PARAM;
3574 }
3197 } 3575 }
3198
3199 return 0; 3576 return 0;
3200} 3577}
3201 3578
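Besides gating the NIC-type check on management mode, this hunk quietly fixes an indexing bug: the old code tested adapter->npars[i].type with the loop counter, while entries are keyed by the caller-supplied pci_func, which need not equal i. A contrived standalone illustration of the difference:

#include <stdio.h>

struct npar { int type; };

int main(void)
{
	struct npar npars[8] = { [2] = { .type = 1 /* NIC */ } };
	int cfg_pci_func[1] = { 2 };	/* one config entry, for function 2 */

	for (int i = 0; i < 1; i++) {
		int pci_func = cfg_pci_func[i];
		/* npars[i].type would inspect function 0 here, but the
		 * entry being validated belongs to function 2 */
		printf("i=%d type=%d, pci_func=%d type=%d\n",
		       i, npars[i].type, pci_func, npars[pci_func].type);
	}
	return 0;
}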
@@ -3206,8 +3583,9 @@ qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3206 struct device *dev = container_of(kobj, struct device, kobj); 3583 struct device *dev = container_of(kobj, struct device, kobj);
3207 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 3584 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3208 struct qlcnic_esw_func_cfg *esw_cfg; 3585 struct qlcnic_esw_func_cfg *esw_cfg;
3586 struct qlcnic_npar_info *npar;
3209 int count, rem, i, ret; 3587 int count, rem, i, ret;
3210 u8 id, pci_func; 3588 u8 pci_func, op_mode = 0;
3211 3589
3212 count = size / sizeof(struct qlcnic_esw_func_cfg); 3590 count = size / sizeof(struct qlcnic_esw_func_cfg);
3213 rem = size % sizeof(struct qlcnic_esw_func_cfg); 3591 rem = size % sizeof(struct qlcnic_esw_func_cfg);
@@ -3220,30 +3598,55 @@ qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3220 return ret; 3598 return ret;
3221 3599
3222 for (i = 0; i < count; i++) { 3600 for (i = 0; i < count; i++) {
3223 pci_func = esw_cfg[i].pci_func; 3601 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3224 id = adapter->npars[pci_func].phy_port; 3602 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3225 ret = qlcnic_config_switch_port(adapter, id, 3603 return QL_STATUS_INVALID_PARAM;
3226 esw_cfg[i].host_vlan_tag, 3604
3227 esw_cfg[i].discard_tagged, 3605 if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
3228 esw_cfg[i].promisc_mode, 3606 continue;
3229 esw_cfg[i].mac_learning, 3607
3230 esw_cfg[i].pci_func, 3608 op_mode = esw_cfg[i].op_mode;
3231 esw_cfg[i].vlan_id); 3609 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3232 if (ret) 3610 esw_cfg[i].op_mode = op_mode;
3233 return ret; 3611 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3612
3613 switch (esw_cfg[i].op_mode) {
3614 case QLCNIC_PORT_DEFAULTS:
3615 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3616 break;
3617 case QLCNIC_ADD_VLAN:
3618 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3619 break;
3620 case QLCNIC_DEL_VLAN:
3621 esw_cfg[i].vlan_id = 0;
3622 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3623 break;
3624 }
3234 } 3625 }
3235 3626
3627 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3628 goto out;
3629
3236 for (i = 0; i < count; i++) { 3630 for (i = 0; i < count; i++) {
3237 pci_func = esw_cfg[i].pci_func; 3631 pci_func = esw_cfg[i].pci_func;
3238 adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode; 3632 npar = &adapter->npars[pci_func];
3239 adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning; 3633 switch (esw_cfg[i].op_mode) {
3240 adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id; 3634 case QLCNIC_PORT_DEFAULTS:
3241 adapter->npars[pci_func].discard_tagged = 3635 npar->promisc_mode = esw_cfg[i].promisc_mode;
3242 esw_cfg[i].discard_tagged; 3636 npar->mac_override = esw_cfg[i].mac_override;
3243 adapter->npars[pci_func].host_vlan_tag = 3637 npar->offload_flags = esw_cfg[i].offload_flags;
3244 esw_cfg[i].host_vlan_tag; 3638 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3639 npar->discard_tagged = esw_cfg[i].discard_tagged;
3640 break;
3641 case QLCNIC_ADD_VLAN:
3642 npar->pvid = esw_cfg[i].vlan_id;
3643 break;
3644 case QLCNIC_DEL_VLAN:
3645 npar->pvid = 0;
3646 break;
3647 }
3245 } 3648 }
3246 3649out:
3247 return size; 3650 return size;
3248} 3651}
3249 3652
@@ -3254,7 +3657,7 @@ qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3254 struct device *dev = container_of(kobj, struct device, kobj); 3657 struct device *dev = container_of(kobj, struct device, kobj);
3255 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 3658 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3256 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC]; 3659 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
3257 int i; 3660 u8 i;
3258 3661
3259 if (size != sizeof(esw_cfg)) 3662 if (size != sizeof(esw_cfg))
3260 return QL_STATUS_INVALID_PARAM; 3663 return QL_STATUS_INVALID_PARAM;
@@ -3262,12 +3665,9 @@ qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3262 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 3665 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3263 if (adapter->npars[i].type != QLCNIC_TYPE_NIC) 3666 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3264 continue; 3667 continue;
3265 3668 esw_cfg[i].pci_func = i;
3266 esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag; 3669 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3267 esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode; 3670 return QL_STATUS_INVALID_PARAM;
3268 esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
3269 esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
3270 esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
3271 } 3671 }
3272 memcpy(buf, &esw_cfg, size); 3672 memcpy(buf, &esw_cfg, size);
3273 3673
@@ -3370,6 +3770,115 @@ qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3370} 3770}
3371 3771
3372static ssize_t 3772static ssize_t
3773qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3774 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3775{
3776 struct device *dev = container_of(kobj, struct device, kobj);
3777 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3778 struct qlcnic_esw_statistics port_stats;
3779 int ret;
3780
3781 if (size != sizeof(struct qlcnic_esw_statistics))
3782 return QL_STATUS_INVALID_PARAM;
3783
3784 if (offset >= QLCNIC_MAX_PCI_FUNC)
3785 return QL_STATUS_INVALID_PARAM;
3786
3787 memset(&port_stats, 0, size);
3788 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3789 &port_stats.rx);
3790 if (ret)
3791 return ret;
3792
3793 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3794 &port_stats.tx);
3795 if (ret)
3796 return ret;
3797
3798 memcpy(buf, &port_stats, size);
3799 return size;
3800}
3801
3802static ssize_t
3803qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3804 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3805{
3806 struct device *dev = container_of(kobj, struct device, kobj);
3807 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3808 struct qlcnic_esw_statistics esw_stats;
3809 int ret;
3810
3811 if (size != sizeof(struct qlcnic_esw_statistics))
3812 return QL_STATUS_INVALID_PARAM;
3813
3814 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3815 return QL_STATUS_INVALID_PARAM;
3816
3817 memset(&esw_stats, 0, size);
3818 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3819 &esw_stats.rx);
3820 if (ret)
3821 return ret;
3822
3823 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3824 &esw_stats.tx);
3825 if (ret)
3826 return ret;
3827
3828 memcpy(buf, &esw_stats, size);
3829 return size;
3830}
3831
3832static ssize_t
3833qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3834 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3835{
3836 struct device *dev = container_of(kobj, struct device, kobj);
3837 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3838 int ret;
3839
3840 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3841 return QL_STATUS_INVALID_PARAM;
3842
3843 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3844 QLCNIC_QUERY_RX_COUNTER);
3845 if (ret)
3846 return ret;
3847
3848 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3849 QLCNIC_QUERY_TX_COUNTER);
3850 if (ret)
3851 return ret;
3852
3853 return size;
3854}
3855
3856static ssize_t
3857qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3858 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3859{
3860
3861 struct device *dev = container_of(kobj, struct device, kobj);
3862 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3863 int ret;
3864
3865 if (offset >= QLCNIC_MAX_PCI_FUNC)
3866 return QL_STATUS_INVALID_PARAM;
3867
3868 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3869 QLCNIC_QUERY_RX_COUNTER);
3870 if (ret)
3871 return ret;
3872
3873 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3874 QLCNIC_QUERY_TX_COUNTER);
3875 if (ret)
3876 return ret;
3877
3878 return size;
3879}
3880
3881static ssize_t
3373qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj, 3882qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3374 struct bin_attribute *attr, char *buf, loff_t offset, size_t size) 3883 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3375{ 3884{
@@ -3418,6 +3927,20 @@ static struct bin_attribute bin_attr_pci_config = {
3418 .write = NULL, 3927 .write = NULL,
3419}; 3928};
3420 3929
3930static struct bin_attribute bin_attr_port_stats = {
3931 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3932 .size = 0,
3933 .read = qlcnic_sysfs_get_port_stats,
3934 .write = qlcnic_sysfs_clear_port_stats,
3935};
3936
3937static struct bin_attribute bin_attr_esw_stats = {
3938 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3939 .size = 0,
3940 .read = qlcnic_sysfs_get_esw_stats,
3941 .write = qlcnic_sysfs_clear_esw_stats,
3942};
3943
3421static struct bin_attribute bin_attr_esw_config = { 3944static struct bin_attribute bin_attr_esw_config = {
3422 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)}, 3945 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3423 .size = 0, 3946 .size = 0,
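A hypothetical userspace consumer of the new port_stats attribute, to show how the handlers above are meant to be driven: the read size must match the statistics struct exactly, and the file offset is interpreted as the PCI function index. The sysfs path and the 16-word layout are assumptions for the demo, not part of the driver:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct stats_blob {
	uint64_t word[16];	/* placeholder for struct qlcnic_esw_statistics */
};

int main(void)
{
	struct stats_blob st;
	int pci_func = 0;
	int fd = open("/sys/bus/pci/devices/0000:03:00.0/port_stats",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	/* offset selects the function; a short read is rejected by the
	 * kernel-side size check */
	if (pread(fd, &st, sizeof(st), pci_func) != sizeof(st)) {
		close(fd);
		return 1;
	}
	printf("first counter word: %llu\n", (unsigned long long)st.word[0]);
	close(fd);
	return 0;
}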
@@ -3457,6 +3980,9 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3457{ 3980{
3458 struct device *dev = &adapter->pdev->dev; 3981 struct device *dev = &adapter->pdev->dev;
3459 3982
3983 if (device_create_bin_file(dev, &bin_attr_port_stats))
3984 dev_info(dev, "failed to create port stats sysfs entry\n");
3985
3460 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) 3986 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3461 return; 3987 return;
3462 if (device_create_file(dev, &dev_attr_diag_mode)) 3988 if (device_create_file(dev, &dev_attr_diag_mode))
@@ -3465,18 +3991,20 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3465 dev_info(dev, "failed to create crb sysfs entry\n"); 3991 dev_info(dev, "failed to create crb sysfs entry\n");
3466 if (device_create_bin_file(dev, &bin_attr_mem)) 3992 if (device_create_bin_file(dev, &bin_attr_mem))
3467 dev_info(dev, "failed to create mem sysfs entry\n"); 3993 dev_info(dev, "failed to create mem sysfs entry\n");
3468 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || 3994 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3469 adapter->op_mode != QLCNIC_MGMT_FUNC) 3995 return;
3996 if (device_create_bin_file(dev, &bin_attr_esw_config))
3997 dev_info(dev, "failed to create esw config sysfs entry\n");
3998 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3470 return; 3999 return;
3471 if (device_create_bin_file(dev, &bin_attr_pci_config)) 4000 if (device_create_bin_file(dev, &bin_attr_pci_config))
3472 dev_info(dev, "failed to create pci config sysfs entry"); 4001 dev_info(dev, "failed to create pci config sysfs entry");
3473 if (device_create_bin_file(dev, &bin_attr_npar_config)) 4002 if (device_create_bin_file(dev, &bin_attr_npar_config))
3474 dev_info(dev, "failed to create npar config sysfs entry"); 4003 dev_info(dev, "failed to create npar config sysfs entry");
3475 if (device_create_bin_file(dev, &bin_attr_esw_config))
3476 dev_info(dev, "failed to create esw config sysfs entry");
3477 if (device_create_bin_file(dev, &bin_attr_pm_config)) 4004 if (device_create_bin_file(dev, &bin_attr_pm_config))
3478 dev_info(dev, "failed to create pm config sysfs entry"); 4005 dev_info(dev, "failed to create pm config sysfs entry");
3479 4006 if (device_create_bin_file(dev, &bin_attr_esw_stats))
4007 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
3480} 4008}
3481 4009
3482static void 4010static void
@@ -3484,18 +4012,22 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3484{ 4012{
3485 struct device *dev = &adapter->pdev->dev; 4013 struct device *dev = &adapter->pdev->dev;
3486 4014
4015 device_remove_bin_file(dev, &bin_attr_port_stats);
4016
3487 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) 4017 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3488 return; 4018 return;
3489 device_remove_file(dev, &dev_attr_diag_mode); 4019 device_remove_file(dev, &dev_attr_diag_mode);
3490 device_remove_bin_file(dev, &bin_attr_crb); 4020 device_remove_bin_file(dev, &bin_attr_crb);
3491 device_remove_bin_file(dev, &bin_attr_mem); 4021 device_remove_bin_file(dev, &bin_attr_mem);
3492 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || 4022 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3493 adapter->op_mode != QLCNIC_MGMT_FUNC) 4023 return;
4024 device_remove_bin_file(dev, &bin_attr_esw_config);
4025 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3494 return; 4026 return;
3495 device_remove_bin_file(dev, &bin_attr_pci_config); 4027 device_remove_bin_file(dev, &bin_attr_pci_config);
3496 device_remove_bin_file(dev, &bin_attr_npar_config); 4028 device_remove_bin_file(dev, &bin_attr_npar_config);
3497 device_remove_bin_file(dev, &bin_attr_esw_config);
3498 device_remove_bin_file(dev, &bin_attr_pm_config); 4029 device_remove_bin_file(dev, &bin_attr_pm_config);
4030 device_remove_bin_file(dev, &bin_attr_esw_stats);
3499} 4031}
3500 4032
3501#ifdef CONFIG_INET 4033#ifdef CONFIG_INET
@@ -3503,10 +4035,10 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3503#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops) 4035#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
3504 4036
3505static void 4037static void
3506qlcnic_config_indev_addr(struct net_device *dev, unsigned long event) 4038qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4039 struct net_device *dev, unsigned long event)
3507{ 4040{
3508 struct in_device *indev; 4041 struct in_device *indev;
3509 struct qlcnic_adapter *adapter = netdev_priv(dev);
3510 4042
3511 indev = in_dev_get(dev); 4043 indev = in_dev_get(dev);
3512 if (!indev) 4044 if (!indev)
@@ -3530,6 +4062,27 @@ qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3530 in_dev_put(indev); 4062 in_dev_put(indev);
3531} 4063}
3532 4064
4065static void
4066qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4067{
4068 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4069 struct net_device *dev;
4070 u16 vid;
4071
4072 qlcnic_config_indev_addr(adapter, netdev, event);
4073
4074 if (!adapter->vlgrp)
4075 return;
4076
4077 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4078 dev = vlan_group_get_device(adapter->vlgrp, vid);
4079 if (!dev)
4080 continue;
4081
4082 qlcnic_config_indev_addr(adapter, dev, event);
4083 }
4084}
4085
3533static int qlcnic_netdev_event(struct notifier_block *this, 4086static int qlcnic_netdev_event(struct notifier_block *this,
3534 unsigned long event, void *ptr) 4087 unsigned long event, void *ptr)
3535{ 4088{
@@ -3556,7 +4109,7 @@ recheck:
3556 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) 4109 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3557 goto done; 4110 goto done;
3558 4111
3559 qlcnic_config_indev_addr(dev, event); 4112 qlcnic_config_indev_addr(adapter, dev, event);
3560done: 4113done:
3561 return NOTIFY_DONE; 4114 return NOTIFY_DONE;
3562} 4115}
@@ -3573,7 +4126,7 @@ qlcnic_inetaddr_event(struct notifier_block *this,
3573 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; 4126 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3574 4127
3575recheck: 4128recheck:
3576 if (dev == NULL || !netif_running(dev)) 4129 if (dev == NULL)
3577 goto done; 4130 goto done;
3578 4131
3579 if (dev->priv_flags & IFF_802_1Q_VLAN) { 4132 if (dev->priv_flags & IFF_802_1Q_VLAN) {
@@ -3616,7 +4169,7 @@ static struct notifier_block qlcnic_inetaddr_cb = {
3616}; 4169};
3617#else 4170#else
3618static void 4171static void
3619qlcnic_config_indev_addr(struct net_device *dev, unsigned long event) 4172qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
3620{ } 4173{ }
3621#endif 4174#endif
3622static struct pci_error_handlers qlcnic_err_handler = { 4175static struct pci_error_handlers qlcnic_err_handler = {
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 5f89e83501f4..4ffebe83d883 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1566,7 +1566,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1566 rx_ring->rx_packets++; 1566 rx_ring->rx_packets++;
1567 rx_ring->rx_bytes += skb->len; 1567 rx_ring->rx_bytes += skb->len;
1568 skb->protocol = eth_type_trans(skb, ndev); 1568 skb->protocol = eth_type_trans(skb, ndev);
1569 skb->ip_summed = CHECKSUM_NONE; 1569 skb_checksum_none_assert(skb);
1570 1570
1571 if (qdev->rx_csum && 1571 if (qdev->rx_csum &&
1572 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { 1572 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
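skb_checksum_none_assert() was introduced alongside these conversions as a self-documenting replacement for the plain store: a freshly received skb already carries CHECKSUM_NONE, so the helper only asserts that invariant under debug builds instead of redundantly writing the field. Its shape in skbuff.h of this era is roughly:

static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}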
@@ -1676,7 +1676,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1676 rx_ring->rx_packets++; 1676 rx_ring->rx_packets++;
1677 rx_ring->rx_bytes += skb->len; 1677 rx_ring->rx_bytes += skb->len;
1678 skb->protocol = eth_type_trans(skb, ndev); 1678 skb->protocol = eth_type_trans(skb, ndev);
1679 skb->ip_summed = CHECKSUM_NONE; 1679 skb_checksum_none_assert(skb);
1680 1680
1681 /* If rx checksum is on, and there are no 1681 /* If rx checksum is on, and there are no
1682 * csum or frame errors. 1682 * csum or frame errors.
@@ -1996,7 +1996,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1996 } 1996 }
1997 1997
1998 skb->protocol = eth_type_trans(skb, ndev); 1998 skb->protocol = eth_type_trans(skb, ndev);
1999 skb->ip_summed = CHECKSUM_NONE; 1999 skb_checksum_none_assert(skb);
2000 2000
2001 /* If rx checksum is on, and there are no 2001 /* If rx checksum is on, and there are no
2002 * csum or frame errors. 2002 * csum or frame errors.
@@ -2222,10 +2222,11 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2222 ql_update_cq(rx_ring); 2222 ql_update_cq(rx_ring);
2223 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 2223 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224 } 2224 }
2225 if (!net_rsp)
2226 return 0;
2225 ql_write_cq_idx(rx_ring); 2227 ql_write_cq_idx(rx_ring);
2226 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; 2228 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2227 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) && 2229 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2228 net_rsp != NULL) {
2229 if (atomic_read(&tx_ring->queue_stopped) && 2230 if (atomic_read(&tx_ring->queue_stopped) &&
2230 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) 2231 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2231 /* 2232 /*
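The reordering above is a latent NULL-dereference fix: the old code indexed qdev->tx_ring[net_rsp->txq_idx] first and only tested net_rsp != NULL afterwards, inside the if-condition. In miniature (standalone sketch, not driver code):

#include <stdio.h>

struct rsp { int txq_idx; };

static int clean_ring(const struct rsp *net_rsp)
{
	if (!net_rsp)			/* check before any dereference */
		return 0;
	return net_rsp->txq_idx;	/* now provably safe */
}

int main(void)
{
	struct rsp r = { .txq_idx = 3 };

	printf("%d %d\n", clean_ring(NULL), clean_ring(&r));	/* 0 3 */
	return 0;
}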
@@ -3888,11 +3889,8 @@ int ql_wol(struct ql_adapter *qdev)
3888 return status; 3889 return status;
3889} 3890}
3890 3891
3891static int ql_adapter_down(struct ql_adapter *qdev) 3892static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3892{ 3893{
3893 int i, status = 0;
3894
3895 ql_link_off(qdev);
3896 3894
3897 /* Don't kill the reset worker thread if we 3895 /* Don't kill the reset worker thread if we
3898 * are in the process of recovery. 3896 * are in the process of recovery.
@@ -3904,6 +3902,15 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3904 cancel_delayed_work_sync(&qdev->mpi_idc_work); 3902 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3905 cancel_delayed_work_sync(&qdev->mpi_core_to_log); 3903 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3906 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 3904 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3905}
3906
3907static int ql_adapter_down(struct ql_adapter *qdev)
3908{
3909 int i, status = 0;
3910
3911 ql_link_off(qdev);
3912
3913 ql_cancel_all_work_sync(qdev);
3907 3914
3908 for (i = 0; i < qdev->rss_ring_count; i++) 3915 for (i = 0; i < qdev->rss_ring_count; i++)
3909 napi_disable(&qdev->rx_ring[i].napi); 3916 napi_disable(&qdev->rx_ring[i].napi);
@@ -4726,6 +4733,7 @@ static void __devexit qlge_remove(struct pci_dev *pdev)
4726 struct net_device *ndev = pci_get_drvdata(pdev); 4733 struct net_device *ndev = pci_get_drvdata(pdev);
4727 struct ql_adapter *qdev = netdev_priv(ndev); 4734 struct ql_adapter *qdev = netdev_priv(ndev);
4728 del_timer_sync(&qdev->timer); 4735 del_timer_sync(&qdev->timer);
4736 ql_cancel_all_work_sync(qdev);
4729 unregister_netdev(ndev); 4737 unregister_netdev(ndev);
4730 ql_release_all(pdev); 4738 ql_release_all(pdev);
4731 pci_disable_device(pdev); 4739 pci_disable_device(pdev);
@@ -4745,13 +4753,7 @@ static void ql_eeh_close(struct net_device *ndev)
4745 4753
4746 /* Disabling the timer */ 4754 /* Disabling the timer */
4747 del_timer_sync(&qdev->timer); 4755 del_timer_sync(&qdev->timer);
4748 if (test_bit(QL_ADAPTER_UP, &qdev->flags)) 4756 ql_cancel_all_work_sync(qdev);
4749 cancel_delayed_work_sync(&qdev->asic_reset_work);
4750 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4751 cancel_delayed_work_sync(&qdev->mpi_work);
4752 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4753 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
4754 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4755 4757
4756 for (i = 0; i < qdev->rss_ring_count; i++) 4758 for (i = 0; i < qdev->rss_ring_count; i++)
4757 netif_napi_del(&qdev->rx_ring[i].napi); 4759 netif_napi_del(&qdev->rx_ring[i].napi);
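Factoring the six cancel_delayed_work_sync() calls into ql_cancel_all_work_sync() lets qlge_remove() and the EEH close path enforce the same rule as ql_adapter_down(): delayed work must be drained before the structures it touches are torn down. Schematically (a sketch of the ordering, not literal driver code):

static void qlge_teardown(struct ql_adapter *qdev)
{
	del_timer_sync(&qdev->timer);	/* stop the periodic trigger first */
	ql_cancel_all_work_sync(qdev);	/* then wait out in-flight work */
	unregister_netdev(qdev->ndev);	/* nothing can requeue by now */
}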
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 142c381e1d73..68a84198eb05 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -200,7 +200,7 @@ struct r6040_private {
200 int old_duplex; 200 int old_duplex;
201}; 201};
202 202
203static char version[] __devinitdata = KERN_INFO DRV_NAME 203static char version[] __devinitdata = DRV_NAME
204 ": RDC R6040 NAPI net driver," 204 ": RDC R6040 NAPI net driver,"
205 "version "DRV_VERSION " (" DRV_RELDATE ")"; 205 "version "DRV_VERSION " (" DRV_RELDATE ")";
206 206
@@ -224,7 +224,8 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
224} 224}
225 225
226/* Write a word of data to the PHY chip */ 226
227static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val) 227static void r6040_phy_write(void __iomem *ioaddr,
228 int phy_addr, int reg, u16 val)
228{ 229{
229 int limit = 2048; 230 int limit = 2048;
230 u16 cmd; 231 u16 cmd;
@@ -348,8 +349,8 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
348 } 349 }
349 desc->skb_ptr = skb; 350 desc->skb_ptr = skb;
350 desc->buf = cpu_to_le32(pci_map_single(lp->pdev, 351 desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
351 desc->skb_ptr->data, 352 desc->skb_ptr->data,
352 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE)); 353 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
353 desc->status = DSC_OWNER_MAC; 354 desc->status = DSC_OWNER_MAC;
354 desc = desc->vndescp; 355 desc = desc->vndescp;
355 } while (desc != lp->rx_ring); 356 } while (desc != lp->rx_ring);
@@ -491,12 +492,14 @@ static int r6040_close(struct net_device *dev)
491 492
492 /* Free Descriptor memory */ 493 /* Free Descriptor memory */
493 if (lp->rx_ring) { 494 if (lp->rx_ring) {
494 pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma); 495 pci_free_consistent(pdev,
496 RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
495 lp->rx_ring = NULL; 497 lp->rx_ring = NULL;
496 } 498 }
497 499
498 if (lp->tx_ring) { 500 if (lp->tx_ring) {
499 pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma); 501 pci_free_consistent(pdev,
502 TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
500 lp->tx_ring = NULL; 503 lp->tx_ring = NULL;
501 } 504 }
502 505
@@ -547,7 +550,7 @@ static int r6040_rx(struct net_device *dev, int limit)
547 } 550 }
548 goto next_descr; 551 goto next_descr;
549 } 552 }
550 553
551 /* Packet successfully received */ 554 /* Packet successfully received */
552 new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); 555 new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
553 if (!new_skb) { 556 if (!new_skb) {
@@ -556,13 +559,13 @@ static int r6040_rx(struct net_device *dev, int limit)
556 } 559 }
557 skb_ptr = descptr->skb_ptr; 560 skb_ptr = descptr->skb_ptr;
558 skb_ptr->dev = priv->dev; 561 skb_ptr->dev = priv->dev;
559 562
560 /* Do not count the CRC */ 563 /* Do not count the CRC */
561 skb_put(skb_ptr, descptr->len - 4); 564 skb_put(skb_ptr, descptr->len - 4);
562 pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), 565 pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
563 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE); 566 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
564 skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev); 567 skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
565 568
566 /* Send to upper layer */ 569 /* Send to upper layer */
567 netif_receive_skb(skb_ptr); 570 netif_receive_skb(skb_ptr);
568 dev->stats.rx_packets++; 571 dev->stats.rx_packets++;
@@ -710,8 +713,10 @@ static int r6040_up(struct net_device *dev)
710 return ret; 713 return ret;
711 714
712 /* improve performance (by RDC guys) */ 715 /* improve performance (by RDC guys) */
713 r6040_phy_write(ioaddr, 30, 17, (r6040_phy_read(ioaddr, 30, 17) | 0x4000)); 716 r6040_phy_write(ioaddr, 30, 17,
714 r6040_phy_write(ioaddr, 30, 17, ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000)); 717 (r6040_phy_read(ioaddr, 30, 17) | 0x4000));
718 r6040_phy_write(ioaddr, 30, 17,
719 ~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
715 r6040_phy_write(ioaddr, 0, 19, 0x0000); 720 r6040_phy_write(ioaddr, 0, 19, 0x0000);
716 r6040_phy_write(ioaddr, 0, 30, 0x01F0); 721 r6040_phy_write(ioaddr, 0, 30, 0x01F0);
717 722
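The second reflowed PHY write deserves a note: ~((~x) | 0x2000) is De Morgan's law in disguise and simply clears bit 0x2000, i.e. it equals x & ~0x2000. A standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0xffff;
	unsigned int a = ~((~reg) | 0x2000);	/* the driver's spelling */
	unsigned int b = reg & ~0x2000u;	/* the conventional one */

	printf("%#x %#x\n", a & 0xffff, b & 0xffff);	/* both 0xdfff */
	return 0;
}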
@@ -740,6 +745,9 @@ static void r6040_mac_address(struct net_device *dev)
740 iowrite16(adrp[0], ioaddr + MID_0L); 745 iowrite16(adrp[0], ioaddr + MID_0L);
741 iowrite16(adrp[1], ioaddr + MID_0M); 746 iowrite16(adrp[1], ioaddr + MID_0M);
742 iowrite16(adrp[2], ioaddr + MID_0H); 747 iowrite16(adrp[2], ioaddr + MID_0H);
748
749 /* Store MAC Address in perm_addr */
750 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
743} 751}
744 752
745static int r6040_open(struct net_device *dev) 753static int r6040_open(struct net_device *dev)
@@ -751,7 +759,7 @@ static int r6040_open(struct net_device *dev)
751 ret = request_irq(dev->irq, r6040_interrupt, 759 ret = request_irq(dev->irq, r6040_interrupt,
752 IRQF_SHARED, dev->name, dev); 760 IRQF_SHARED, dev->name, dev);
753 if (ret) 761 if (ret)
754 return ret; 762 goto out;
755 763
756 /* Set MAC address */ 764 /* Set MAC address */
757 r6040_mac_address(dev); 765 r6040_mac_address(dev);
@@ -759,30 +767,37 @@ static int r6040_open(struct net_device *dev)
759 /* Allocate Descriptor memory */ 767 /* Allocate Descriptor memory */
760 lp->rx_ring = 768 lp->rx_ring =
761 pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma); 769 pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma);
762 if (!lp->rx_ring) 770 if (!lp->rx_ring) {
763 return -ENOMEM; 771 ret = -ENOMEM;
772 goto err_free_irq;
773 }
764 774
765 lp->tx_ring = 775 lp->tx_ring =
766 pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma); 776 pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma);
767 if (!lp->tx_ring) { 777 if (!lp->tx_ring) {
768 pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring, 778 ret = -ENOMEM;
769 lp->rx_ring_dma); 779 goto err_free_rx_ring;
770 return -ENOMEM;
771 } 780 }
772 781
773 ret = r6040_up(dev); 782 ret = r6040_up(dev);
774 if (ret) { 783 if (ret)
775 pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring, 784 goto err_free_tx_ring;
776 lp->tx_ring_dma);
777 pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
778 lp->rx_ring_dma);
779 return ret;
780 }
781 785
782 napi_enable(&lp->napi); 786 napi_enable(&lp->napi);
783 netif_start_queue(dev); 787 netif_start_queue(dev);
784 788
785 return 0; 789 return 0;
790
791err_free_tx_ring:
792 pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
793 lp->tx_ring_dma);
794err_free_rx_ring:
795 pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
796 lp->rx_ring_dma);
797err_free_irq:
798 free_irq(dev->irq, dev);
799out:
800 return ret;
786} 801}
787 802
788static netdev_tx_t r6040_start_xmit(struct sk_buff *skb, 803static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
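The restructured r6040_open() is the standard kernel unwind ladder: one label per acquired resource, released in reverse order, so every failure site needs a single goto. It also fixes a real leak, since the old error paths never called free_irq(). A self-contained miniature of the idiom (malloc stands in for the IRQ and the DMA rings):

#include <stdlib.h>

static int open_device(void **irq, void **rx, void **tx)
{
	int ret = -1;

	*irq = malloc(16);		/* stands in for request_irq() */
	if (!*irq)
		goto out;
	*rx = malloc(16);		/* stands in for the RX ring */
	if (!*rx)
		goto err_free_irq;
	*tx = malloc(16);		/* stands in for the TX ring */
	if (!*tx)
		goto err_free_rx;
	return 0;			/* success: caller owns all three */

err_free_rx:
	free(*rx);
err_free_irq:
	free(*irq);
out:
	return ret;
}

int main(void)
{
	void *irq, *rx, *tx;

	if (open_device(&irq, &rx, &tx))
		return 1;
	free(tx);
	free(rx);
	free(irq);
	return 0;
}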
@@ -946,7 +961,7 @@ static const struct net_device_ops r6040_netdev_ops = {
946 .ndo_set_multicast_list = r6040_multicast_list, 961 .ndo_set_multicast_list = r6040_multicast_list,
947 .ndo_change_mtu = eth_change_mtu, 962 .ndo_change_mtu = eth_change_mtu,
948 .ndo_validate_addr = eth_validate_addr, 963 .ndo_validate_addr = eth_validate_addr,
949 .ndo_set_mac_address = eth_mac_addr, 964 .ndo_set_mac_address = eth_mac_addr,
950 .ndo_do_ioctl = r6040_ioctl, 965 .ndo_do_ioctl = r6040_ioctl,
951 .ndo_tx_timeout = r6040_tx_timeout, 966 .ndo_tx_timeout = r6040_tx_timeout,
952#ifdef CONFIG_NET_POLL_CONTROLLER 967#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1039,7 +1054,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1039 u16 *adrp; 1054 u16 *adrp;
1040 int i; 1055 int i;
1041 1056
1042 printk("%s\n", version); 1057 pr_info("%s\n", version);
1043 1058
1044 err = pci_enable_device(pdev); 1059 err = pci_enable_device(pdev);
1045 if (err) 1060 if (err)
@@ -1113,7 +1128,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1113 /* Some bootloader/BIOSes do not initialize 1128 /* Some bootloader/BIOSes do not initialize
1114 * MAC address, warn about that */ 1129 * MAC address, warn about that */
1115 if (!(adrp[0] || adrp[1] || adrp[2])) { 1130 if (!(adrp[0] || adrp[1] || adrp[2])) {
1116 netdev_warn(dev, "MAC address not initialized, generating random\n"); 1131 netdev_warn(dev, "MAC address not initialized, "
1132 "generating random\n");
1117 random_ether_addr(dev->dev_addr); 1133 random_ether_addr(dev->dev_addr);
1118 } 1134 }
1119 1135
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index a0da4a17b025..fe3b7622fba0 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1076,7 +1076,12 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1076 int ret; 1076 int ret;
1077 1077
1078 if (vlgrp && (opts2 & RxVlanTag)) { 1078 if (vlgrp && (opts2 & RxVlanTag)) {
1079 __vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling); 1079 u16 vtag = swab16(opts2 & 0xffff);
1080
1081 if (likely(polling))
1082 vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
1083 else
1084 __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
1080 ret = 0; 1085 ret = 0;
1081 } else 1086 } else
1082 ret = -1; 1087 ret = -1;
@@ -3186,6 +3191,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3186#ifdef CONFIG_R8169_VLAN 3191#ifdef CONFIG_R8169_VLAN
3187 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 3192 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3188#endif 3193#endif
3194 dev->features |= NETIF_F_GRO;
3189 3195
3190 tp->intr_mask = 0xffff; 3196 tp->intr_mask = 0xffff;
3191 tp->align = cfg->align; 3197 tp->align = cfg->align;
@@ -4450,9 +4456,8 @@ static inline int rtl8169_fragmented_frame(u32 status)
4450 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); 4456 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
4451} 4457}
4452 4458
4453static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc) 4459static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
4454{ 4460{
4455 u32 opts1 = le32_to_cpu(desc->opts1);
4456 u32 status = opts1 & RxProtoMask; 4461 u32 status = opts1 & RxProtoMask;
4457 4462
4458 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || 4463 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
@@ -4460,7 +4465,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
4460 ((status == RxProtoIP) && !(opts1 & IPFail))) 4465 ((status == RxProtoIP) && !(opts1 & IPFail)))
4461 skb->ip_summed = CHECKSUM_UNNECESSARY; 4466 skb->ip_summed = CHECKSUM_UNNECESSARY;
4462 else 4467 else
4463 skb->ip_summed = CHECKSUM_NONE; 4468 skb_checksum_none_assert(skb);
4464} 4469}
4465 4470
4466static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, 4471static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
@@ -4546,8 +4551,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4546 continue; 4551 continue;
4547 } 4552 }
4548 4553
4549 rtl8169_rx_csum(skb, desc);
4550
4551 if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) { 4554 if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
4552 pci_dma_sync_single_for_device(pdev, addr, 4555 pci_dma_sync_single_for_device(pdev, addr,
4553 pkt_size, PCI_DMA_FROMDEVICE); 4556 pkt_size, PCI_DMA_FROMDEVICE);
@@ -4558,12 +4561,13 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4558 tp->Rx_skbuff[entry] = NULL; 4561 tp->Rx_skbuff[entry] = NULL;
4559 } 4562 }
4560 4563
4564 rtl8169_rx_csum(skb, status);
4561 skb_put(skb, pkt_size); 4565 skb_put(skb, pkt_size);
4562 skb->protocol = eth_type_trans(skb, dev); 4566 skb->protocol = eth_type_trans(skb, dev);
4563 4567
4564 if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) { 4568 if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
4565 if (likely(polling)) 4569 if (likely(polling))
4566 netif_receive_skb(skb); 4570 napi_gro_receive(&tp->napi, skb);
4567 else 4571 else
4568 netif_rx(skb); 4572 netif_rx(skb);
4569 } 4573 }
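Two related rules drive these r8169 hunks. First, GRO entry points (napi_gro_receive(), vlan_gro_receive()) are only valid from NAPI poll context, so the non-polling path (e.g. netpoll) keeps the plain receive calls. Second, rtl8169_rx_csum() moved below rtl8169_try_rx_copy() because the copy path can substitute a freshly allocated skb, which would otherwise lose the already-computed ip_summed. The dispatch pattern, as a kernel-style sketch rather than literal driver code:

static void deliver(struct napi_struct *napi, struct sk_buff *skb,
		    bool in_napi_poll)
{
	if (in_napi_poll)
		napi_gro_receive(napi, skb);	/* may aggregate */
	else
		netif_rx(skb);			/* defer to softirq */
}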
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index e26e107f93e0..e68c941926f1 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1245,7 +1245,7 @@ static int rr_open(struct net_device *dev)
1245 init_timer(&rrpriv->timer); 1245 init_timer(&rrpriv->timer);
1246 rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */ 1246 rrpriv->timer.expires = RUN_AT(5*HZ); /* 5 sec. watchdog */
1247 rrpriv->timer.data = (unsigned long)dev; 1247 rrpriv->timer.data = (unsigned long)dev;
1248 rrpriv->timer.function = &rr_timer; /* timer handler */ 1248 rrpriv->timer.function = rr_timer; /* timer handler */
1249 add_timer(&rrpriv->timer); 1249 add_timer(&rrpriv->timer);
1250 1250
1251 netif_start_queue(dev); 1251 netif_start_queue(dev);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 18bc5b718bbb..c70ad515383a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -38,8 +38,6 @@
38 * Tx descriptors that can be associated with each corresponding FIFO. 38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA), 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)' 40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines the maximum number of packets that can be 41 * lro_max_pkts: This parameter defines the maximum number of packets that can be
44 * aggregated as a single large packet 42 * aggregated as a single large packet
45 * napi: This parameter is used to enable/disable NAPI (polling Rx) 43 * napi: This parameter is used to enable/disable NAPI (polling Rx)
@@ -90,7 +88,7 @@
90#include "s2io.h" 88#include "s2io.h"
91#include "s2io-regs.h" 89#include "s2io-regs.h"
92 90
93#define DRV_VERSION "2.0.26.26" 91#define DRV_VERSION "2.0.26.27"
94 92
95/* S2io Driver name & version. */ 93/* S2io Driver name & version. */
96static char s2io_driver_name[] = "Neterion"; 94static char s2io_driver_name[] = "Neterion";
@@ -496,8 +494,6 @@ S2IO_PARM_INT(rxsync_frequency, 3);
496/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */ 494/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
497S2IO_PARM_INT(intr_type, 2); 495S2IO_PARM_INT(intr_type, 2);
498/* Large receive offload feature */ 496/* Large receive offload feature */
499static unsigned int lro_enable = 1;
500module_param_named(lro, lro_enable, uint, 0);
501 497
502/* Max pkts to be aggregated by LRO at one time. If not specified, 498/* Max pkts to be aggregated by LRO at one time. If not specified,
503 * aggregation happens until we hit max IP pkt size(64K) 499 * aggregation happens until we hit max IP pkt size(64K)
@@ -5124,8 +5120,6 @@ static void s2io_set_multicast(struct net_device *dev)
5124 /* Create the new Rx filter list and update the same in H/W. */ 5120 /* Create the new Rx filter list and update the same in H/W. */
5125 i = 0; 5121 i = 0;
5126 netdev_for_each_mc_addr(ha, dev) { 5122 netdev_for_each_mc_addr(ha, dev) {
5127 memcpy(sp->usr_addrs[i].addr, ha->addr,
5128 ETH_ALEN);
5129 mac_addr = 0; 5123 mac_addr = 0;
5130 for (j = 0; j < ETH_ALEN; j++) { 5124 for (j = 0; j < ETH_ALEN; j++) {
5131 mac_addr |= ha->addr[j]; 5125 mac_addr |= ha->addr[j];
@@ -6735,13 +6729,10 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
6735 return -EINVAL; 6729 return -EINVAL;
6736 6730
6737 if (data & ETH_FLAG_LRO) { 6731 if (data & ETH_FLAG_LRO) {
6738 if (lro_enable) { 6732 if (!(dev->features & NETIF_F_LRO)) {
6739 if (!(dev->features & NETIF_F_LRO)) { 6733 dev->features |= NETIF_F_LRO;
6740 dev->features |= NETIF_F_LRO; 6734 changed = 1;
6741 changed = 1; 6735 }
6742 }
6743 } else
6744 rc = -EINVAL;
6745 } else if (dev->features & NETIF_F_LRO) { 6736 } else if (dev->features & NETIF_F_LRO) {
6746 dev->features &= ~NETIF_F_LRO; 6737 dev->features &= ~NETIF_F_LRO;
6747 changed = 1; 6738 changed = 1;
@@ -6750,7 +6741,6 @@ static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
6750 if (changed && netif_running(dev)) { 6741 if (changed && netif_running(dev)) {
6751 s2io_stop_all_tx_queue(sp); 6742 s2io_stop_all_tx_queue(sp);
6752 s2io_card_down(sp); 6743 s2io_card_down(sp);
6753 sp->lro = !!(dev->features & NETIF_F_LRO);
6754 rc = s2io_card_up(sp); 6744 rc = s2io_card_up(sp);
6755 if (rc) 6745 if (rc)
6756 s2io_reset(sp); 6746 s2io_reset(sp);
@@ -7307,7 +7297,7 @@ static int s2io_card_up(struct s2io_nic *sp)
7307 struct ring_info *ring = &mac_control->rings[i]; 7297 struct ring_info *ring = &mac_control->rings[i];
7308 7298
7309 ring->mtu = dev->mtu; 7299 ring->mtu = dev->mtu;
7310 ring->lro = sp->lro; 7300 ring->lro = !!(dev->features & NETIF_F_LRO);
7311 ret = fill_rx_buffers(sp, ring, 1); 7301 ret = fill_rx_buffers(sp, ring, 1);
7312 if (ret) { 7302 if (ret) {
7313 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", 7303 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
@@ -7341,7 +7331,7 @@ static int s2io_card_up(struct s2io_nic *sp)
7341 /* Setting its receive mode */ 7331 /* Setting its receive mode */
7342 s2io_set_multicast(dev); 7332 s2io_set_multicast(dev);
7343 7333
7344 if (sp->lro) { 7334 if (dev->features & NETIF_F_LRO) {
7345 /* Initialize max aggregatable pkts per session based on MTU */ 7335 /* Initialize max aggregatable pkts per session based on MTU */
7346 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu; 7336 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7347 /* Check if we can use (if specified) user provided value */ 7337 /* Check if we can use (if specified) user provided value */
@@ -7613,10 +7603,10 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7613 * Packet with erroneous checksum, let the 7603 * Packet with erroneous checksum, let the
7614 * upper layers deal with it. 7604 * upper layers deal with it.
7615 */ 7605 */
7616 skb->ip_summed = CHECKSUM_NONE; 7606 skb_checksum_none_assert(skb);
7617 } 7607 }
7618 } else 7608 } else
7619 skb->ip_summed = CHECKSUM_NONE; 7609 skb_checksum_none_assert(skb);
7620 7610
7621 swstats->mem_freed += skb->truesize; 7611 swstats->mem_freed += skb->truesize;
7622send_up: 7612send_up:
@@ -7911,7 +7901,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7911 else 7901 else
7912 sp->device_type = XFRAME_I_DEVICE; 7902 sp->device_type = XFRAME_I_DEVICE;
7913 7903
7914 sp->lro = lro_enable;
7915 7904
7916 /* Initialize some PCI/PCI-X fields of the NIC. */ 7905 /* Initialize some PCI/PCI-X fields of the NIC. */
7917 s2io_init_pci(sp); 7906 s2io_init_pci(sp);
@@ -8047,8 +8036,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8047 dev->netdev_ops = &s2io_netdev_ops; 8036 dev->netdev_ops = &s2io_netdev_ops;
8048 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 8037 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
8049 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 8038 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8050 if (lro_enable) 8039 dev->features |= NETIF_F_LRO;
8051 dev->features |= NETIF_F_LRO;
8052 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 8040 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8053 if (sp->high_dma_flag == true) 8041 if (sp->high_dma_flag == true)
8054 dev->features |= NETIF_F_HIGHDMA; 8042 dev->features |= NETIF_F_HIGHDMA;
@@ -8283,9 +8271,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8283 dev->name); 8271 dev->name);
8284 } 8272 }
8285 8273
8286 if (sp->lro) 8274 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8287 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", 8275 dev->name);
8288 dev->name);
8289 if (ufo) 8276 if (ufo)
8290 DBG_PRINT(ERR_DBG, 8277 DBG_PRINT(ERR_DBG,
8291 "%s: UDP Fragmentation Offload(UFO) enabled\n", 8278 "%s: UDP Fragmentation Offload(UFO) enabled\n",
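With the lro module parameter gone, s2io LRO is enabled by default and toggled at runtime through the NETIF_F_LRO feature flag, i.e. the ETHTOOL_SFLAGS path handled by s2io_ethtool_set_flags(). Roughly what `ethtool -K ethX lro on` reaches under the hood, as a hypothetical standalone program (the interface name is a placeholder, and note that SFLAGS writes the whole flag word):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value ev = {
		.cmd  = ETHTOOL_SFLAGS,
		.data = ETH_FLAG_LRO,	/* desired state of all ETH_FLAG_* bits */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ev;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}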
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 0af033533905..00b8614efe48 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -816,12 +816,6 @@ struct mac_info {
816 struct stat_block *stats_info; /* Logical address of the stat block */ 816 struct stat_block *stats_info; /* Logical address of the stat block */
817}; 817};
818 818
819/* structure representing the user defined MAC addresses */
820struct usr_addr {
821 char addr[ETH_ALEN];
822 int usage_cnt;
823};
824
825/* Default Tunable parameters of the NIC. */ 819/* Default Tunable parameters of the NIC. */
826#define DEFAULT_FIFO_0_LEN 4096 820#define DEFAULT_FIFO_0_LEN 4096
827#define DEFAULT_FIFO_1_7_LEN 512 821#define DEFAULT_FIFO_1_7_LEN 512
@@ -894,9 +888,7 @@ struct s2io_nic {
894#define ALL_MULTI 2 888#define ALL_MULTI 2
895 889
896#define MAX_ADDRS_SUPPORTED 64 890#define MAX_ADDRS_SUPPORTED 64
897 u16 usr_addr_count;
898 u16 mc_addr_count; 891 u16 mc_addr_count;
899 struct usr_addr usr_addrs[256];
900 892
901 u16 m_cast_flg; 893 u16 m_cast_flg;
902 u16 all_multi_pos; 894 u16 all_multi_pos;
@@ -971,7 +963,6 @@ struct s2io_nic {
971 963
972 unsigned long clubbed_frms_cnt; 964 unsigned long clubbed_frms_cnt;
973 unsigned long sending_both; 965 unsigned long sending_both;
974 u8 lro;
975 u16 lro_max_aggr_per_sess; 966 u16 lro_max_aggr_per_sess;
976 volatile unsigned long state; 967 volatile unsigned long state;
977 u64 general_int_mask; 968 u64 general_int_mask;
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 8e6bd45b9f31..d8249d7653c6 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -1170,7 +1170,7 @@ again:
1170 sb->ip_summed = CHECKSUM_UNNECESSARY; 1170 sb->ip_summed = CHECKSUM_UNNECESSARY;
1171 /* don't need to set sb->csum */ 1171 /* don't need to set sb->csum */
1172 } else { 1172 } else {
1173 sb->ip_summed = CHECKSUM_NONE; 1173 skb_checksum_none_assert(sb);
1174 } 1174 }
1175 } 1175 }
1176 prefetch(sb->data); 1176 prefetch(sb->data);
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8c4067af32b0..31b92f5f32cb 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1251,16 +1251,6 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
1251 return 0; 1251 return 0;
1252} 1252}
1253 1253
1254static void sc92031_ethtool_get_drvinfo(struct net_device *dev,
1255 struct ethtool_drvinfo *drvinfo)
1256{
1257 struct sc92031_priv *priv = netdev_priv(dev);
1258 struct pci_dev *pdev = priv->pdev;
1259
1260 strcpy(drvinfo->driver, SC92031_NAME);
1261 strcpy(drvinfo->bus_info, pci_name(pdev));
1262}
1263
1264static void sc92031_ethtool_get_wol(struct net_device *dev, 1254static void sc92031_ethtool_get_wol(struct net_device *dev,
1265 struct ethtool_wolinfo *wolinfo) 1255 struct ethtool_wolinfo *wolinfo)
1266{ 1256{
@@ -1382,7 +1372,6 @@ static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
1382static const struct ethtool_ops sc92031_ethtool_ops = { 1372static const struct ethtool_ops sc92031_ethtool_ops = {
1383 .get_settings = sc92031_ethtool_get_settings, 1373 .get_settings = sc92031_ethtool_get_settings,
1384 .set_settings = sc92031_ethtool_set_settings, 1374 .set_settings = sc92031_ethtool_set_settings,
1385 .get_drvinfo = sc92031_ethtool_get_drvinfo,
1386 .get_wol = sc92031_ethtool_get_wol, 1375 .get_wol = sc92031_ethtool_get_wol,
1387 .set_wol = sc92031_ethtool_set_wol, 1376 .set_wol = sc92031_ethtool_set_wol,
1388 .nway_reset = sc92031_ethtool_nway_reset, 1377 .nway_reset = sc92031_ethtool_nway_reset,
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 1047b19c60a5..ab31c7124db1 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,7 +1,8 @@
1sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o \ 1sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
2 falcon_gmac.o falcon_xmac.o mcdi_mac.o \ 2 falcon_xmac.o mcdi_mac.o \
3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ 3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
4 tenxpress.o falcon_boards.o mcdi.o mcdi_phy.o 4 tenxpress.o txc43128_phy.o falcon_boards.o \
5 mcdi.o mcdi_phy.o
5sfc-$(CONFIG_SFC_MTD) += mtd.o 6sfc-$(CONFIG_SFC_MTD) += mtd.o
6 7
7obj-$(CONFIG_SFC) += sfc.o 8obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index ba674c5ca29e..5be71f49a205 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -114,7 +114,7 @@ static struct workqueue_struct *reset_workqueue;
114 * This is only used in MSI-X interrupt mode 114 * This is only used in MSI-X interrupt mode
115 */ 115 */
116static unsigned int separate_tx_channels; 116static unsigned int separate_tx_channels;
117module_param(separate_tx_channels, uint, 0644); 117module_param(separate_tx_channels, uint, 0444);
118MODULE_PARM_DESC(separate_tx_channels, 118MODULE_PARM_DESC(separate_tx_channels,
119 "Use separate channels for TX and RX"); 119 "Use separate channels for TX and RX");
120 120
@@ -124,8 +124,9 @@ MODULE_PARM_DESC(separate_tx_channels,
124static int napi_weight = 64; 124static int napi_weight = 64;
125 125
126/* This is the time (in jiffies) between invocations of the hardware 126/* This is the time (in jiffies) between invocations of the hardware
127 * monitor, which checks for known hardware bugs and resets the 127 * monitor. On Falcon-based NICs, this will:
128 * hardware and driver as necessary. 128 * - Check the on-board hardware monitor;
129 * - Poll the link state and reconfigure the hardware as necessary.
129 */ 130 */
130unsigned int efx_monitor_interval = 1 * HZ; 131unsigned int efx_monitor_interval = 1 * HZ;
131 132
@@ -201,10 +202,13 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
201 * Utility functions and prototypes 202 * Utility functions and prototypes
202 * 203 *
203 *************************************************************************/ 204 *************************************************************************/
204static void efx_remove_channel(struct efx_channel *channel); 205
206static void efx_remove_channels(struct efx_nic *efx);
205static void efx_remove_port(struct efx_nic *efx); 207static void efx_remove_port(struct efx_nic *efx);
206static void efx_fini_napi(struct efx_nic *efx); 208static void efx_fini_napi(struct efx_nic *efx);
207static void efx_fini_channels(struct efx_nic *efx); 209static void efx_fini_struct(struct efx_nic *efx);
210static void efx_start_all(struct efx_nic *efx);
211static void efx_stop_all(struct efx_nic *efx);
208 212
209#define EFX_ASSERT_RESET_SERIALISED(efx) \ 213#define EFX_ASSERT_RESET_SERIALISED(efx) \
210 do { \ 214 do { \
@@ -248,7 +252,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
248 252
249 efx_rx_strategy(channel); 253 efx_rx_strategy(channel);
250 254
251 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); 255 efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
252 256
253 return spent; 257 return spent;
254} 258}
@@ -334,6 +338,7 @@ void efx_process_channel_now(struct efx_channel *channel)
334{ 338{
335 struct efx_nic *efx = channel->efx; 339 struct efx_nic *efx = channel->efx;
336 340
341 BUG_ON(channel->channel >= efx->n_channels);
337 BUG_ON(!channel->enabled); 342 BUG_ON(!channel->enabled);
338 343
339 /* Disable interrupts and wait for ISRs to complete */ 344 /* Disable interrupts and wait for ISRs to complete */
@@ -347,7 +352,7 @@ void efx_process_channel_now(struct efx_channel *channel)
347 napi_disable(&channel->napi_str); 352 napi_disable(&channel->napi_str);
348 353
349 /* Poll the channel */ 354 /* Poll the channel */
350 efx_process_channel(channel, EFX_EVQ_SIZE); 355 efx_process_channel(channel, channel->eventq_mask + 1);
351 356
352 /* Ack the eventq. This may cause an interrupt to be generated 357 /* Ack the eventq. This may cause an interrupt to be generated
353 * when they are reenabled */ 358 * when they are reenabled */
@@ -364,9 +369,18 @@ void efx_process_channel_now(struct efx_channel *channel)
364 */ 369 */
365static int efx_probe_eventq(struct efx_channel *channel) 370static int efx_probe_eventq(struct efx_channel *channel)
366{ 371{
372 struct efx_nic *efx = channel->efx;
373 unsigned long entries;
374
367 netif_dbg(channel->efx, probe, channel->efx->net_dev, 375 netif_dbg(channel->efx, probe, channel->efx->net_dev,
368 "chan %d create event queue\n", channel->channel); 376 "chan %d create event queue\n", channel->channel);
369 377
378 /* Build an event queue with room for one event per tx and rx buffer,
379 * plus some extra for link state events and MCDI completions. */
380 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
381 EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
382 channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
383
370 return efx_nic_probe_eventq(channel); 384 return efx_nic_probe_eventq(channel);
371} 385}
372 386
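The sizing rule introduced above gives each event queue one slot per RX and TX descriptor plus 128 spare entries for link-state events and MCDI completions, rounded up to a power of two so the mask arithmetic works. A runnable userspace check of the arithmetic (the rounding helper mirrors the kernel's roundup_pow_of_two()):

    #include <stdio.h>

    #define EFX_MIN_EVQ_SIZE 512UL
    #define EFX_MAX_EVQ_SIZE 16384UL

    static unsigned long roundup_pow_of_two(unsigned long n)
    {
            unsigned long p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            unsigned long rxq = 1024, txq = 1024; /* EFX_DEFAULT_DMAQ_SIZE */
            unsigned long entries = roundup_pow_of_two(rxq + txq + 128);

            if (entries < EFX_MIN_EVQ_SIZE)
                    entries = EFX_MIN_EVQ_SIZE;
            /* 2176 rounds up to 4096; eventq_mask = entries - 1 */
            printf("entries=%lu mask=%#lx\n", entries, entries - 1);
            return 0;
    }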
@@ -403,6 +417,63 @@ static void efx_remove_eventq(struct efx_channel *channel)
403 * 417 *
404 *************************************************************************/ 418 *************************************************************************/
405 419
420/* Allocate and initialise a channel structure, optionally copying
421 * parameters (but not resources) from an old channel structure. */
422static struct efx_channel *
423efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
424{
425 struct efx_channel *channel;
426 struct efx_rx_queue *rx_queue;
427 struct efx_tx_queue *tx_queue;
428 int j;
429
430 if (old_channel) {
431 channel = kmalloc(sizeof(*channel), GFP_KERNEL);
432 if (!channel)
433 return NULL;
434
435 *channel = *old_channel;
436
437 memset(&channel->eventq, 0, sizeof(channel->eventq));
438
439 rx_queue = &channel->rx_queue;
440 rx_queue->buffer = NULL;
441 memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
442
443 for (j = 0; j < EFX_TXQ_TYPES; j++) {
444 tx_queue = &channel->tx_queue[j];
445 if (tx_queue->channel)
446 tx_queue->channel = channel;
447 tx_queue->buffer = NULL;
448 memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
449 }
450 } else {
451 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
452 if (!channel)
453 return NULL;
454
455 channel->efx = efx;
456 channel->channel = i;
457
458 for (j = 0; j < EFX_TXQ_TYPES; j++) {
459 tx_queue = &channel->tx_queue[j];
460 tx_queue->efx = efx;
461 tx_queue->queue = i * EFX_TXQ_TYPES + j;
462 tx_queue->channel = channel;
463 }
464 }
465
466 spin_lock_init(&channel->tx_stop_lock);
467 atomic_set(&channel->tx_stop_count, 1);
468
469 rx_queue = &channel->rx_queue;
470 rx_queue->efx = efx;
471 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
472 (unsigned long)rx_queue);
473
474 return channel;
475}
476
406static int efx_probe_channel(struct efx_channel *channel) 477static int efx_probe_channel(struct efx_channel *channel)
407{ 478{
408 struct efx_tx_queue *tx_queue; 479 struct efx_tx_queue *tx_queue;
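efx_alloc_channel() above separates two kinds of state: parameters, which the clone path copies wholesale from the old channel, and hardware resources (event queue, descriptor rings, buffers), which it deliberately zeroes so the new channel is probed from scratch. The pattern in miniature, with illustrative names:

    struct ring {
            void *buffer;    /* per-instance DMA ring: a resource */
            unsigned size;   /* ring size: a parameter */
    };

    static void clone_ring(struct ring *dst, const struct ring *src)
    {
            *dst = *src;         /* copy the parameters... */
            dst->buffer = NULL;  /* ...but never share resource handles */
    }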
@@ -459,11 +530,38 @@ static void efx_set_channel_names(struct efx_nic *efx)
459 number -= efx->n_rx_channels; 530 number -= efx->n_rx_channels;
460 } 531 }
461 } 532 }
462 snprintf(channel->name, sizeof(channel->name), 533 snprintf(efx->channel_name[channel->channel],
534 sizeof(efx->channel_name[0]),
463 "%s%s-%d", efx->name, type, number); 535 "%s%s-%d", efx->name, type, number);
464 } 536 }
465} 537}
466 538
539static int efx_probe_channels(struct efx_nic *efx)
540{
541 struct efx_channel *channel;
542 int rc;
543
544 /* Restart special buffer allocation */
545 efx->next_buffer_table = 0;
546
547 efx_for_each_channel(channel, efx) {
548 rc = efx_probe_channel(channel);
549 if (rc) {
550 netif_err(efx, probe, efx->net_dev,
551 "failed to create channel %d\n",
552 channel->channel);
553 goto fail;
554 }
555 }
556 efx_set_channel_names(efx);
557
558 return 0;
559
560fail:
561 efx_remove_channels(efx);
562 return rc;
563}
564
467/* Channels are shutdown and reinitialised whilst the NIC is running 565/* Channels are shutdown and reinitialised whilst the NIC is running
468 * to propagate configuration changes (mtu, checksum offload), or 566 * to propagate configuration changes (mtu, checksum offload), or
469 * to clear hardware error conditions 567 * to clear hardware error conditions
@@ -601,6 +699,75 @@ static void efx_remove_channel(struct efx_channel *channel)
601 efx_remove_eventq(channel); 699 efx_remove_eventq(channel);
602} 700}
603 701
702static void efx_remove_channels(struct efx_nic *efx)
703{
704 struct efx_channel *channel;
705
706 efx_for_each_channel(channel, efx)
707 efx_remove_channel(channel);
708}
709
710int
711efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
712{
713 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
714 u32 old_rxq_entries, old_txq_entries;
715 unsigned i;
716 int rc;
717
718 efx_stop_all(efx);
719 efx_fini_channels(efx);
720
721 /* Clone channels */
722 memset(other_channel, 0, sizeof(other_channel));
723 for (i = 0; i < efx->n_channels; i++) {
724 channel = efx_alloc_channel(efx, i, efx->channel[i]);
725 if (!channel) {
726 rc = -ENOMEM;
727 goto out;
728 }
729 other_channel[i] = channel;
730 }
731
732 /* Swap entry counts and channel pointers */
733 old_rxq_entries = efx->rxq_entries;
734 old_txq_entries = efx->txq_entries;
735 efx->rxq_entries = rxq_entries;
736 efx->txq_entries = txq_entries;
737 for (i = 0; i < efx->n_channels; i++) {
738 channel = efx->channel[i];
739 efx->channel[i] = other_channel[i];
740 other_channel[i] = channel;
741 }
742
743 rc = efx_probe_channels(efx);
744 if (rc)
745 goto rollback;
746
747 /* Destroy old channels */
748 for (i = 0; i < efx->n_channels; i++)
749 efx_remove_channel(other_channel[i]);
750out:
751 /* Free unused channel structures */
752 for (i = 0; i < efx->n_channels; i++)
753 kfree(other_channel[i]);
754
755 efx_init_channels(efx);
756 efx_start_all(efx);
757 return rc;
758
759rollback:
760 /* Swap back */
761 efx->rxq_entries = old_rxq_entries;
762 efx->txq_entries = old_txq_entries;
763 for (i = 0; i < efx->n_channels; i++) {
764 channel = efx->channel[i];
765 efx->channel[i] = other_channel[i];
766 other_channel[i] = channel;
767 }
768 goto out;
769}
770
604void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) 771void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
605{ 772{
606 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); 773 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
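efx_realloc_channels() is built around a stage/swap/rollback shape: clone every channel, swap the clone set in, probe, and on failure swap the originals back so a mid-way allocation error leaves the device exactly as it was. The control flow reduced to its skeleton (helper names are illustrative, not from the driver):

    int realloc_set(struct obj *live[], unsigned n)
    {
            struct obj *other[MAX_OBJS] = { NULL };
            unsigned i;
            int rc = 0;

            for (i = 0; i < n; i++) {
                    other[i] = clone(live[i]);
                    if (!other[i]) {
                            rc = -ENOMEM;
                            goto out;
                    }
            }

            swap_sets(live, other, n);      /* clones go live */
            rc = probe_set(live, n);
            if (rc)
                    goto rollback;
            for (i = 0; i < n; i++)
                    release(other[i]);      /* old hardware resources */
    out:
            for (i = 0; i < n; i++)
                    free_obj(other[i]);     /* whichever set lost */
            return rc;

    rollback:
            swap_sets(live, other, n);      /* originals go live again */
            goto out;
    }

Note that the final kfree() loop in the real function runs on both paths, which is why the swap is done a second time before jumping back to out.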
@@ -761,7 +928,7 @@ static int efx_probe_port(struct efx_nic *efx)
761 /* Connect up MAC/PHY operations table */ 928 /* Connect up MAC/PHY operations table */
762 rc = efx->type->probe_port(efx); 929 rc = efx->type->probe_port(efx);
763 if (rc) 930 if (rc)
764 goto err; 931 return rc;
765 932
766 /* Sanity check MAC address */ 933 /* Sanity check MAC address */
767 if (is_valid_ether_addr(efx->mac_address)) { 934 if (is_valid_ether_addr(efx->mac_address)) {
@@ -782,7 +949,7 @@ static int efx_probe_port(struct efx_nic *efx)
782 return 0; 949 return 0;
783 950
784 err: 951 err:
785 efx_remove_port(efx); 952 efx->type->remove_port(efx);
786 return rc; 953 return rc;
787} 954}
788 955
@@ -1050,7 +1217,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1050 efx->n_rx_channels = efx->n_channels; 1217 efx->n_rx_channels = efx->n_channels;
1051 } 1218 }
1052 for (i = 0; i < n_channels; i++) 1219 for (i = 0; i < n_channels; i++)
1053 efx->channel[i].irq = xentries[i].vector; 1220 efx_get_channel(efx, i)->irq =
1221 xentries[i].vector;
1054 } else { 1222 } else {
1055 /* Fall back to single channel MSI */ 1223 /* Fall back to single channel MSI */
1056 efx->interrupt_mode = EFX_INT_MODE_MSI; 1224 efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -1066,7 +1234,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1066 efx->n_tx_channels = 1; 1234 efx->n_tx_channels = 1;
1067 rc = pci_enable_msi(efx->pci_dev); 1235 rc = pci_enable_msi(efx->pci_dev);
1068 if (rc == 0) { 1236 if (rc == 0) {
1069 efx->channel[0].irq = efx->pci_dev->irq; 1237 efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
1070 } else { 1238 } else {
1071 netif_err(efx, drv, efx->net_dev, 1239 netif_err(efx, drv, efx->net_dev,
1072 "could not enable MSI\n"); 1240 "could not enable MSI\n");
@@ -1097,26 +1265,32 @@ static void efx_remove_interrupts(struct efx_nic *efx)
1097 efx->legacy_irq = 0; 1265 efx->legacy_irq = 0;
1098} 1266}
1099 1267
1268struct efx_tx_queue *
1269efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
1270{
1271 unsigned tx_channel_offset =
1272 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1273 EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
1274 type >= EFX_TXQ_TYPES);
1275 return &efx->channel[tx_channel_offset + index]->tx_queue[type];
1276}
1277
1100static void efx_set_channels(struct efx_nic *efx) 1278static void efx_set_channels(struct efx_nic *efx)
1101{ 1279{
1102 struct efx_channel *channel; 1280 struct efx_channel *channel;
1103 struct efx_tx_queue *tx_queue; 1281 struct efx_tx_queue *tx_queue;
1104 struct efx_rx_queue *rx_queue;
1105 unsigned tx_channel_offset = 1282 unsigned tx_channel_offset =
1106 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; 1283 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1107 1284
1285 /* Channel pointers were set in efx_init_struct() but we now
1286 * need to clear them for TX queues in any RX-only channels. */
1108 efx_for_each_channel(channel, efx) { 1287 efx_for_each_channel(channel, efx) {
1109 if (channel->channel - tx_channel_offset < efx->n_tx_channels) { 1288 if (channel->channel - tx_channel_offset >=
1110 channel->tx_queue = &efx->tx_queue[ 1289 efx->n_tx_channels) {
1111 (channel->channel - tx_channel_offset) *
1112 EFX_TXQ_TYPES];
1113 efx_for_each_channel_tx_queue(tx_queue, channel) 1290 efx_for_each_channel_tx_queue(tx_queue, channel)
1114 tx_queue->channel = channel; 1291 tx_queue->channel = NULL;
1115 } 1292 }
1116 } 1293 }
1117
1118 efx_for_each_rx_queue(rx_queue, efx)
1119 rx_queue->channel = &efx->channel[rx_queue->queue];
1120} 1294}
1121 1295
1122static int efx_probe_nic(struct efx_nic *efx) 1296static int efx_probe_nic(struct efx_nic *efx)
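efx_get_tx_queue() encodes the channel layout directly: with separate_tx_channels set, the n_tx_channels TX channels sit at the tail of the channel array, so TX queue index i of a given type lives at channel[offset + i].tx_queue[type]. A runnable check of the offset arithmetic with example values:

    #include <assert.h>

    int main(void)
    {
            unsigned n_channels = 4, n_tx_channels = 2;
            unsigned separate_tx_channels = 1;
            unsigned offset = separate_tx_channels ?
                              n_channels - n_tx_channels : 0;

            assert(offset + 0 == 2);  /* TX index 0 -> channel 2 */
            assert(offset + 1 == 3);  /* TX index 1 -> channel 3 */
            return 0;
    }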
@@ -1165,40 +1339,37 @@ static void efx_remove_nic(struct efx_nic *efx)
1165 1339
1166static int efx_probe_all(struct efx_nic *efx) 1340static int efx_probe_all(struct efx_nic *efx)
1167{ 1341{
1168 struct efx_channel *channel;
1169 int rc; 1342 int rc;
1170 1343
1171 /* Create NIC */
1172 rc = efx_probe_nic(efx); 1344 rc = efx_probe_nic(efx);
1173 if (rc) { 1345 if (rc) {
1174 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); 1346 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1175 goto fail1; 1347 goto fail1;
1176 } 1348 }
1177 1349
1178 /* Create port */
1179 rc = efx_probe_port(efx); 1350 rc = efx_probe_port(efx);
1180 if (rc) { 1351 if (rc) {
1181 netif_err(efx, probe, efx->net_dev, "failed to create port\n"); 1352 netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1182 goto fail2; 1353 goto fail2;
1183 } 1354 }
1184 1355
1185 /* Create channels */ 1356 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
1186 efx_for_each_channel(channel, efx) { 1357 rc = efx_probe_channels(efx);
1187 rc = efx_probe_channel(channel); 1358 if (rc)
1188 if (rc) { 1359 goto fail3;
1189 netif_err(efx, probe, efx->net_dev, 1360
1190 "failed to create channel %d\n", 1361 rc = efx_probe_filters(efx);
1191 channel->channel); 1362 if (rc) {
1192 goto fail3; 1363 netif_err(efx, probe, efx->net_dev,
1193 } 1364 "failed to create filter tables\n");
1365 goto fail4;
1194 } 1366 }
1195 efx_set_channel_names(efx);
1196 1367
1197 return 0; 1368 return 0;
1198 1369
1370 fail4:
1371 efx_remove_channels(efx);
1199 fail3: 1372 fail3:
1200 efx_for_each_channel(channel, efx)
1201 efx_remove_channel(channel);
1202 efx_remove_port(efx); 1373 efx_remove_port(efx);
1203 fail2: 1374 fail2:
1204 efx_remove_nic(efx); 1375 efx_remove_nic(efx);
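The reworked efx_probe_all() keeps the usual goto-unwind shape: each failN label undoes exactly the steps that succeeded before it, in reverse order of construction, so adding the filter stage only meant inserting fail4 above fail3. The idiom in isolation (probe/remove names are illustrative):

    int probe_all(void)
    {
            int rc;

            rc = probe_nic();
            if (rc)
                    goto fail1;
            rc = probe_port();
            if (rc)
                    goto fail2;
            rc = probe_channels();
            if (rc)
                    goto fail3;
            return 0;

    fail3:
            remove_port();
    fail2:
            remove_nic();
    fail1:
            return rc;
    }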
@@ -1328,10 +1499,8 @@ static void efx_stop_all(struct efx_nic *efx)
1328 1499
1329static void efx_remove_all(struct efx_nic *efx) 1500static void efx_remove_all(struct efx_nic *efx)
1330{ 1501{
1331 struct efx_channel *channel; 1502 efx_remove_filters(efx);
1332 1503 efx_remove_channels(efx);
1333 efx_for_each_channel(channel, efx)
1334 efx_remove_channel(channel);
1335 efx_remove_port(efx); 1504 efx_remove_port(efx);
1336 efx_remove_nic(efx); 1505 efx_remove_nic(efx);
1337} 1506}
@@ -1355,20 +1524,20 @@ static unsigned irq_mod_ticks(int usecs, int resolution)
1355void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, 1524void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
1356 bool rx_adaptive) 1525 bool rx_adaptive)
1357{ 1526{
1358 struct efx_tx_queue *tx_queue; 1527 struct efx_channel *channel;
1359 struct efx_rx_queue *rx_queue;
1360 unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION); 1528 unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
1361 unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION); 1529 unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
1362 1530
1363 EFX_ASSERT_RESET_SERIALISED(efx); 1531 EFX_ASSERT_RESET_SERIALISED(efx);
1364 1532
1365 efx_for_each_tx_queue(tx_queue, efx)
1366 tx_queue->channel->irq_moderation = tx_ticks;
1367
1368 efx->irq_rx_adaptive = rx_adaptive; 1533 efx->irq_rx_adaptive = rx_adaptive;
1369 efx->irq_rx_moderation = rx_ticks; 1534 efx->irq_rx_moderation = rx_ticks;
1370 efx_for_each_rx_queue(rx_queue, efx) 1535 efx_for_each_channel(channel, efx) {
1371 rx_queue->channel->irq_moderation = rx_ticks; 1536 if (efx_channel_get_rx_queue(channel))
1537 channel->irq_moderation = rx_ticks;
1538 else if (efx_channel_get_tx_queue(channel, 0))
1539 channel->irq_moderation = tx_ticks;
1540 }
1372} 1541}
1373 1542
1374/************************************************************************** 1543/**************************************************************************
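With the per-queue lists gone, moderation is now decided per channel: channels with an RX queue take the RX setting, TX-only channels take the TX setting. irq_mod_ticks() itself is not shown in this hunk; a plausible implementation (an assumption, not quoted from the driver) converts microseconds to hardware ticks without rounding a non-zero request down to zero:

    static unsigned irq_mod_ticks(int usecs, int resolution)
    {
            if (usecs <= 0)
                    return 0;       /* moderation disabled */
            if (usecs < resolution)
                    return 1;       /* smallest non-zero setting */
            return usecs / resolution;
    }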
@@ -1377,8 +1546,7 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
1377 * 1546 *
1378 **************************************************************************/ 1547 **************************************************************************/
1379 1548
1380/* Run periodically off the general workqueue. Serialised against 1549/* Run periodically off the general workqueue */
1381 * efx_reconfigure_port via the mac_lock */
1382static void efx_monitor(struct work_struct *data) 1550static void efx_monitor(struct work_struct *data)
1383{ 1551{
1384 struct efx_nic *efx = container_of(data, struct efx_nic, 1552 struct efx_nic *efx = container_of(data, struct efx_nic,
@@ -1391,16 +1559,13 @@ static void efx_monitor(struct work_struct *data)
1391 1559
1392 /* If the mac_lock is already held then it is likely a port 1560 /* If the mac_lock is already held then it is likely a port
1393 * reconfiguration is already in place, which will likely do 1561 * reconfiguration is already in place, which will likely do
1394 * most of the work of check_hw() anyway. */ 1562 * most of the work of monitor() anyway. */
1395 if (!mutex_trylock(&efx->mac_lock)) 1563 if (mutex_trylock(&efx->mac_lock)) {
1396 goto out_requeue; 1564 if (efx->port_enabled)
1397 if (!efx->port_enabled) 1565 efx->type->monitor(efx);
1398 goto out_unlock; 1566 mutex_unlock(&efx->mac_lock);
1399 efx->type->monitor(efx); 1567 }
1400 1568
1401out_unlock:
1402 mutex_unlock(&efx->mac_lock);
1403out_requeue:
1404 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1569 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1405 efx_monitor_interval); 1570 efx_monitor_interval);
1406} 1571}
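The rewrite above trades the goto ladder for the common trylock-and-skip idiom: if mac_lock is contended, whoever holds it is already reconfiguring the port and will do equivalent work, so this pass is simply skipped and the work re-armed. A userspace analogue of the shape, using pthreads:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t mac_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool port_enabled = true;

    static void monitor_once(void (*poll_hw)(void))
    {
            if (pthread_mutex_trylock(&mac_lock) == 0) {
                    if (port_enabled)
                            poll_hw();
                    pthread_mutex_unlock(&mac_lock);
            }
            /* the caller re-queues the delayed work either way */
    }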
@@ -1546,11 +1711,11 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struc
1546 stats->tx_packets = mac_stats->tx_packets; 1711 stats->tx_packets = mac_stats->tx_packets;
1547 stats->rx_bytes = mac_stats->rx_bytes; 1712 stats->rx_bytes = mac_stats->rx_bytes;
1548 stats->tx_bytes = mac_stats->tx_bytes; 1713 stats->tx_bytes = mac_stats->tx_bytes;
1714 stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
1549 stats->multicast = mac_stats->rx_multicast; 1715 stats->multicast = mac_stats->rx_multicast;
1550 stats->collisions = mac_stats->tx_collision; 1716 stats->collisions = mac_stats->tx_collision;
1551 stats->rx_length_errors = (mac_stats->rx_gtjumbo + 1717 stats->rx_length_errors = (mac_stats->rx_gtjumbo +
1552 mac_stats->rx_length_error); 1718 mac_stats->rx_length_error);
1553 stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
1554 stats->rx_crc_errors = mac_stats->rx_bad; 1719 stats->rx_crc_errors = mac_stats->rx_bad;
1555 stats->rx_frame_errors = mac_stats->rx_align_error; 1720 stats->rx_frame_errors = mac_stats->rx_align_error;
1556 stats->rx_fifo_errors = mac_stats->rx_overflow; 1721 stats->rx_fifo_errors = mac_stats->rx_overflow;
@@ -1767,6 +1932,7 @@ fail_registered:
1767 1932
1768static void efx_unregister_netdev(struct efx_nic *efx) 1933static void efx_unregister_netdev(struct efx_nic *efx)
1769{ 1934{
1935 struct efx_channel *channel;
1770 struct efx_tx_queue *tx_queue; 1936 struct efx_tx_queue *tx_queue;
1771 1937
1772 if (!efx->net_dev) 1938 if (!efx->net_dev)
@@ -1777,8 +1943,10 @@ static void efx_unregister_netdev(struct efx_nic *efx)
1777 /* Free up any skbs still remaining. This has to happen before 1943 /* Free up any skbs still remaining. This has to happen before
1778 * we try to unregister the netdev as running their destructors 1944 * we try to unregister the netdev as running their destructors
1779 * may be needed to get the device ref. count to 0. */ 1945 * may be needed to get the device ref. count to 0. */
1780 efx_for_each_tx_queue(tx_queue, efx) 1946 efx_for_each_channel(channel, efx) {
1781 efx_release_tx_buffers(tx_queue); 1947 efx_for_each_channel_tx_queue(tx_queue, channel)
1948 efx_release_tx_buffers(tx_queue);
1949 }
1782 1950
1783 if (efx_dev_registered(efx)) { 1951 if (efx_dev_registered(efx)) {
1784 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 1952 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
@@ -1841,6 +2009,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
1841 efx->mac_op->reconfigure(efx); 2009 efx->mac_op->reconfigure(efx);
1842 2010
1843 efx_init_channels(efx); 2011 efx_init_channels(efx);
2012 efx_restore_filters(efx);
1844 2013
1845 mutex_unlock(&efx->spi_lock); 2014 mutex_unlock(&efx->spi_lock);
1846 mutex_unlock(&efx->mac_lock); 2015 mutex_unlock(&efx->mac_lock);
@@ -2037,9 +2206,6 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
2037static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, 2206static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2038 struct pci_dev *pci_dev, struct net_device *net_dev) 2207 struct pci_dev *pci_dev, struct net_device *net_dev)
2039{ 2208{
2040 struct efx_channel *channel;
2041 struct efx_tx_queue *tx_queue;
2042 struct efx_rx_queue *rx_queue;
2043 int i; 2209 int i;
2044 2210
2045 /* Initialise common structures */ 2211 /* Initialise common structures */
@@ -2068,36 +2234,13 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2068 INIT_WORK(&efx->mac_work, efx_mac_work); 2234 INIT_WORK(&efx->mac_work, efx_mac_work);
2069 2235
2070 for (i = 0; i < EFX_MAX_CHANNELS; i++) { 2236 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
2071 channel = &efx->channel[i]; 2237 efx->channel[i] = efx_alloc_channel(efx, i, NULL);
2072 channel->efx = efx; 2238 if (!efx->channel[i])
2073 channel->channel = i; 2239 goto fail;
2074 channel->work_pending = false;
2075 spin_lock_init(&channel->tx_stop_lock);
2076 atomic_set(&channel->tx_stop_count, 1);
2077 }
2078 for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
2079 tx_queue = &efx->tx_queue[i];
2080 tx_queue->efx = efx;
2081 tx_queue->queue = i;
2082 tx_queue->buffer = NULL;
2083 tx_queue->channel = &efx->channel[0]; /* for safety */
2084 tx_queue->tso_headers_free = NULL;
2085 }
2086 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
2087 rx_queue = &efx->rx_queue[i];
2088 rx_queue->efx = efx;
2089 rx_queue->queue = i;
2090 rx_queue->channel = &efx->channel[0]; /* for safety */
2091 rx_queue->buffer = NULL;
2092 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
2093 (unsigned long)rx_queue);
2094 } 2240 }
2095 2241
2096 efx->type = type; 2242 efx->type = type;
2097 2243
2098 /* As close as we can get to guaranteeing that we don't overflow */
2099 BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
2100
2101 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); 2244 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
2102 2245
2103 /* Higher numbered interrupt modes are less capable! */ 2246 /* Higher numbered interrupt modes are less capable! */
@@ -2109,13 +2252,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2109 pci_name(pci_dev)); 2252 pci_name(pci_dev));
2110 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); 2253 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
2111 if (!efx->workqueue) 2254 if (!efx->workqueue)
2112 return -ENOMEM; 2255 goto fail;
2113 2256
2114 return 0; 2257 return 0;
2258
2259fail:
2260 efx_fini_struct(efx);
2261 return -ENOMEM;
2115} 2262}
2116 2263
2117static void efx_fini_struct(struct efx_nic *efx) 2264static void efx_fini_struct(struct efx_nic *efx)
2118{ 2265{
2266 int i;
2267
2268 for (i = 0; i < EFX_MAX_CHANNELS; i++)
2269 kfree(efx->channel[i]);
2270
2119 if (efx->workqueue) { 2271 if (efx->workqueue) {
2120 destroy_workqueue(efx->workqueue); 2272 destroy_workqueue(efx->workqueue);
2121 efx->workqueue = NULL; 2273 efx->workqueue = NULL;
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 060dc952a0fd..f502b14eb22c 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -12,6 +12,7 @@
12#define EFX_EFX_H 12#define EFX_EFX_H
13 13
14#include "net_driver.h" 14#include "net_driver.h"
15#include "filter.h"
15 16
16/* PCI IDs */ 17/* PCI IDs */
17#define EFX_VENDID_SFC 0x1924 18#define EFX_VENDID_SFC 0x1924
@@ -37,8 +38,6 @@ efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
37extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 38extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
38extern void efx_stop_queue(struct efx_channel *channel); 39extern void efx_stop_queue(struct efx_channel *channel);
39extern void efx_wake_queue(struct efx_channel *channel); 40extern void efx_wake_queue(struct efx_channel *channel);
40#define EFX_TXQ_SIZE 1024
41#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
42 41
43/* RX */ 42/* RX */
44extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 43extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -53,13 +52,36 @@ extern void __efx_rx_packet(struct efx_channel *channel,
53extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 52extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
54 unsigned int len, bool checksummed, bool discard); 53 unsigned int len, bool checksummed, bool discard);
55extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); 54extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
56#define EFX_RXQ_SIZE 1024 55
57#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1) 56#define EFX_MAX_DMAQ_SIZE 4096UL
57#define EFX_DEFAULT_DMAQ_SIZE 1024UL
58#define EFX_MIN_DMAQ_SIZE 512UL
59
60#define EFX_MAX_EVQ_SIZE 16384UL
61#define EFX_MIN_EVQ_SIZE 512UL
62
63/* The smallest [rt]xq_entries that the driver supports. Callers of
64 * efx_wake_queue() assume that they can subsequently send at least one
65 * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
66#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
67
68/* Filters */
69extern int efx_probe_filters(struct efx_nic *efx);
70extern void efx_restore_filters(struct efx_nic *efx);
71extern void efx_remove_filters(struct efx_nic *efx);
72extern int efx_filter_insert_filter(struct efx_nic *efx,
73 struct efx_filter_spec *spec,
74 bool replace);
75extern int efx_filter_remove_filter(struct efx_nic *efx,
76 struct efx_filter_spec *spec);
77extern void efx_filter_table_clear(struct efx_nic *efx,
78 enum efx_filter_table_id table_id,
79 enum efx_filter_priority priority);
58 80
59/* Channels */ 81/* Channels */
60extern void efx_process_channel_now(struct efx_channel *channel); 82extern void efx_process_channel_now(struct efx_channel *channel);
61#define EFX_EVQ_SIZE 4096 83extern int
62#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1) 84efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
63 85
64/* Ports */ 86/* Ports */
65extern int efx_reconfigure_port(struct efx_nic *efx); 87extern int efx_reconfigure_port(struct efx_nic *efx);
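The EFX_MIN_RING_SIZE bound can be checked by hand: with MAX_SKB_FRAGS at its common value of 17 (an assumption; it is configuration-dependent), 2 * 3 * 17 = 102, which rounds up to 128 descriptors. Reusing the roundup helper from the event-queue sketch earlier:

    #include <stdio.h>

    int main(void)
    {
            unsigned long max_skb_frags = 17;  /* assumed typical value */
            unsigned long min_ring =
                    roundup_pow_of_two(2 * 3 * max_skb_frags);

            printf("EFX_MIN_RING_SIZE = %lu\n", min_ring);  /* 128 */
            return 0;
    }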
@@ -81,8 +103,6 @@ extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
81extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); 103extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
82extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, 104extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
83 int rx_usecs, bool rx_adaptive); 105 int rx_usecs, bool rx_adaptive);
84extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
85extern void efx_hex_dump(const u8 *, unsigned int, const char *);
86 106
87/* Dummy PHY ops for PHY drivers */ 107/* Dummy PHY ops for PHY drivers */
88extern int efx_port_dummy_op_int(struct efx_nic *efx); 108extern int efx_port_dummy_op_int(struct efx_nic *efx);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index fd19d6ab97a2..c95328fa3ee8 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -15,6 +15,7 @@
15#include "workarounds.h" 15#include "workarounds.h"
16#include "selftest.h" 16#include "selftest.h"
17#include "efx.h" 17#include "efx.h"
18#include "filter.h"
18#include "nic.h" 19#include "nic.h"
19#include "spi.h" 20#include "spi.h"
20#include "mdio_10g.h" 21#include "mdio_10g.h"
@@ -328,9 +329,10 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
328 unsigned int test_index, 329 unsigned int test_index,
329 struct ethtool_string *strings, u64 *data) 330 struct ethtool_string *strings, u64 *data)
330{ 331{
332 struct efx_channel *channel = efx_get_channel(efx, 0);
331 struct efx_tx_queue *tx_queue; 333 struct efx_tx_queue *tx_queue;
332 334
333 efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) { 335 efx_for_each_channel_tx_queue(tx_queue, channel) {
334 efx_fill_test(test_index++, strings, data, 336 efx_fill_test(test_index++, strings, data,
335 &lb_tests->tx_sent[tx_queue->queue], 337 &lb_tests->tx_sent[tx_queue->queue],
336 EFX_TX_QUEUE_NAME(tx_queue), 338 EFX_TX_QUEUE_NAME(tx_queue),
@@ -550,9 +552,22 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
550static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data) 552static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
551{ 553{
552 struct efx_nic *efx = netdev_priv(net_dev); 554 struct efx_nic *efx = netdev_priv(net_dev);
553 u32 supported = efx->type->offload_features & ETH_FLAG_RXHASH; 555 u32 supported = (efx->type->offload_features &
556 (ETH_FLAG_RXHASH | ETH_FLAG_NTUPLE));
557 int rc;
558
559 rc = ethtool_op_set_flags(net_dev, data, supported);
560 if (rc)
561 return rc;
554 562
555 return ethtool_op_set_flags(net_dev, data, supported); 563 if (!(data & ETH_FLAG_NTUPLE)) {
564 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP,
565 EFX_FILTER_PRI_MANUAL);
566 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC,
567 EFX_FILTER_PRI_MANUAL);
568 }
569
570 return 0;
556} 571}
557 572
558static void efx_ethtool_self_test(struct net_device *net_dev, 573static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -673,15 +688,15 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
673 struct ethtool_coalesce *coalesce) 688 struct ethtool_coalesce *coalesce)
674{ 689{
675 struct efx_nic *efx = netdev_priv(net_dev); 690 struct efx_nic *efx = netdev_priv(net_dev);
676 struct efx_tx_queue *tx_queue;
677 struct efx_channel *channel; 691 struct efx_channel *channel;
678 692
679 memset(coalesce, 0, sizeof(*coalesce)); 693 memset(coalesce, 0, sizeof(*coalesce));
680 694
681 /* Find lowest IRQ moderation across all used TX queues */ 695 /* Find lowest IRQ moderation across all used TX queues */
682 coalesce->tx_coalesce_usecs_irq = ~((u32) 0); 696 coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
683 efx_for_each_tx_queue(tx_queue, efx) { 697 efx_for_each_channel(channel, efx) {
684 channel = tx_queue->channel; 698 if (!efx_channel_get_tx_queue(channel, 0))
699 continue;
685 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { 700 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
686 if (channel->channel < efx->n_rx_channels) 701 if (channel->channel < efx->n_rx_channels)
687 coalesce->tx_coalesce_usecs_irq = 702 coalesce->tx_coalesce_usecs_irq =
@@ -708,7 +723,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
708{ 723{
709 struct efx_nic *efx = netdev_priv(net_dev); 724 struct efx_nic *efx = netdev_priv(net_dev);
710 struct efx_channel *channel; 725 struct efx_channel *channel;
711 struct efx_tx_queue *tx_queue;
712 unsigned tx_usecs, rx_usecs, adaptive; 726 unsigned tx_usecs, rx_usecs, adaptive;
713 727
714 if (coalesce->use_adaptive_tx_coalesce) 728 if (coalesce->use_adaptive_tx_coalesce)
@@ -725,8 +739,9 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
725 adaptive = coalesce->use_adaptive_rx_coalesce; 739 adaptive = coalesce->use_adaptive_rx_coalesce;
726 740
727 /* If the channel is shared only allow RX parameters to be set */ 741 /* If the channel is shared only allow RX parameters to be set */
728 efx_for_each_tx_queue(tx_queue, efx) { 742 efx_for_each_channel(channel, efx) {
729 if ((tx_queue->channel->channel < efx->n_rx_channels) && 743 if (efx_channel_get_rx_queue(channel) &&
744 efx_channel_get_tx_queue(channel, 0) &&
730 tx_usecs) { 745 tx_usecs) {
731 netif_err(efx, drv, efx->net_dev, "Channel is shared. " 746 netif_err(efx, drv, efx->net_dev, "Channel is shared. "
732 "Only RX coalescing may be set\n"); 747 "Only RX coalescing may be set\n");
@@ -741,6 +756,42 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
741 return 0; 756 return 0;
742} 757}
743 758
759static void efx_ethtool_get_ringparam(struct net_device *net_dev,
760 struct ethtool_ringparam *ring)
761{
762 struct efx_nic *efx = netdev_priv(net_dev);
763
764 ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
765 ring->tx_max_pending = EFX_MAX_DMAQ_SIZE;
766 ring->rx_mini_max_pending = 0;
767 ring->rx_jumbo_max_pending = 0;
768 ring->rx_pending = efx->rxq_entries;
769 ring->tx_pending = efx->txq_entries;
770 ring->rx_mini_pending = 0;
771 ring->rx_jumbo_pending = 0;
772}
773
774static int efx_ethtool_set_ringparam(struct net_device *net_dev,
775 struct ethtool_ringparam *ring)
776{
777 struct efx_nic *efx = netdev_priv(net_dev);
778
779 if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
780 ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
781 ring->tx_pending > EFX_MAX_DMAQ_SIZE)
782 return -EINVAL;
783
784 if (ring->rx_pending < EFX_MIN_RING_SIZE ||
785 ring->tx_pending < EFX_MIN_RING_SIZE) {
786 netif_err(efx, drv, efx->net_dev,
787 "TX and RX queues cannot be smaller than %ld\n",
788 EFX_MIN_RING_SIZE);
789 return -EINVAL;
790 }
791
792 return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
793}
794
744static int efx_ethtool_set_pauseparam(struct net_device *net_dev, 795static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
745 struct ethtool_pauseparam *pause) 796 struct ethtool_pauseparam *pause)
746{ 797{
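The new get/set_ringparam hooks plug into the standard ethtool ioctl, so resizing the rings from userspace is just `ethtool -G ethX rx 2048 tx 2048`, or programmatically (a sketch against the stock ethtool UAPI; error handling trimmed):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    static int set_rings(int fd, const char *ifname, __u32 rx, __u32 tx)
    {
            struct ethtool_ringparam ring = { .cmd = ETHTOOL_SRINGPARAM };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ring.rx_pending = rx;
            ring.tx_pending = tx;
            ifr.ifr_data = (char *)&ring;
            return ioctl(fd, SIOCETHTOOL, &ifr);  /* 0 on success */
    }

fd is any AF_INET datagram socket; requests outside [EFX_MIN_RING_SIZE, EFX_MAX_DMAQ_SIZE] come back -EINVAL from the handler above.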
@@ -918,6 +969,105 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
918 } 969 }
919} 970}
920 971
972static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
973 struct ethtool_rx_ntuple *ntuple)
974{
975 struct efx_nic *efx = netdev_priv(net_dev);
976 struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
977 struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
978 struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
979 struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
980 struct efx_filter_spec filter;
981
982 /* Range-check action */
983 if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
984 ntuple->fs.action >= (s32)efx->n_rx_channels)
985 return -EINVAL;
986
987 if (~ntuple->fs.data_mask)
988 return -EINVAL;
989
990 switch (ntuple->fs.flow_type) {
991 case TCP_V4_FLOW:
992 case UDP_V4_FLOW:
993 /* Must match all of destination, */
994 if (ip_mask->ip4dst | ip_mask->pdst)
995 return -EINVAL;
996 /* all or none of source, */
997 if ((ip_mask->ip4src | ip_mask->psrc) &&
998 ((__force u32)~ip_mask->ip4src |
999 (__force u16)~ip_mask->psrc))
1000 return -EINVAL;
1001 /* and nothing else */
1002 if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
1003 return -EINVAL;
1004 break;
1005 case ETHER_FLOW:
1006 /* Must match all of destination, */
1007 if (!is_zero_ether_addr(mac_mask->h_dest))
1008 return -EINVAL;
1009 /* all or none of VID, */
1010 if (ntuple->fs.vlan_tag_mask != 0xf000 &&
1011 ntuple->fs.vlan_tag_mask != 0xffff)
1012 return -EINVAL;
1013 /* and nothing else */
1014 if (!is_broadcast_ether_addr(mac_mask->h_source) ||
1015 mac_mask->h_proto != htons(0xffff))
1016 return -EINVAL;
1017 break;
1018 default:
1019 return -EINVAL;
1020 }
1021
1022 filter.priority = EFX_FILTER_PRI_MANUAL;
1023 filter.flags = 0;
1024
1025 switch (ntuple->fs.flow_type) {
1026 case TCP_V4_FLOW:
1027 if (!ip_mask->ip4src)
1028 efx_filter_set_rx_tcp_full(&filter,
1029 htonl(ip_entry->ip4src),
1030 htons(ip_entry->psrc),
1031 htonl(ip_entry->ip4dst),
1032 htons(ip_entry->pdst));
1033 else
1034 efx_filter_set_rx_tcp_wild(&filter,
1035 htonl(ip_entry->ip4dst),
1036 htons(ip_entry->pdst));
1037 break;
1038 case UDP_V4_FLOW:
1039 if (!ip_mask->ip4src)
1040 efx_filter_set_rx_udp_full(&filter,
1041 htonl(ip_entry->ip4src),
1042 htons(ip_entry->psrc),
1043 htonl(ip_entry->ip4dst),
1044 htons(ip_entry->pdst));
1045 else
1046 efx_filter_set_rx_udp_wild(&filter,
1047 htonl(ip_entry->ip4dst),
1048 htons(ip_entry->pdst));
1049 break;
1050 case ETHER_FLOW:
1051 if (ntuple->fs.vlan_tag_mask == 0xf000)
1052 efx_filter_set_rx_mac_full(&filter,
1053 ntuple->fs.vlan_tag & 0xfff,
1054 mac_entry->h_dest);
1055 else
1056 efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest);
1057 break;
1058 }
1059
1060 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) {
1061 return efx_filter_remove_filter(efx, &filter);
1062 } else {
1063 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
1064 filter.dmaq_id = 0xfff;
1065 else
1066 filter.dmaq_id = ntuple->fs.action;
1067 return efx_filter_insert_filter(efx, &filter, true);
1068 }
1069}
1070
921static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, 1071static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
922 struct ethtool_rxfh_indir *indir) 1072 struct ethtool_rxfh_indir *indir)
923{ 1073{
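The validation in efx_ethtool_set_rx_ntuple() follows the ETHTOOL_SRXNTUPLE mask convention: an all-ones mask means "ignore this field", zero means "must match". A hedged sketch of a spec the handler would accept, steering one TCP/IPv4 destination to an RX queue (the helper is hypothetical; the handler above applies htonl()/htons() itself, so values are passed as it expects):

    #include <string.h>
    #include <linux/ethtool.h>

    static void fill_tcp_steer(struct ethtool_rx_ntuple *nt,
                               __u32 dst_ip, __u16 dst_port, __s32 rx_queue)
    {
            memset(nt, 0, sizeof(*nt));
            nt->cmd = ETHTOOL_SRXNTUPLE;  /* assumed command constant */
            nt->fs.flow_type = TCP_V4_FLOW;

            /* destination must match: set values, leave masks zero */
            nt->fs.h_u.tcp_ip4_spec.ip4dst = dst_ip;
            nt->fs.h_u.tcp_ip4_spec.pdst = dst_port;

            /* source fully wildcarded: all-ones masks mean "ignore" */
            nt->fs.m_u.tcp_ip4_spec.ip4src = ~0u;
            nt->fs.m_u.tcp_ip4_spec.psrc = 0xffff;

            /* fields the handler refuses to match at all */
            nt->fs.m_u.tcp_ip4_spec.tos = 0xff;
            nt->fs.vlan_tag_mask = 0xffff;
            nt->fs.data_mask = ~0u;

            nt->fs.action = rx_queue;  /* or ETHTOOL_RXNTUPLE_ACTION_DROP */
    }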
@@ -971,6 +1121,8 @@ const struct ethtool_ops efx_ethtool_ops = {
971 .set_eeprom = efx_ethtool_set_eeprom, 1121 .set_eeprom = efx_ethtool_set_eeprom,
972 .get_coalesce = efx_ethtool_get_coalesce, 1122 .get_coalesce = efx_ethtool_get_coalesce,
973 .set_coalesce = efx_ethtool_set_coalesce, 1123 .set_coalesce = efx_ethtool_set_coalesce,
1124 .get_ringparam = efx_ethtool_get_ringparam,
1125 .set_ringparam = efx_ethtool_set_ringparam,
974 .get_pauseparam = efx_ethtool_get_pauseparam, 1126 .get_pauseparam = efx_ethtool_get_pauseparam,
975 .set_pauseparam = efx_ethtool_set_pauseparam, 1127 .set_pauseparam = efx_ethtool_set_pauseparam,
976 .get_rx_csum = efx_ethtool_get_rx_csum, 1128 .get_rx_csum = efx_ethtool_get_rx_csum,
@@ -994,6 +1146,7 @@ const struct ethtool_ops efx_ethtool_ops = {
994 .set_wol = efx_ethtool_set_wol, 1146 .set_wol = efx_ethtool_set_wol,
995 .reset = efx_ethtool_reset, 1147 .reset = efx_ethtool_reset,
996 .get_rxnfc = efx_ethtool_get_rxnfc, 1148 .get_rxnfc = efx_ethtool_get_rxnfc,
1149 .set_rx_ntuple = efx_ethtool_set_rx_ntuple,
997 .get_rxfh_indir = efx_ethtool_get_rxfh_indir, 1150 .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
998 .set_rxfh_indir = efx_ethtool_set_rxfh_indir, 1151 .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
999}; 1152};
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 4f9d33f3cca1..267019bb2b15 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -159,7 +159,6 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
159{ 159{
160 struct efx_nic *efx = dev_id; 160 struct efx_nic *efx = dev_id;
161 efx_oword_t *int_ker = efx->irq_status.addr; 161 efx_oword_t *int_ker = efx->irq_status.addr;
162 struct efx_channel *channel;
163 int syserr; 162 int syserr;
164 int queues; 163 int queues;
165 164
@@ -194,15 +193,10 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
194 wmb(); /* Ensure the vector is cleared before interrupt ack */ 193 wmb(); /* Ensure the vector is cleared before interrupt ack */
195 falcon_irq_ack_a1(efx); 194 falcon_irq_ack_a1(efx);
196 195
197 /* Schedule processing of any interrupting queues */ 196 if (queues & 1)
198 channel = &efx->channel[0]; 197 efx_schedule_channel(efx_get_channel(efx, 0));
199 while (queues) { 198 if (queues & 2)
200 if (queues & 0x01) 199 efx_schedule_channel(efx_get_channel(efx, 1));
201 efx_schedule_channel(channel);
202 channel++;
203 queues >>= 1;
204 }
205
206 return IRQ_HANDLED; 200 return IRQ_HANDLED;
207} 201}
208/************************************************************************** 202/**************************************************************************
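Unrolling the loop into two explicit tests works because Falcon/A1 legacy interrupts can report at most two event queues. The generic walk it replaces looked like this sketch (schedule_channel() stands in for the efx helper):

    static void schedule_queues(unsigned queues)
    {
            unsigned i;

            for (i = 0; queues; i++, queues >>= 1)
                    if (queues & 1)
                            schedule_channel(i);  /* illustrative helper */
    }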
@@ -452,30 +446,19 @@ static void falcon_reset_macs(struct efx_nic *efx)
452 /* It's not safe to use GLB_CTL_REG to reset the 446 /* It's not safe to use GLB_CTL_REG to reset the
453 * macs, so instead use the internal MAC resets 447 * macs, so instead use the internal MAC resets
454 */ 448 */
455 if (!EFX_IS10G(efx)) { 449 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
456 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1); 450 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
457 efx_writeo(efx, &reg, FR_AB_GM_CFG1); 451
458 udelay(1000); 452 for (count = 0; count < 10000; count++) {
459 453 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
460 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0); 454 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
461 efx_writeo(efx, &reg, FR_AB_GM_CFG1); 455 0)
462 udelay(1000); 456 return;
463 return; 457 udelay(10);
464 } else {
465 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
466 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
467
468 for (count = 0; count < 10000; count++) {
469 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
470 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
471 0)
472 return;
473 udelay(10);
474 }
475
476 netif_err(efx, hw, efx->net_dev,
477 "timed out waiting for XMAC core reset\n");
478 } 458 }
459
460 netif_err(efx, hw, efx->net_dev,
461 "timed out waiting for XMAC core reset\n");
479 } 462 }
480 463
481 /* Mac stats will fail whilst the TX fifo is draining */ 464
@@ -514,7 +497,6 @@ static void falcon_reset_macs(struct efx_nic *efx)
514 * are re-enabled by the caller */ 497 * are re-enabled by the caller */
515 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); 498 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
516 499
517 /* This can run even when the GMAC is selected */
518 falcon_setup_xaui(efx); 500 falcon_setup_xaui(efx);
519} 501}
520 502
@@ -652,8 +634,6 @@ static void falcon_stats_timer_func(unsigned long context)
652 spin_unlock(&efx->stats_lock); 634 spin_unlock(&efx->stats_lock);
653} 635}
654 636
655static void falcon_switch_mac(struct efx_nic *efx);
656
657static bool falcon_loopback_link_poll(struct efx_nic *efx) 637static bool falcon_loopback_link_poll(struct efx_nic *efx)
658{ 638{
659 struct efx_link_state old_state = efx->link_state; 639 struct efx_link_state old_state = efx->link_state;
@@ -664,11 +644,7 @@ static bool falcon_loopback_link_poll(struct efx_nic *efx)
664 efx->link_state.fd = true; 644 efx->link_state.fd = true;
665 efx->link_state.fc = efx->wanted_fc; 645 efx->link_state.fc = efx->wanted_fc;
666 efx->link_state.up = true; 646 efx->link_state.up = true;
667 647 efx->link_state.speed = 10000;
668 if (efx->loopback_mode == LOOPBACK_GMAC)
669 efx->link_state.speed = 1000;
670 else
671 efx->link_state.speed = 10000;
672 648
673 return !efx_link_state_equal(&efx->link_state, &old_state); 649 return !efx_link_state_equal(&efx->link_state, &old_state);
674} 650}
@@ -691,7 +667,7 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
691 falcon_stop_nic_stats(efx); 667 falcon_stop_nic_stats(efx);
692 falcon_deconfigure_mac_wrapper(efx); 668 falcon_deconfigure_mac_wrapper(efx);
693 669
694 falcon_switch_mac(efx); 670 falcon_reset_macs(efx);
695 671
696 efx->phy_op->reconfigure(efx); 672 efx->phy_op->reconfigure(efx);
697 rc = efx->mac_op->reconfigure(efx); 673 rc = efx->mac_op->reconfigure(efx);
@@ -841,73 +817,23 @@ out:
841 return rc; 817 return rc;
842} 818}
843 819
844static void falcon_clock_mac(struct efx_nic *efx)
845{
846 unsigned strap_val;
847 efx_oword_t nic_stat;
848
849 /* Configure the NIC generated MAC clock correctly */
850 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
851 strap_val = EFX_IS10G(efx) ? 5 : 3;
852 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
853 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
854 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
855 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
856 } else {
857 /* Falcon A1 does not support 1G/10G speed switching
858 * and must not be used with a PHY that does. */
859 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
860 strap_val);
861 }
862}
863
864static void falcon_switch_mac(struct efx_nic *efx)
865{
866 struct efx_mac_operations *old_mac_op = efx->mac_op;
867 struct falcon_nic_data *nic_data = efx->nic_data;
868 unsigned int stats_done_offset;
869
870 WARN_ON(!mutex_is_locked(&efx->mac_lock));
871 WARN_ON(nic_data->stats_disable_count == 0);
872
873 efx->mac_op = (EFX_IS10G(efx) ?
874 &falcon_xmac_operations : &falcon_gmac_operations);
875
876 if (EFX_IS10G(efx))
877 stats_done_offset = XgDmaDone_offset;
878 else
879 stats_done_offset = GDmaDone_offset;
880 nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
881
882 if (old_mac_op == efx->mac_op)
883 return;
884
885 falcon_clock_mac(efx);
886
887 netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n",
888 EFX_IS10G(efx) ? 'X' : 'G');
889 /* Not all macs support a mac-level link state */
890 efx->xmac_poll_required = false;
891 falcon_reset_macs(efx);
892}
893
894/* This call is responsible for hooking in the MAC and PHY operations */ 820/* This call is responsible for hooking in the MAC and PHY operations */
895static int falcon_probe_port(struct efx_nic *efx) 821static int falcon_probe_port(struct efx_nic *efx)
896{ 822{
823 struct falcon_nic_data *nic_data = efx->nic_data;
897 int rc; 824 int rc;
898 825
899 switch (efx->phy_type) { 826 switch (efx->phy_type) {
900 case PHY_TYPE_SFX7101: 827 case PHY_TYPE_SFX7101:
901 efx->phy_op = &falcon_sfx7101_phy_ops; 828 efx->phy_op = &falcon_sfx7101_phy_ops;
902 break; 829 break;
903 case PHY_TYPE_SFT9001A:
904 case PHY_TYPE_SFT9001B:
905 efx->phy_op = &falcon_sft9001_phy_ops;
906 break;
907 case PHY_TYPE_QT2022C2: 830 case PHY_TYPE_QT2022C2:
908 case PHY_TYPE_QT2025C: 831 case PHY_TYPE_QT2025C:
909 efx->phy_op = &falcon_qt202x_phy_ops; 832 efx->phy_op = &falcon_qt202x_phy_ops;
910 break; 833 break;
834 case PHY_TYPE_TXC43128:
835 efx->phy_op = &falcon_txc_phy_ops;
836 break;
911 default: 837 default:
912 netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n", 838 netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
913 efx->phy_type); 839 efx->phy_type);
@@ -943,6 +869,7 @@ static int falcon_probe_port(struct efx_nic *efx)
943 (u64)efx->stats_buffer.dma_addr, 869 (u64)efx->stats_buffer.dma_addr,
944 efx->stats_buffer.addr, 870 efx->stats_buffer.addr,
945 (u64)virt_to_phys(efx->stats_buffer.addr)); 871 (u64)virt_to_phys(efx->stats_buffer.addr));
872 nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
946 873
947 return 0; 874 return 0;
948} 875}
@@ -1207,7 +1134,7 @@ static void falcon_monitor(struct efx_nic *efx)
1207 falcon_stop_nic_stats(efx); 1134 falcon_stop_nic_stats(efx);
1208 falcon_deconfigure_mac_wrapper(efx); 1135 falcon_deconfigure_mac_wrapper(efx);
1209 1136
1210 falcon_switch_mac(efx); 1137 falcon_reset_macs(efx);
1211 rc = efx->mac_op->reconfigure(efx); 1138 rc = efx->mac_op->reconfigure(efx);
1212 BUG_ON(rc); 1139 BUG_ON(rc);
1213 1140
@@ -1216,8 +1143,7 @@ static void falcon_monitor(struct efx_nic *efx)
1216 efx_link_status_changed(efx); 1143 efx_link_status_changed(efx);
1217 } 1144 }
1218 1145
1219 if (EFX_IS10G(efx)) 1146 falcon_poll_xmac(efx);
1220 falcon_poll_xmac(efx);
1221} 1147}
1222 1148
1223/* Zeroes out the SRAM contents. This routine must be called in 1149/* Zeroes out the SRAM contents. This routine must be called in
@@ -1610,16 +1536,6 @@ static int falcon_init_nic(struct efx_nic *efx)
1610 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1); 1536 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
1611 efx_writeo(efx, &temp, FR_AB_NIC_STAT); 1537 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
1612 1538
1613 /* Set the source of the GMAC clock */
1614 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
1615 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
1616 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
1617 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
1618 }
1619
1620 /* Select the correct MAC */
1621 falcon_clock_mac(efx);
1622
1623 rc = falcon_reset_sram(efx); 1539 rc = falcon_reset_sram(efx);
1624 if (rc) 1540 if (rc)
1625 return rc; 1541 return rc;
@@ -1880,7 +1796,7 @@ struct efx_nic_type falcon_b0_nic_type = {
1880 * channels */ 1796 * channels */
1881 .tx_dc_base = 0x130000, 1797 .tx_dc_base = 0x130000,
1882 .rx_dc_base = 0x100000, 1798 .rx_dc_base = 0x100000,
1883 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH, 1799 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
1884 .reset_world_flags = ETH_RESET_IRQ, 1800 .reset_world_flags = ETH_RESET_IRQ,
1885}; 1801};
1886 1802
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 3d950c2cf205..cfc6a5b5a477 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -26,7 +26,7 @@
26/* Board types */ 26/* Board types */
27#define FALCON_BOARD_SFE4001 0x01 27#define FALCON_BOARD_SFE4001 0x01
28#define FALCON_BOARD_SFE4002 0x02 28#define FALCON_BOARD_SFE4002 0x02
29#define FALCON_BOARD_SFN4111T 0x51 29#define FALCON_BOARD_SFE4003 0x03
30#define FALCON_BOARD_SFN4112F 0x52 30#define FALCON_BOARD_SFN4112F 0x52
31 31
32/* Board temperature is about 15°C above ambient when air flow is 32/* Board temperature is about 15°C above ambient when air flow is
@@ -142,17 +142,17 @@ static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
142#endif /* CONFIG_SENSORS_LM87 */ 142#endif /* CONFIG_SENSORS_LM87 */
143 143
144/***************************************************************************** 144/*****************************************************************************
145 * Support for the SFE4001 and SFN4111T NICs. 145 * Support for the SFE4001 NIC.
146 * 146 *
147 * The SFE4001 does not power-up fully at reset due to its high power 147 * The SFE4001 does not power-up fully at reset due to its high power
148 * consumption. We control its power via a PCA9539 I/O expander. 148 * consumption. We control its power via a PCA9539 I/O expander.
149 * Both boards have a MAX6647 temperature monitor which we expose to 149 * It also has a MAX6647 temperature monitor which we expose to
150 * the lm90 driver. 150 * the lm90 driver.
151 * 151 *
152 * This also provides minimal support for reflashing the PHY, which is 152 * This also provides minimal support for reflashing the PHY, which is
153 * initiated by resetting it with the FLASH_CFG_1 pin pulled down. 153 * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
154 * On SFE4001 rev A2 and later this is connected to the 3V3X output of 154 * On SFE4001 rev A2 and later this is connected to the 3V3X output of
155 * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3. 155 * the IO-expander.
156 * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually 156 * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
157 * exclusive with the network device being open. 157 * exclusive with the network device being open.
158 */ 158 */
@@ -304,34 +304,6 @@ fail_on:
304 return rc; 304 return rc;
305} 305}
306 306
307static int sfn4111t_reset(struct efx_nic *efx)
308{
309 struct falcon_board *board = falcon_board(efx);
310 efx_oword_t reg;
311
312 /* GPIO 3 and the GPIO register are shared with I2C, so block that */
313 i2c_lock_adapter(&board->i2c_adap);
314
315 /* Pull RST_N (GPIO 2) low then let it up again, setting the
316 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
317 * output enables; the output levels should always be 0 (low)
318 * and we rely on external pull-ups. */
319 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
320 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, true);
321 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
322 msleep(1000);
323 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, false);
324 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN,
325 !!(efx->phy_mode & PHY_MODE_SPECIAL));
326 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
327 msleep(1);
328
329 i2c_unlock_adapter(&board->i2c_adap);
330
331 ssleep(1);
332 return 0;
333}
334
335static ssize_t show_phy_flash_cfg(struct device *dev, 307static ssize_t show_phy_flash_cfg(struct device *dev,
336 struct device_attribute *attr, char *buf) 308 struct device_attribute *attr, char *buf)
337{ 309{
@@ -363,10 +335,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
363 efx->phy_mode = new_mode; 335 efx->phy_mode = new_mode;
364 if (new_mode & PHY_MODE_SPECIAL) 336 if (new_mode & PHY_MODE_SPECIAL)
365 falcon_stop_nic_stats(efx); 337 falcon_stop_nic_stats(efx);
366 if (falcon_board(efx)->type->id == FALCON_BOARD_SFE4001) 338 err = sfe4001_poweron(efx);
367 err = sfe4001_poweron(efx);
368 else
369 err = sfn4111t_reset(efx);
370 if (!err) 339 if (!err)
371 err = efx_reconfigure_port(efx); 340 err = efx_reconfigure_port(efx);
372 if (!(new_mode & PHY_MODE_SPECIAL)) 341 if (!(new_mode & PHY_MODE_SPECIAL))
@@ -479,83 +448,6 @@ fail_hwmon:
479 return rc; 448 return rc;
480} 449}
481 450
482static int sfn4111t_check_hw(struct efx_nic *efx)
483{
484 s32 status;
485
486 /* If XAUI link is up then do not monitor */
487 if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
488 return 0;
489
490 /* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
491 status = i2c_smbus_read_byte_data(falcon_board(efx)->hwmon_client,
492 MAX664X_REG_RSL);
493 if (status < 0)
494 return -EIO;
495 if (status & 0x57)
496 return -ERANGE;
497 return 0;
498}
499
500static void sfn4111t_fini(struct efx_nic *efx)
501{
502 netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
503
504 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
505 i2c_unregister_device(falcon_board(efx)->hwmon_client);
506}
507
508static struct i2c_board_info sfn4111t_a0_hwmon_info = {
509 I2C_BOARD_INFO("max6647", 0x4e),
510};
511
512static struct i2c_board_info sfn4111t_r5_hwmon_info = {
513 I2C_BOARD_INFO("max6646", 0x4d),
514};
515
516static void sfn4111t_init_phy(struct efx_nic *efx)
517{
518 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
519 if (sft9001_wait_boot(efx) != -EINVAL)
520 return;
521
522 efx->phy_mode = PHY_MODE_SPECIAL;
523 falcon_stop_nic_stats(efx);
524 }
525
526 sfn4111t_reset(efx);
527 sft9001_wait_boot(efx);
528}
529
530static int sfn4111t_init(struct efx_nic *efx)
531{
532 struct falcon_board *board = falcon_board(efx);
533 int rc;
534
535 board->hwmon_client =
536 i2c_new_device(&board->i2c_adap,
537 (board->minor < 5) ?
538 &sfn4111t_a0_hwmon_info :
539 &sfn4111t_r5_hwmon_info);
540 if (!board->hwmon_client)
541 return -EIO;
542
543 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
544 if (rc)
545 goto fail_hwmon;
546
547 if (efx->phy_mode & PHY_MODE_SPECIAL)
548 /* PHY may not generate a 156.25 MHz clock and MAC
549 * stats fetch will fail. */
550 falcon_stop_nic_stats(efx);
551
552 return 0;
553
554fail_hwmon:
555 i2c_unregister_device(board->hwmon_client);
556 return rc;
557}
558
559/***************************************************************************** 451/*****************************************************************************
560 * Support for the SFE4002 452 * Support for the SFE4002
561 * 453 *
@@ -691,6 +583,75 @@ static int sfn4112f_init(struct efx_nic *efx)
691 return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs); 583 return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
692} 584}
693 585
586/*****************************************************************************
587 * Support for the SFE4003
588 *
589 */
590static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */
591
592static const u8 sfe4003_lm87_regs[] = {
593 LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */
594 LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
595 LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
596 LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
597 LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
598 LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS),
599 0
600};
601
602static struct i2c_board_info sfe4003_hwmon_info = {
603 I2C_BOARD_INFO("lm87", 0x2e),
604 .platform_data = &sfe4003_lm87_channel,
605};
606
607/* Board-specific LED info. */
608#define SFE4003_RED_LED_GPIO 11
609#define SFE4003_LED_ON 1
610#define SFE4003_LED_OFF 0
611
612static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
613{
614 struct falcon_board *board = falcon_board(efx);
615
616 /* The LEDs were not wired to GPIOs before A3 */
617 if (board->minor < 3 && board->major == 0)
618 return;
619
620 falcon_txc_set_gpio_val(
621 efx, SFE4003_RED_LED_GPIO,
622 (mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
623}
624
625static void sfe4003_init_phy(struct efx_nic *efx)
626{
627 struct falcon_board *board = falcon_board(efx);
628
629 /* The LEDs were not wired to GPIOs before A3 */
630 if (board->minor < 3 && board->major == 0)
631 return;
632
633 falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
634 falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
635}
636
637static int sfe4003_check_hw(struct efx_nic *efx)
638{
639 struct falcon_board *board = falcon_board(efx);
640
641 /* A0/A1/A2 board rev. 4003s report a temperature fault the whole time
642 * (bad sensor) so we mask it out. */
643 unsigned alarm_mask =
644 (board->major == 0 && board->minor <= 2) ?
645 ~LM87_ALARM_TEMP_EXT1 : ~0;
646
647 return efx_check_lm87(efx, alarm_mask);
648}
649
650static int sfe4003_init(struct efx_nic *efx)
651{
652 return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
653}
654
694static const struct falcon_board_type board_types[] = { 655static const struct falcon_board_type board_types[] = {
695 { 656 {
696 .id = FALCON_BOARD_SFE4001, 657 .id = FALCON_BOARD_SFE4001,
@@ -713,14 +674,14 @@ static const struct falcon_board_type board_types[] = {
713 .monitor = sfe4002_check_hw, 674 .monitor = sfe4002_check_hw,
714 }, 675 },
715 { 676 {
716		.id = FALCON_BOARD_SFN4111T,
717		.ref_model = "SFN4111T",
718		.gen_type = "100/1000/10GBASE-T adapter",
719		.init = sfn4111t_init,
720		.init_phy = sfn4111t_init_phy,
721		.fini = sfn4111t_fini,
722		.set_id_led = tenxpress_set_id_led,
723		.monitor = sfn4111t_check_hw,
677		.id = FALCON_BOARD_SFE4003,
678		.ref_model = "SFE4003",
679		.gen_type = "10GBASE-CX4 adapter",
680		.init = sfe4003_init,
681		.init_phy = sfe4003_init_phy,
682		.fini = efx_fini_lm87,
683		.set_id_led = sfe4003_set_id_led,
684		.monitor = sfe4003_check_hw,
724 }, 685 },
725 { 686 {
726 .id = FALCON_BOARD_SFN4112F, 687 .id = FALCON_BOARD_SFN4112F,
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c
deleted file mode 100644
index 7dadfcbd6ce7..000000000000
--- a/drivers/net/sfc/falcon_gmac.c
+++ /dev/null
@@ -1,230 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "efx.h"
14#include "nic.h"
15#include "mac.h"
16#include "regs.h"
17#include "io.h"
18
19/**************************************************************************
20 *
21 * MAC operations
22 *
23 *************************************************************************/
24
25static int falcon_reconfigure_gmac(struct efx_nic *efx)
26{
27 struct efx_link_state *link_state = &efx->link_state;
28 bool loopback, tx_fc, rx_fc, bytemode;
29 int if_mode;
30 unsigned int max_frame_len;
31 efx_oword_t reg;
32
33 /* Configuration register 1 */
34 tx_fc = (link_state->fc & EFX_FC_TX) || !link_state->fd;
35 rx_fc = !!(link_state->fc & EFX_FC_RX);
36 loopback = (efx->loopback_mode == LOOPBACK_GMAC);
37 bytemode = (link_state->speed == 1000);
38
39 EFX_POPULATE_OWORD_5(reg,
40 FRF_AB_GM_LOOP, loopback,
41 FRF_AB_GM_TX_EN, 1,
42 FRF_AB_GM_TX_FC_EN, tx_fc,
43 FRF_AB_GM_RX_EN, 1,
44 FRF_AB_GM_RX_FC_EN, rx_fc);
45 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
46 udelay(10);
47
48 /* Configuration register 2 */
49 if_mode = (bytemode) ? 2 : 1;
50 EFX_POPULATE_OWORD_5(reg,
51 FRF_AB_GM_IF_MODE, if_mode,
52 FRF_AB_GM_PAD_CRC_EN, 1,
53 FRF_AB_GM_LEN_CHK, 1,
54 FRF_AB_GM_FD, link_state->fd,
55 FRF_AB_GM_PAMBL_LEN, 0x7/*datasheet recommended */);
56
57 efx_writeo(efx, &reg, FR_AB_GM_CFG2);
58 udelay(10);
59
60 /* Max frame len register */
61 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
62 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_MAX_FLEN, max_frame_len);
63 efx_writeo(efx, &reg, FR_AB_GM_MAX_FLEN);
64 udelay(10);
65
66 /* FIFO configuration register 0 */
67 EFX_POPULATE_OWORD_5(reg,
68 FRF_AB_GMF_FTFENREQ, 1,
69 FRF_AB_GMF_STFENREQ, 1,
70 FRF_AB_GMF_FRFENREQ, 1,
71 FRF_AB_GMF_SRFENREQ, 1,
72 FRF_AB_GMF_WTMENREQ, 1);
73 efx_writeo(efx, &reg, FR_AB_GMF_CFG0);
74 udelay(10);
75
76 /* FIFO configuration register 1 */
77 EFX_POPULATE_OWORD_2(reg,
78 FRF_AB_GMF_CFGFRTH, 0x12,
79 FRF_AB_GMF_CFGXOFFRTX, 0xffff);
80 efx_writeo(efx, &reg, FR_AB_GMF_CFG1);
81 udelay(10);
82
83 /* FIFO configuration register 2 */
84 EFX_POPULATE_OWORD_2(reg,
85 FRF_AB_GMF_CFGHWM, 0x3f,
86 FRF_AB_GMF_CFGLWM, 0xa);
87 efx_writeo(efx, &reg, FR_AB_GMF_CFG2);
88 udelay(10);
89
90 /* FIFO configuration register 3 */
91 EFX_POPULATE_OWORD_2(reg,
92 FRF_AB_GMF_CFGHWMFT, 0x1c,
93 FRF_AB_GMF_CFGFTTH, 0x08);
94 efx_writeo(efx, &reg, FR_AB_GMF_CFG3);
95 udelay(10);
96
97 /* FIFO configuration register 4 */
98 EFX_POPULATE_OWORD_1(reg, FRF_AB_GMF_HSTFLTRFRM_PAUSE, 1);
99 efx_writeo(efx, &reg, FR_AB_GMF_CFG4);
100 udelay(10);
101
102 /* FIFO configuration register 5 */
103 efx_reado(efx, &reg, FR_AB_GMF_CFG5);
104 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGBYTMODE, bytemode);
105 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGHDPLX, !link_state->fd);
106 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTDRPLT64, !link_state->fd);
107 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTFLTRFRMDC_PAUSE, 0);
108 efx_writeo(efx, &reg, FR_AB_GMF_CFG5);
109 udelay(10);
110
111 /* MAC address */
112 EFX_POPULATE_OWORD_4(reg,
113 FRF_AB_GM_ADR_B0, efx->net_dev->dev_addr[5],
114 FRF_AB_GM_ADR_B1, efx->net_dev->dev_addr[4],
115 FRF_AB_GM_ADR_B2, efx->net_dev->dev_addr[3],
116 FRF_AB_GM_ADR_B3, efx->net_dev->dev_addr[2]);
117 efx_writeo(efx, &reg, FR_AB_GM_ADR1);
118 udelay(10);
119 EFX_POPULATE_OWORD_2(reg,
120 FRF_AB_GM_ADR_B4, efx->net_dev->dev_addr[1],
121 FRF_AB_GM_ADR_B5, efx->net_dev->dev_addr[0]);
122 efx_writeo(efx, &reg, FR_AB_GM_ADR2);
123 udelay(10);
124
125 falcon_reconfigure_mac_wrapper(efx);
126
127 return 0;
128}
129
130static void falcon_update_stats_gmac(struct efx_nic *efx)
131{
132 struct efx_mac_stats *mac_stats = &efx->mac_stats;
133 unsigned long old_rx_pause, old_tx_pause;
134 unsigned long new_rx_pause, new_tx_pause;
135
136 /* Pause frames are erroneously counted as errors (SFC bug 3269) */
137 old_rx_pause = mac_stats->rx_pause;
138 old_tx_pause = mac_stats->tx_pause;
139
140 /* Update MAC stats from DMAed values */
141 FALCON_STAT(efx, GRxGoodOct, rx_good_bytes);
142 FALCON_STAT(efx, GRxBadOct, rx_bad_bytes);
143 FALCON_STAT(efx, GRxMissPkt, rx_missed);
144 FALCON_STAT(efx, GRxFalseCRS, rx_false_carrier);
145 FALCON_STAT(efx, GRxPausePkt, rx_pause);
146 FALCON_STAT(efx, GRxBadPkt, rx_bad);
147 FALCON_STAT(efx, GRxUcastPkt, rx_unicast);
148 FALCON_STAT(efx, GRxMcastPkt, rx_multicast);
149 FALCON_STAT(efx, GRxBcastPkt, rx_broadcast);
150 FALCON_STAT(efx, GRxGoodLt64Pkt, rx_good_lt64);
151 FALCON_STAT(efx, GRxBadLt64Pkt, rx_bad_lt64);
152 FALCON_STAT(efx, GRx64Pkt, rx_64);
153 FALCON_STAT(efx, GRx65to127Pkt, rx_65_to_127);
154 FALCON_STAT(efx, GRx128to255Pkt, rx_128_to_255);
155 FALCON_STAT(efx, GRx256to511Pkt, rx_256_to_511);
156 FALCON_STAT(efx, GRx512to1023Pkt, rx_512_to_1023);
157 FALCON_STAT(efx, GRx1024to15xxPkt, rx_1024_to_15xx);
158 FALCON_STAT(efx, GRx15xxtoJumboPkt, rx_15xx_to_jumbo);
159 FALCON_STAT(efx, GRxGtJumboPkt, rx_gtjumbo);
160 FALCON_STAT(efx, GRxFcsErr64to15xxPkt, rx_bad_64_to_15xx);
161 FALCON_STAT(efx, GRxFcsErr15xxtoJumboPkt, rx_bad_15xx_to_jumbo);
162 FALCON_STAT(efx, GRxFcsErrGtJumboPkt, rx_bad_gtjumbo);
163 FALCON_STAT(efx, GTxGoodBadOct, tx_bytes);
164 FALCON_STAT(efx, GTxGoodOct, tx_good_bytes);
165 FALCON_STAT(efx, GTxSglColPkt, tx_single_collision);
166 FALCON_STAT(efx, GTxMultColPkt, tx_multiple_collision);
167 FALCON_STAT(efx, GTxExColPkt, tx_excessive_collision);
168 FALCON_STAT(efx, GTxDefPkt, tx_deferred);
169 FALCON_STAT(efx, GTxLateCol, tx_late_collision);
170 FALCON_STAT(efx, GTxExDefPkt, tx_excessive_deferred);
171 FALCON_STAT(efx, GTxPausePkt, tx_pause);
172 FALCON_STAT(efx, GTxBadPkt, tx_bad);
173 FALCON_STAT(efx, GTxUcastPkt, tx_unicast);
174 FALCON_STAT(efx, GTxMcastPkt, tx_multicast);
175 FALCON_STAT(efx, GTxBcastPkt, tx_broadcast);
176 FALCON_STAT(efx, GTxLt64Pkt, tx_lt64);
177 FALCON_STAT(efx, GTx64Pkt, tx_64);
178 FALCON_STAT(efx, GTx65to127Pkt, tx_65_to_127);
179 FALCON_STAT(efx, GTx128to255Pkt, tx_128_to_255);
180 FALCON_STAT(efx, GTx256to511Pkt, tx_256_to_511);
181 FALCON_STAT(efx, GTx512to1023Pkt, tx_512_to_1023);
182 FALCON_STAT(efx, GTx1024to15xxPkt, tx_1024_to_15xx);
183 FALCON_STAT(efx, GTx15xxtoJumboPkt, tx_15xx_to_jumbo);
184 FALCON_STAT(efx, GTxGtJumboPkt, tx_gtjumbo);
185 FALCON_STAT(efx, GTxNonTcpUdpPkt, tx_non_tcpudp);
186 FALCON_STAT(efx, GTxMacSrcErrPkt, tx_mac_src_error);
187 FALCON_STAT(efx, GTxIpSrcErrPkt, tx_ip_src_error);
188
189 /* Pause frames are erroneously counted as errors (SFC bug 3269) */
190 new_rx_pause = mac_stats->rx_pause;
191 new_tx_pause = mac_stats->tx_pause;
192 mac_stats->rx_bad -= (new_rx_pause - old_rx_pause);
193 mac_stats->tx_bad -= (new_tx_pause - old_tx_pause);
194
195 /* Derive stats that the MAC doesn't provide directly */
196 mac_stats->tx_bad_bytes =
197 mac_stats->tx_bytes - mac_stats->tx_good_bytes;
198 mac_stats->tx_packets =
199 mac_stats->tx_lt64 + mac_stats->tx_64 +
200 mac_stats->tx_65_to_127 + mac_stats->tx_128_to_255 +
201 mac_stats->tx_256_to_511 + mac_stats->tx_512_to_1023 +
202 mac_stats->tx_1024_to_15xx + mac_stats->tx_15xx_to_jumbo +
203 mac_stats->tx_gtjumbo;
204 mac_stats->tx_collision =
205 mac_stats->tx_single_collision +
206 mac_stats->tx_multiple_collision +
207 mac_stats->tx_excessive_collision +
208 mac_stats->tx_late_collision;
209 mac_stats->rx_bytes =
210 mac_stats->rx_good_bytes + mac_stats->rx_bad_bytes;
211 mac_stats->rx_packets =
212 mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64 +
213 mac_stats->rx_64 + mac_stats->rx_65_to_127 +
214 mac_stats->rx_128_to_255 + mac_stats->rx_256_to_511 +
215 mac_stats->rx_512_to_1023 + mac_stats->rx_1024_to_15xx +
216 mac_stats->rx_15xx_to_jumbo + mac_stats->rx_gtjumbo;
217 mac_stats->rx_good = mac_stats->rx_packets - mac_stats->rx_bad;
218 mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64;
219}
220
221static bool falcon_gmac_check_fault(struct efx_nic *efx)
222{
223 return false;
224}
225
226struct efx_mac_operations falcon_gmac_operations = {
227 .reconfigure = falcon_reconfigure_gmac,
228 .update_stats = falcon_update_stats_gmac,
229 .check_fault = falcon_gmac_check_fault,
230};
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
new file mode 100644
index 000000000000..abc884d09d57
--- /dev/null
+++ b/drivers/net/sfc/filter.c
@@ -0,0 +1,445 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "efx.h"
11#include "filter.h"
12#include "io.h"
13#include "nic.h"
14#include "regs.h"
15
16/* "Fudge factors" - difference between programmed value and actual depth.
17 * Due to pipelined implementation we need to program H/W with a value that
18 * is larger than the hop limit we want.
19 */
20#define FILTER_CTL_SRCH_FUDGE_WILD 3
21#define FILTER_CTL_SRCH_FUDGE_FULL 1
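For a concrete reading of the fudge: if the deepest probe chain software has needed for wildcard filters is 4, the limit pushed to hardware is

	/* illustrative: 4 + FILTER_CTL_SRCH_FUDGE_WILD = 7 */

so the pipelined lookup engine does not stop before reaching every entry that software's bookkeeping says could still match.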
22
23struct efx_filter_table {
24 u32 offset; /* address of table relative to BAR */
25 unsigned size; /* number of entries */
26 unsigned step; /* step between entries */
27 unsigned used; /* number currently used */
28 unsigned long *used_bitmap;
29 struct efx_filter_spec *spec;
30};
31
32struct efx_filter_state {
33 spinlock_t lock;
34 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
35 unsigned search_depth[EFX_FILTER_TYPE_COUNT];
36};
37
38/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
39 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
40static u16 efx_filter_hash(u32 key)
41{
42 u16 tmp;
43
44 /* First 16 rounds */
45 tmp = 0x1fff ^ key >> 16;
46 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
47 tmp = tmp ^ tmp >> 9;
48 /* Last 16 rounds */
49 tmp = tmp ^ tmp << 13 ^ key;
50 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
51 return tmp ^ tmp >> 9;
52}
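For intuition, the folded XOR-shift rounds above behave like clocking a conventional CRC-style LFSR with the polynomial named in the comment; a bit-serial sketch for reference (illustrative only, not verified bit-for-bit against the folded form):

static u16 lfsr16_ref(u32 key)
{
	u16 state = 0xffff;	/* documented initial LFSR state */
	int i;

	for (i = 31; i >= 0; i--) {
		/* feedback = MSB of state XOR next key bit (MSB first) */
		int fb = ((state >> 15) ^ (key >> i)) & 1;

		state <<= 1;
		if (fb)
			state ^= 0x0009;	/* x^3 + 1; x^16 implicit */
	}
	return state;
}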
53
54/* To allow for hash collisions, filter search continues at these
55 * increments from the first possible entry selected by the hash. */
56static u16 efx_filter_increment(u32 key)
57{
58 return key * 2 - 1;
59}
60
61static enum efx_filter_table_id
62efx_filter_type_table_id(enum efx_filter_type type)
63{
64 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_FULL >> 2));
65 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_WILD >> 2));
66 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_FULL >> 2));
67 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_WILD >> 2));
68 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_FULL >> 2));
69 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_WILD >> 2));
70 return type >> 2;
71}
72
73static void
74efx_filter_table_reset_search_depth(struct efx_filter_state *state,
75 enum efx_filter_table_id table_id)
76{
77 memset(state->search_depth + (table_id << 2), 0,
78 sizeof(state->search_depth[0]) << 2);
79}
80
81static void efx_filter_push_rx_limits(struct efx_nic *efx)
82{
83 struct efx_filter_state *state = efx->filter_state;
84 efx_oword_t filter_ctl;
85
86 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
87
88 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
89 state->search_depth[EFX_FILTER_RX_TCP_FULL] +
90 FILTER_CTL_SRCH_FUDGE_FULL);
91 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
92 state->search_depth[EFX_FILTER_RX_TCP_WILD] +
93 FILTER_CTL_SRCH_FUDGE_WILD);
94 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
95 state->search_depth[EFX_FILTER_RX_UDP_FULL] +
96 FILTER_CTL_SRCH_FUDGE_FULL);
97 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
98 state->search_depth[EFX_FILTER_RX_UDP_WILD] +
99 FILTER_CTL_SRCH_FUDGE_WILD);
100
101 if (state->table[EFX_FILTER_TABLE_RX_MAC].size) {
102 EFX_SET_OWORD_FIELD(
103 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
104 state->search_depth[EFX_FILTER_RX_MAC_FULL] +
105 FILTER_CTL_SRCH_FUDGE_FULL);
106 EFX_SET_OWORD_FIELD(
107 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
108 state->search_depth[EFX_FILTER_RX_MAC_WILD] +
109 FILTER_CTL_SRCH_FUDGE_WILD);
110 }
111
112 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
113}
114
115/* Build a filter entry and return its n-tuple key. */
116static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
117{
118 u32 data3;
119
120 switch (efx_filter_type_table_id(spec->type)) {
121 case EFX_FILTER_TABLE_RX_IP: {
122 bool is_udp = (spec->type == EFX_FILTER_RX_UDP_FULL ||
123 spec->type == EFX_FILTER_RX_UDP_WILD);
124 EFX_POPULATE_OWORD_7(
125 *filter,
126 FRF_BZ_RSS_EN,
127 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
128 FRF_BZ_SCATTER_EN,
129 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
130 FRF_BZ_TCP_UDP, is_udp,
131 FRF_BZ_RXQ_ID, spec->dmaq_id,
132 EFX_DWORD_2, spec->data[2],
133 EFX_DWORD_1, spec->data[1],
134 EFX_DWORD_0, spec->data[0]);
135 data3 = is_udp;
136 break;
137 }
138
139 case EFX_FILTER_TABLE_RX_MAC: {
140 bool is_wild = spec->type == EFX_FILTER_RX_MAC_WILD;
141 EFX_POPULATE_OWORD_8(
142 *filter,
143 FRF_CZ_RMFT_RSS_EN,
144 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
145 FRF_CZ_RMFT_SCATTER_EN,
146 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
147 FRF_CZ_RMFT_IP_OVERRIDE,
148 !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
149 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
150 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
151 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
152 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
153 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
154 data3 = is_wild;
155 break;
156 }
157
158 default:
159 BUG();
160 }
161
162 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
163}
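The XOR collapses the 96-bit match plus the full/wild distinction into the 32-bit key that is hashed above. As a worked illustration with invented values: data[0] = 0x00011234, data[1] = 0x0050c0a8, data[2] = 0x0a000001 and data3 = 0 (TCP) give

	/* key = 0x00011234 ^ 0x0050c0a8 ^ 0x0a000001 = 0x0a51d29d */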
164
165static bool efx_filter_equal(const struct efx_filter_spec *left,
166 const struct efx_filter_spec *right)
167{
168 if (left->type != right->type ||
169 memcmp(left->data, right->data, sizeof(left->data)))
170 return false;
171
172 return true;
173}
174
175static int efx_filter_search(struct efx_filter_table *table,
176 struct efx_filter_spec *spec, u32 key,
177 bool for_insert, int *depth_required)
178{
179 unsigned hash, incr, filter_idx, depth;
180 struct efx_filter_spec *cmp;
181
182 hash = efx_filter_hash(key);
183 incr = efx_filter_increment(key);
184
185 for (depth = 1, filter_idx = hash & (table->size - 1);
186 test_bit(filter_idx, table->used_bitmap);
187 ++depth) {
188 cmp = &table->spec[filter_idx];
189 if (efx_filter_equal(spec, cmp))
190 goto found;
191 filter_idx = (filter_idx + incr) & (table->size - 1);
192 }
193 if (!for_insert)
194 return -ENOENT;
195found:
196 *depth_required = depth;
197 return filter_idx;
198}
199
200/**
201 * efx_filter_insert_filter - add or replace a filter
202 * @efx: NIC in which to insert the filter
203 * @spec: Specification for the filter
204 * @replace: Flag for whether the specified filter may replace a filter
205 * with an identical match expression and equal or lower priority
206 *
207 * On success, return the filter index within its table.
208 * On failure, return a negative error code.
209 */
210int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
211 bool replace)
212{
213 struct efx_filter_state *state = efx->filter_state;
214 enum efx_filter_table_id table_id =
215 efx_filter_type_table_id(spec->type);
216 struct efx_filter_table *table = &state->table[table_id];
217 struct efx_filter_spec *saved_spec;
218 efx_oword_t filter;
219 int filter_idx, depth;
220 u32 key;
221 int rc;
222
223 if (table->size == 0)
224 return -EINVAL;
225
226 key = efx_filter_build(&filter, spec);
227
228 netif_vdbg(efx, hw, efx->net_dev,
229 "%s: type %d search_depth=%d", __func__, spec->type,
230 state->search_depth[spec->type]);
231
232 spin_lock_bh(&state->lock);
233
234 rc = efx_filter_search(table, spec, key, true, &depth);
235 if (rc < 0)
236 goto out;
237 filter_idx = rc;
238 BUG_ON(filter_idx >= table->size);
239 saved_spec = &table->spec[filter_idx];
240
241 if (test_bit(filter_idx, table->used_bitmap)) {
242 /* Should we replace the existing filter? */
243 if (!replace) {
244 rc = -EEXIST;
245 goto out;
246 }
247 if (spec->priority < saved_spec->priority) {
248 rc = -EPERM;
249 goto out;
250 }
251 } else {
252 __set_bit(filter_idx, table->used_bitmap);
253 ++table->used;
254 }
255 *saved_spec = *spec;
256
257 if (state->search_depth[spec->type] < depth) {
258 state->search_depth[spec->type] = depth;
259 efx_filter_push_rx_limits(efx);
260 }
261
262 efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
263
264 netif_vdbg(efx, hw, efx->net_dev,
265 "%s: filter type %d index %d rxq %u set",
266 __func__, spec->type, filter_idx, spec->dmaq_id);
267
268out:
269 spin_unlock_bh(&state->lock);
270 return rc;
271}
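A usage sketch under stated assumptions (the helper name, queue number and port are invented; the spec-building inline comes from filter.h):

static int example_steer_http(struct efx_nic *efx, u32 local_ip)
{
	struct efx_filter_spec spec;

	memset(&spec, 0, sizeof(spec));
	spec.priority = EFX_FILTER_PRI_HINT;	/* replaceable hint */
	spec.flags = EFX_FILTER_FLAG_RX_RSS;	/* spread via RSS */
	spec.dmaq_id = 2;			/* base RX queue */
	efx_filter_set_rx_tcp_wild(&spec, local_ip, 80);

	return efx_filter_insert_filter(efx, &spec, true);
}

A non-negative return is the filter's table index; -EEXIST means an equal match already existed and replacement was not allowed.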
272
273static void efx_filter_table_clear_entry(struct efx_nic *efx,
274 struct efx_filter_table *table,
275 int filter_idx)
276{
277 static efx_oword_t filter;
278
279 if (test_bit(filter_idx, table->used_bitmap)) {
280 __clear_bit(filter_idx, table->used_bitmap);
281 --table->used;
282 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
283
284 efx_writeo(efx, &filter,
285 table->offset + table->step * filter_idx);
286 }
287}
288
289/**
290 * efx_filter_remove_filter - remove a filter by specification
291 * @efx: NIC from which to remove the filter
292 * @spec: Specification for the filter
293 *
294 * On success, return zero.
295 * On failure, return a negative error code.
296 */
297int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
298{
299 struct efx_filter_state *state = efx->filter_state;
300 enum efx_filter_table_id table_id =
301 efx_filter_type_table_id(spec->type);
302 struct efx_filter_table *table = &state->table[table_id];
303 struct efx_filter_spec *saved_spec;
304 efx_oword_t filter;
305 int filter_idx, depth;
306 u32 key;
307 int rc;
308
309 key = efx_filter_build(&filter, spec);
310
311 spin_lock_bh(&state->lock);
312
313 rc = efx_filter_search(table, spec, key, false, &depth);
314 if (rc < 0)
315 goto out;
316 filter_idx = rc;
317 saved_spec = &table->spec[filter_idx];
318
319 if (spec->priority < saved_spec->priority) {
320 rc = -EPERM;
321 goto out;
322 }
323
324 efx_filter_table_clear_entry(efx, table, filter_idx);
325 if (table->used == 0)
326 efx_filter_table_reset_search_depth(state, table_id);
327 rc = 0;
328
329out:
330 spin_unlock_bh(&state->lock);
331 return rc;
332}
333
334/**
335 * efx_filter_table_clear - remove filters from a table by priority
336 * @efx: NIC from which to remove the filters
337 * @table_id: Table from which to remove the filters
338 * @priority: Maximum priority to remove
339 */
340void efx_filter_table_clear(struct efx_nic *efx,
341 enum efx_filter_table_id table_id,
342 enum efx_filter_priority priority)
343{
344 struct efx_filter_state *state = efx->filter_state;
345 struct efx_filter_table *table = &state->table[table_id];
346 int filter_idx;
347
348 spin_lock_bh(&state->lock);
349
350 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
351 if (table->spec[filter_idx].priority <= priority)
352 efx_filter_table_clear_entry(efx, table, filter_idx);
353 if (table->used == 0)
354 efx_filter_table_reset_search_depth(state, table_id);
355
356 spin_unlock_bh(&state->lock);
357}
358
359/* Restore filter state after reset */
360void efx_restore_filters(struct efx_nic *efx)
361{
362 struct efx_filter_state *state = efx->filter_state;
363 enum efx_filter_table_id table_id;
364 struct efx_filter_table *table;
365 efx_oword_t filter;
366 int filter_idx;
367
368 spin_lock_bh(&state->lock);
369
370 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
371 table = &state->table[table_id];
372 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
373 if (!test_bit(filter_idx, table->used_bitmap))
374 continue;
375 efx_filter_build(&filter, &table->spec[filter_idx]);
376 efx_writeo(efx, &filter,
377 table->offset + table->step * filter_idx);
378 }
379 }
380
381 efx_filter_push_rx_limits(efx);
382
383 spin_unlock_bh(&state->lock);
384}
385
386int efx_probe_filters(struct efx_nic *efx)
387{
388 struct efx_filter_state *state;
389 struct efx_filter_table *table;
390 unsigned table_id;
391
392 state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
393 if (!state)
394 return -ENOMEM;
395 efx->filter_state = state;
396
397 spin_lock_init(&state->lock);
398
399 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
400 table = &state->table[EFX_FILTER_TABLE_RX_IP];
401 table->offset = FR_BZ_RX_FILTER_TBL0;
402 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
403 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
404 }
405
406 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
407 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
408 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
409 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
410 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
411 }
412
413 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
414 table = &state->table[table_id];
415 if (table->size == 0)
416 continue;
417 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
418 sizeof(unsigned long),
419 GFP_KERNEL);
420 if (!table->used_bitmap)
421 goto fail;
422 table->spec = vmalloc(table->size * sizeof(*table->spec));
423 if (!table->spec)
424 goto fail;
425 memset(table->spec, 0, table->size * sizeof(*table->spec));
426 }
427
428 return 0;
429
430fail:
431 efx_remove_filters(efx);
432 return -ENOMEM;
433}
434
435void efx_remove_filters(struct efx_nic *efx)
436{
437 struct efx_filter_state *state = efx->filter_state;
438 enum efx_filter_table_id table_id;
439
440 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
441 kfree(state->table[table_id].used_bitmap);
442 vfree(state->table[table_id].spec);
443 }
444 kfree(state);
445}
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h
new file mode 100644
index 000000000000..a53319ded79c
--- /dev/null
+++ b/drivers/net/sfc/filter.h
@@ -0,0 +1,189 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_FILTER_H
11#define EFX_FILTER_H
12
13#include <linux/types.h>
14
15enum efx_filter_table_id {
16 EFX_FILTER_TABLE_RX_IP = 0,
17 EFX_FILTER_TABLE_RX_MAC,
18 EFX_FILTER_TABLE_COUNT,
19};
20
21/**
22 * enum efx_filter_type - type of hardware filter
23 * @EFX_FILTER_RX_TCP_FULL: RX, matching TCP/IPv4 4-tuple
24 * @EFX_FILTER_RX_TCP_WILD: RX, matching TCP/IPv4 destination (host, port)
25 * @EFX_FILTER_RX_UDP_FULL: RX, matching UDP/IPv4 4-tuple
26 * @EFX_FILTER_RX_UDP_WILD: RX, matching UDP/IPv4 destination (host, port)
27 * @EFX_FILTER_RX_MAC_FULL: RX, matching Ethernet destination MAC address, VID
28 * @EFX_FILTER_RX_MAC_WILD: RX, matching Ethernet destination MAC address
29 *
30 * Falcon NICs only support the RX TCP/IPv4 and UDP/IPv4 filter types.
31 */
32enum efx_filter_type {
33 EFX_FILTER_RX_TCP_FULL = 0,
34 EFX_FILTER_RX_TCP_WILD,
35 EFX_FILTER_RX_UDP_FULL,
36 EFX_FILTER_RX_UDP_WILD,
37 EFX_FILTER_RX_MAC_FULL = 4,
38 EFX_FILTER_RX_MAC_WILD,
39 EFX_FILTER_TYPE_COUNT,
40};
41
42/**
43 * enum efx_filter_priority - priority of a hardware filter specification
44 * @EFX_FILTER_PRI_HINT: Performance hint
45 * @EFX_FILTER_PRI_MANUAL: Manually configured filter
46 * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour
47 */
48enum efx_filter_priority {
49 EFX_FILTER_PRI_HINT = 0,
50 EFX_FILTER_PRI_MANUAL,
51 EFX_FILTER_PRI_REQUIRED,
52};
53
54/**
55 * enum efx_filter_flags - flags for hardware filter specifications
56 * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
57 * By default, matching packets will be delivered only to the
58 * specified queue. If this flag is set, they will be delivered
59 * to a range of queues offset from the specified queue number
60 * according to the indirection table.
61 * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
62 * queue.
63 * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
64 * any IP filter that matches the same packet. By default, IP
65 * filters take precedence.
66 *
67 * Currently, no flags are defined for TX filters.
68 */
69enum efx_filter_flags {
70 EFX_FILTER_FLAG_RX_RSS = 0x01,
71 EFX_FILTER_FLAG_RX_SCATTER = 0x02,
72 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
73};
74
75/**
76 * struct efx_filter_spec - specification for a hardware filter
77 * @type: Type of match to be performed, from &enum efx_filter_type
78 * @priority: Priority of the filter, from &enum efx_filter_priority
79 * @flags: Miscellaneous flags, from &enum efx_filter_flags
80 * @dmaq_id: Source/target queue index
81 * @data: Match data (type-dependent)
82 *
83 * Use the efx_filter_set_*() functions to initialise the @type and
84 * @data fields.
85 */
86struct efx_filter_spec {
87 u8 type:4;
88 u8 priority:4;
89 u8 flags;
90 u16 dmaq_id;
91 u32 data[3];
92};
93
94/**
95 * efx_filter_set_rx_tcp_full - specify RX filter with TCP/IPv4 full match
96 * @spec: Specification to initialise
97 * @shost: Source host address (host byte order)
98 * @sport: Source port (host byte order)
99 * @dhost: Destination host address (host byte order)
100 * @dport: Destination port (host byte order)
101 */
102static inline void
103efx_filter_set_rx_tcp_full(struct efx_filter_spec *spec,
104 u32 shost, u16 sport, u32 dhost, u16 dport)
105{
106 spec->type = EFX_FILTER_RX_TCP_FULL;
107 spec->data[0] = sport | shost << 16;
108 spec->data[1] = dport << 16 | shost >> 16;
109 spec->data[2] = dhost;
110}
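A worked packing example, since the u32 truncation is easy to miss: with shost = 0xc0a80001, sport = 0x1234, dhost = 0x0a000001, dport = 0x0050 (all values invented),

	/* data[0] = 0x00011234   (shost[15:0] << 16) | sport
	 * data[1] = 0x0050c0a8   (dport << 16) | shost[31:16]
	 * data[2] = 0x0a000001   dhost                        */

i.e. a 96-bit {dhost, dport, shost, sport} field split across the three words; shost << 16 deliberately keeps only the low half of shost.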
111
112/**
113 * efx_filter_set_rx_tcp_wild - specify RX filter with TCP/IPv4 wildcard match
114 * @spec: Specification to initialise
115 * @dhost: Destination host address (host byte order)
116 * @dport: Destination port (host byte order)
117 */
118static inline void
119efx_filter_set_rx_tcp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
120{
121 spec->type = EFX_FILTER_RX_TCP_WILD;
122 spec->data[0] = 0;
123 spec->data[1] = dport << 16;
124 spec->data[2] = dhost;
125}
126
127/**
128 * efx_filter_set_rx_udp_full - specify RX filter with UDP/IPv4 full match
129 * @spec: Specification to initialise
130 * @shost: Source host address (host byte order)
131 * @sport: Source port (host byte order)
132 * @dhost: Destination host address (host byte order)
133 * @dport: Destination port (host byte order)
134 */
135static inline void
136efx_filter_set_rx_udp_full(struct efx_filter_spec *spec,
137 u32 shost, u16 sport, u32 dhost, u16 dport)
138{
139 spec->type = EFX_FILTER_RX_UDP_FULL;
140 spec->data[0] = sport | shost << 16;
141 spec->data[1] = dport << 16 | shost >> 16;
142 spec->data[2] = dhost;
143}
144
145/**
146 * efx_filter_set_rx_udp_wild - specify RX filter with UDP/IPv4 wildcard match
147 * @spec: Specification to initialise
148 * @dhost: Destination host address (host byte order)
149 * @dport: Destination port (host byte order)
150 */
151static inline void
152efx_filter_set_rx_udp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
153{
154 spec->type = EFX_FILTER_RX_UDP_WILD;
155 spec->data[0] = dport;
156 spec->data[1] = 0;
157 spec->data[2] = dhost;
158}
159
160/**
161 * efx_filter_set_rx_mac_full - specify RX filter with MAC full match
162 * @spec: Specification to initialise
163 * @vid: VLAN ID
164 * @addr: Destination MAC address
165 */
166static inline void efx_filter_set_rx_mac_full(struct efx_filter_spec *spec,
167 u16 vid, const u8 *addr)
168{
169 spec->type = EFX_FILTER_RX_MAC_FULL;
170 spec->data[0] = vid;
171 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
172 spec->data[2] = addr[0] << 8 | addr[1];
173}
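A worked example of the byte order: vid = 5 and addr = 00:0f:53:01:02:03 (invented) pack as

	/* data[0] = 0x00000005   VLAN ID
	 * data[1] = 0x53010203   addr[2..5], most significant byte first
	 * data[2] = 0x0000000f   addr[0..1]                            */

so the destination MAC sits in the low 48 bits of data[2]:data[1] in network byte order.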
174
175/**
176 * efx_filter_set_rx_mac_wild - specify RX filter with MAC wildcard match
177 * @spec: Specification to initialise
178 * @addr: Destination MAC address
179 */
180static inline void efx_filter_set_rx_mac_wild(struct efx_filter_spec *spec,
181 const u8 *addr)
182{
183 spec->type = EFX_FILTER_RX_MAC_WILD;
184 spec->data[0] = 0;
185 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
186 spec->data[2] = addr[0] << 8 | addr[1];
187}
188
189#endif /* EFX_FILTER_H */
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index f1aa5f374890..7a6e5ca0290e 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -13,7 +13,6 @@
13 13
14#include "net_driver.h" 14#include "net_driver.h"
15 15
16extern struct efx_mac_operations falcon_gmac_operations;
17extern struct efx_mac_operations falcon_xmac_operations; 16extern struct efx_mac_operations falcon_xmac_operations;
18extern struct efx_mac_operations efx_mcdi_mac_operations; 17extern struct efx_mac_operations efx_mcdi_mac_operations;
19extern void falcon_reconfigure_xmac_core(struct efx_nic *efx); 18extern void falcon_reconfigure_xmac_core(struct efx_nic *efx);
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index eeaf0bd64bd3..98d946020429 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -286,46 +286,24 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
286 */ 286 */
287void efx_mdio_an_reconfigure(struct efx_nic *efx) 287void efx_mdio_an_reconfigure(struct efx_nic *efx)
288{ 288{
289 bool xnp = (efx->link_advertising & ADVERTISED_10000baseT_Full
290 || EFX_WORKAROUND_13204(efx));
291 int reg; 289 int reg;
292 290
293 WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN)); 291 WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
294 292
295 /* Set up the base page */ 293 /* Set up the base page */
296	reg = ADVERTISE_CSMA;
294	reg = ADVERTISE_CSMA | ADVERTISE_RESV;
297 if (efx->link_advertising & ADVERTISED_10baseT_Half)
298 reg |= ADVERTISE_10HALF;
299 if (efx->link_advertising & ADVERTISED_10baseT_Full)
300 reg |= ADVERTISE_10FULL;
301 if (efx->link_advertising & ADVERTISED_100baseT_Half)
302 reg |= ADVERTISE_100HALF;
303 if (efx->link_advertising & ADVERTISED_100baseT_Full)
304 reg |= ADVERTISE_100FULL;
305 if (xnp)
306 reg |= ADVERTISE_RESV;
307 else if (efx->link_advertising & (ADVERTISED_1000baseT_Half |
308 ADVERTISED_1000baseT_Full))
309 reg |= ADVERTISE_NPAGE;
310 if (efx->link_advertising & ADVERTISED_Pause) 295 if (efx->link_advertising & ADVERTISED_Pause)
311 reg |= ADVERTISE_PAUSE_CAP; 296 reg |= ADVERTISE_PAUSE_CAP;
312 if (efx->link_advertising & ADVERTISED_Asym_Pause) 297 if (efx->link_advertising & ADVERTISED_Asym_Pause)
313 reg |= ADVERTISE_PAUSE_ASYM; 298 reg |= ADVERTISE_PAUSE_ASYM;
314 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); 299 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
315 300
316	/* Set up the (extended) next page if necessary */
317	if (efx->phy_op->set_npage_adv)
318		efx->phy_op->set_npage_adv(efx, efx->link_advertising);
301	/* Set up the (extended) next page */
302	efx->phy_op->set_npage_adv(efx, efx->link_advertising);
319 303
320 /* Enable and restart AN */ 304 /* Enable and restart AN */
321 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1); 305 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
322	reg |= MDIO_AN_CTRL1_ENABLE;
323	if (!(EFX_WORKAROUND_15195(efx) && LOOPBACK_EXTERNAL(efx)))
324		reg |= MDIO_AN_CTRL1_RESTART;
325	if (xnp)
326		reg |= MDIO_AN_CTRL1_XNP;
327	else
328		reg &= ~MDIO_AN_CTRL1_XNP;
306	reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
329 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg); 307 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
330} 308}
331 309
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 64e7caa4bbb5..44f4d58a39a6 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -29,6 +29,7 @@
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/highmem.h> 30#include <linux/highmem.h>
31#include <linux/workqueue.h> 31#include <linux/workqueue.h>
32#include <linux/vmalloc.h>
32#include <linux/i2c.h> 33#include <linux/i2c.h>
33 34
34#include "enum.h" 35#include "enum.h"
@@ -137,6 +138,7 @@ struct efx_tx_buffer {
137 * @channel: The associated channel 138 * @channel: The associated channel
138 * @buffer: The software buffer ring 139 * @buffer: The software buffer ring
139 * @txd: The hardware descriptor ring 140 * @txd: The hardware descriptor ring
141 * @ptr_mask: The size of the ring minus 1.
140 * @flushed: Used when handling queue flushing 142 * @flushed: Used when handling queue flushing
141 * @read_count: Current read pointer. 143 * @read_count: Current read pointer.
142 * This is the number of buffers that have been removed from both rings. 144 * This is the number of buffers that have been removed from both rings.
@@ -170,6 +172,7 @@ struct efx_tx_queue {
170 struct efx_nic *nic; 172 struct efx_nic *nic;
171 struct efx_tx_buffer *buffer; 173 struct efx_tx_buffer *buffer;
172 struct efx_special_buffer txd; 174 struct efx_special_buffer txd;
175 unsigned int ptr_mask;
173 enum efx_flush_state flushed; 176 enum efx_flush_state flushed;
174 177
175 /* Members used mainly on the completion path */ 178 /* Members used mainly on the completion path */
@@ -225,10 +228,9 @@ struct efx_rx_page_state {
225/** 228/**
226 * struct efx_rx_queue - An Efx RX queue 229 * struct efx_rx_queue - An Efx RX queue
227 * @efx: The associated Efx NIC 230 * @efx: The associated Efx NIC
228 * @queue: DMA queue number
229 * @channel: The associated channel
230 * @buffer: The software buffer ring 231 * @buffer: The software buffer ring
231 * @rxd: The hardware descriptor ring 232 * @rxd: The hardware descriptor ring
233 * @ptr_mask: The size of the ring minus 1.
232 * @added_count: Number of buffers added to the receive queue. 234 * @added_count: Number of buffers added to the receive queue.
233 * @notified_count: Number of buffers given to NIC (<= @added_count). 235 * @notified_count: Number of buffers given to NIC (<= @added_count).
234 * @removed_count: Number of buffers removed from the receive queue. 236 * @removed_count: Number of buffers removed from the receive queue.
@@ -240,9 +242,6 @@ struct efx_rx_page_state {
240 * @min_fill: RX descriptor minimum non-zero fill level. 242 * @min_fill: RX descriptor minimum non-zero fill level.
241 * This records the minimum fill level observed when a ring 243 * This records the minimum fill level observed when a ring
242 * refill was triggered. 244 * refill was triggered.
243 * @min_overfill: RX descriptor minimum overflow fill level.
244 * This records the minimum fill level at which RX queue
245 * overflow was observed. It should never be set.
246 * @alloc_page_count: RX allocation strategy counter. 245 * @alloc_page_count: RX allocation strategy counter.
247 * @alloc_skb_count: RX allocation strategy counter. 246 * @alloc_skb_count: RX allocation strategy counter.
248 * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). 247 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
@@ -250,10 +249,9 @@ struct efx_rx_page_state {
250 */ 249 */
251struct efx_rx_queue { 250struct efx_rx_queue {
252 struct efx_nic *efx; 251 struct efx_nic *efx;
253 int queue;
254 struct efx_channel *channel;
255 struct efx_rx_buffer *buffer; 252 struct efx_rx_buffer *buffer;
256 struct efx_special_buffer rxd; 253 struct efx_special_buffer rxd;
254 unsigned int ptr_mask;
257 255
258 int added_count; 256 int added_count;
259 int notified_count; 257 int notified_count;
@@ -302,7 +300,6 @@ enum efx_rx_alloc_method {
302 * 300 *
303 * @efx: Associated Efx NIC 301 * @efx: Associated Efx NIC
304 * @channel: Channel instance number 302 * @channel: Channel instance number
305 * @name: Name for channel and IRQ
306 * @enabled: Channel enabled indicator 303 * @enabled: Channel enabled indicator
307 * @irq: IRQ number (MSI and MSI-X only) 304 * @irq: IRQ number (MSI and MSI-X only)
308 * @irq_moderation: IRQ moderation value (in hardware ticks) 305 * @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -311,6 +308,7 @@ enum efx_rx_alloc_method {
311 * @reset_work: Scheduled reset work thread 308 * @reset_work: Scheduled reset work thread
312 * @work_pending: Is work pending via NAPI? 309 * @work_pending: Is work pending via NAPI?
313 * @eventq: Event queue buffer 310 * @eventq: Event queue buffer
311 * @eventq_mask: Event queue pointer mask
314 * @eventq_read_ptr: Event queue read pointer 312 * @eventq_read_ptr: Event queue read pointer
315 * @last_eventq_read_ptr: Last event queue read pointer value. 313 * @last_eventq_read_ptr: Last event queue read pointer value.
316 * @magic_count: Event queue test event count 314 * @magic_count: Event queue test event count
@@ -327,14 +325,14 @@ enum efx_rx_alloc_method {
327 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors 325 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
328 * @n_rx_overlength: Count of RX_OVERLENGTH errors 326 * @n_rx_overlength: Count of RX_OVERLENGTH errors
329 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 327 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
330 * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
328 * @rx_queue: RX queue for this channel
331 * @tx_stop_count: Core TX queue stop count 329 * @tx_stop_count: Core TX queue stop count
332 * @tx_stop_lock: Core TX queue stop lock 330 * @tx_stop_lock: Core TX queue stop lock
331 * @tx_queue: TX queues for this channel
333 */ 332 */
334struct efx_channel { 333struct efx_channel {
335 struct efx_nic *efx; 334 struct efx_nic *efx;
336 int channel; 335 int channel;
337 char name[IFNAMSIZ + 6];
338 bool enabled; 336 bool enabled;
339 int irq; 337 int irq;
340 unsigned int irq_moderation; 338 unsigned int irq_moderation;
@@ -342,6 +340,7 @@ struct efx_channel {
342 struct napi_struct napi_str; 340 struct napi_struct napi_str;
343 bool work_pending; 341 bool work_pending;
344 struct efx_special_buffer eventq; 342 struct efx_special_buffer eventq;
343 unsigned int eventq_mask;
345 unsigned int eventq_read_ptr; 344 unsigned int eventq_read_ptr;
346 unsigned int last_eventq_read_ptr; 345 unsigned int last_eventq_read_ptr;
347 unsigned int magic_count; 346 unsigned int magic_count;
@@ -366,9 +365,12 @@ struct efx_channel {
366 struct efx_rx_buffer *rx_pkt; 365 struct efx_rx_buffer *rx_pkt;
367 bool rx_pkt_csummed; 366 bool rx_pkt_csummed;
368 367
369	struct efx_tx_queue *tx_queue;
368	struct efx_rx_queue rx_queue;
369
370 atomic_t tx_stop_count; 370 atomic_t tx_stop_count;
371 spinlock_t tx_stop_lock; 371 spinlock_t tx_stop_lock;
372
373 struct efx_tx_queue tx_queue[2];
372}; 374};
373 375
374enum efx_led_mode { 376enum efx_led_mode {
@@ -404,8 +406,6 @@ enum efx_int_mode {
404}; 406};
405#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) 407#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
406 408
407#define EFX_IS10G(efx) ((efx)->link_state.speed == 10000)
408
409enum nic_state { 409enum nic_state {
410 STATE_INIT = 0, 410 STATE_INIT = 0,
411 STATE_RUNNING = 1, 411 STATE_RUNNING = 1,
@@ -618,6 +618,8 @@ union efx_multicast_hash {
618 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; 618 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
619}; 619};
620 620
621struct efx_filter_state;
622
621/** 623/**
622 * struct efx_nic - an Efx NIC 624 * struct efx_nic - an Efx NIC
623 * @name: Device name (net device name or bus id before net device registered) 625 * @name: Device name (net device name or bus id before net device registered)
@@ -641,6 +643,9 @@ union efx_multicast_hash {
641 * @tx_queue: TX DMA queues 643 * @tx_queue: TX DMA queues
642 * @rx_queue: RX DMA queues 644 * @rx_queue: RX DMA queues
643 * @channel: Channels 645 * @channel: Channels
646 * @channel_name: Names for channels and their IRQs
647 * @rxq_entries: Size of receive queues requested by user.
648 * @txq_entries: Size of transmit queues requested by user.
644 * @next_buffer_table: First available buffer table id 649 * @next_buffer_table: First available buffer table id
645 * @n_channels: Number of channels in use 650 * @n_channels: Number of channels in use
646 * @n_rx_channels: Number of channels used for RX (= number of RX queues) 651 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
@@ -724,10 +729,11 @@ struct efx_nic {
724 enum nic_state state; 729 enum nic_state state;
725 enum reset_type reset_pending; 730 enum reset_type reset_pending;
726 731
727	struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
728	struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
729	struct efx_channel channel[EFX_MAX_CHANNELS];
732	struct efx_channel *channel[EFX_MAX_CHANNELS];
733	char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
730 734
735 unsigned rxq_entries;
736 unsigned txq_entries;
731 unsigned next_buffer_table; 737 unsigned next_buffer_table;
732 unsigned n_channels; 738 unsigned n_channels;
733 unsigned n_rx_channels; 739 unsigned n_rx_channels;
@@ -794,6 +800,8 @@ struct efx_nic {
794 u64 loopback_modes; 800 u64 loopback_modes;
795 801
796 void *loopback_selftest; 802 void *loopback_selftest;
803
804 struct efx_filter_state *filter_state;
797}; 805};
798 806
799static inline int efx_dev_registered(struct efx_nic *efx) 807static inline int efx_dev_registered(struct efx_nic *efx)
@@ -909,39 +917,67 @@ struct efx_nic_type {
909 * 917 *
910 *************************************************************************/ 918 *************************************************************************/
911 919
920static inline struct efx_channel *
921efx_get_channel(struct efx_nic *efx, unsigned index)
922{
923 EFX_BUG_ON_PARANOID(index >= efx->n_channels);
924 return efx->channel[index];
925}
926
912/* Iterate over all used channels */ 927/* Iterate over all used channels */
913#define efx_for_each_channel(_channel, _efx) \ 928#define efx_for_each_channel(_channel, _efx) \
914	for (_channel = &((_efx)->channel[0]);			\
915	     _channel < &((_efx)->channel[(efx)->n_channels]);	\
916	     _channel++)
917
918/* Iterate over all used TX queues */
919#define efx_for_each_tx_queue(_tx_queue, _efx)			\
920	for (_tx_queue = &((_efx)->tx_queue[0]);		\
921	     _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES *	\
922					    (_efx)->n_tx_channels]);	\
923	     _tx_queue++)
929	for (_channel = (_efx)->channel[0];				\
930	     _channel;							\
931	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
932		     (_efx)->channel[_channel->channel + 1] : NULL)
933
934extern struct efx_tx_queue *
935efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type);
936
937static inline struct efx_tx_queue *
938efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
939{
940	struct efx_tx_queue *tx_queue = channel->tx_queue;
941	EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
942	return tx_queue->channel ? tx_queue + type : NULL;
943}
924 944
925/* Iterate over all TX queues belonging to a channel */ 945/* Iterate over all TX queues belonging to a channel */
926#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ 946#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
927	for (_tx_queue = (_channel)->tx_queue;				\
947	for (_tx_queue = efx_channel_get_tx_queue(channel, 0);		\
928 _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ 948 _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
929 _tx_queue++) 949 _tx_queue++)
930 950
931/* Iterate over all used RX queues */
932#define efx_for_each_rx_queue(_rx_queue, _efx)				\
933	for (_rx_queue = &((_efx)->rx_queue[0]);			\
934	     _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]);	\
935	     _rx_queue++)
951static inline struct efx_rx_queue *
952efx_get_rx_queue(struct efx_nic *efx, unsigned index)
953{
954	EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
955	return &efx->channel[index]->rx_queue;
956}
957
958static inline struct efx_rx_queue *
959efx_channel_get_rx_queue(struct efx_channel *channel)
960{
961	return channel->channel < channel->efx->n_rx_channels ?
962		&channel->rx_queue : NULL;
963}
936 964
937/* Iterate over all RX queues belonging to a channel */ 965/* Iterate over all RX queues belonging to a channel */
938#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ 966#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
939	for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
940	     _rx_queue;							\
941	     _rx_queue = NULL)						\
942		if (_rx_queue->channel != (_channel))			\
943			continue;					\
944		else
967	for (_rx_queue = efx_channel_get_rx_queue(channel);		\
968	     _rx_queue;							\
969	     _rx_queue = NULL)
970
971static inline struct efx_channel *
972efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
973{
974	return container_of(rx_queue, struct efx_channel, rx_queue);
975}
976
977static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
978{
979	return efx_rx_queue_channel(rx_queue)->channel;
980}
945 981
946/* Returns a pointer to the specified receive buffer in the RX 982/* Returns a pointer to the specified receive buffer in the RX
947 * descriptor queue. 983 * descriptor queue.
@@ -949,7 +985,7 @@ struct efx_nic_type {
949static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, 985static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
950 unsigned int index) 986 unsigned int index)
951{ 987{
952	return (&rx_queue->buffer[index]);
988	return &rx_queue->buffer[index];
953} 989}
954 990
955/* Set bit in a little-endian bitfield */ 991/* Set bit in a little-endian bitfield */
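The net effect of the restructuring above is that queues hang off their owning channel rather than off the NIC, so former per-NIC queue loops become nested per-channel loops. A sketch using only the iterators and accessors introduced here (the function itself is invented for illustration):

static void example_log_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			netif_dbg(efx, drv, efx->net_dev,
				  "channel %d: TX queue %d\n",
				  channel->channel, tx_queue->queue);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			netif_dbg(efx, drv, efx->net_dev,
				  "channel %d: RX queue %d\n",
				  channel->channel,
				  efx_rx_queue_index(rx_queue));
	}
}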
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index f595d920c7c4..394dd929fee7 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -104,7 +104,7 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
104static inline efx_qword_t *efx_event(struct efx_channel *channel, 104static inline efx_qword_t *efx_event(struct efx_channel *channel,
105 unsigned int index) 105 unsigned int index)
106{ 106{
107	return (((efx_qword_t *) (channel->eventq.addr)) + index);
107	return ((efx_qword_t *) (channel->eventq.addr)) + index;
108} 108}
109 109
110/* See if an event is present 110/* See if an event is present
@@ -119,8 +119,8 @@ static inline efx_qword_t *efx_event(struct efx_channel *channel,
119 */ 119 */
120static inline int efx_event_present(efx_qword_t *event) 120static inline int efx_event_present(efx_qword_t *event)
121{ 121{
122	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
123		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
122	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
123		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
124} 124}
125 125
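The two dwords are deliberately tested separately: event queue memory is initialised to all-ones and the NIC DMA-writes events over that fill pattern, so an entry counts as present only once neither half still reads as all-ones, which is why the test uses two dword compares rather than a single 64-bit compare that could accept a half-written event.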
126static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, 126static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
@@ -263,8 +263,8 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
263{ 263{
264 len = ALIGN(len, EFX_BUF_SIZE); 264 len = ALIGN(len, EFX_BUF_SIZE);
265 265
266	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
267					    &buffer->dma_addr);
266	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
267					  &buffer->dma_addr, GFP_KERNEL);
268 if (!buffer->addr) 268 if (!buffer->addr)
269 return -ENOMEM; 269 return -ENOMEM;
270 buffer->len = len; 270 buffer->len = len;
@@ -301,8 +301,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
301 (u64)buffer->dma_addr, buffer->len, 301 (u64)buffer->dma_addr, buffer->len,
302 buffer->addr, (u64)virt_to_phys(buffer->addr)); 302 buffer->addr, (u64)virt_to_phys(buffer->addr));
303 303
304	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
304	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
305 buffer->dma_addr); 305 buffer->dma_addr);
306 buffer->addr = NULL; 306 buffer->addr = NULL;
307 buffer->entries = 0; 307 buffer->entries = 0;
308} 308}
@@ -347,7 +347,7 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
347static inline efx_qword_t * 347static inline efx_qword_t *
348efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) 348efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
349{ 349{
350	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
350	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
351} 351}
352 352
353/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 353/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
@@ -356,7 +356,7 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
356 unsigned write_ptr; 356 unsigned write_ptr;
357 efx_dword_t reg; 357 efx_dword_t reg;
358 358
359	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
359	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
360 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); 360 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
361 efx_writed_page(tx_queue->efx, &reg, 361 efx_writed_page(tx_queue->efx, &reg,
362 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); 362 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
@@ -377,7 +377,7 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
377 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 377 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
378 378
379 do { 379 do {
380		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
380		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
381 buffer = &tx_queue->buffer[write_ptr]; 381 buffer = &tx_queue->buffer[write_ptr];
382 txd = efx_tx_desc(tx_queue, write_ptr); 382 txd = efx_tx_desc(tx_queue, write_ptr);
383 ++tx_queue->write_count; 383 ++tx_queue->write_count;
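The masking convention relies on the ring sizes being powers of two: read/write counts run free and are masked only when used as indices, so the fill level is a plain subtraction even across wrap. Illustrative arithmetic:

	/* ring of 512 entries  ->  ptr_mask = 511
	 * write_count = 513, read_count = 510
	 * ring index = 513 & 511 = 1
	 * in flight  = 513 - 510 = 3   (no wrap handling needed) */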
@@ -398,10 +398,11 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
398int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) 398int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
399{ 399{
400 struct efx_nic *efx = tx_queue->efx; 400 struct efx_nic *efx = tx_queue->efx;
401	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
402		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
403	return efx_alloc_special_buffer(efx, &tx_queue->txd,
404					EFX_TXQ_SIZE * sizeof(efx_qword_t));
401	unsigned entries;
402
403	entries = tx_queue->ptr_mask + 1;
404	return efx_alloc_special_buffer(efx, &tx_queue->txd,
405					entries * sizeof(efx_qword_t));
405} 406}
406 407
407void efx_nic_init_tx(struct efx_tx_queue *tx_queue) 408void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
@@ -501,7 +502,7 @@ void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
501static inline efx_qword_t * 502static inline efx_qword_t *
502efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 503efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
503{ 504{
504	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
505	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
505} 506}
506 507
507/* This creates an entry in the RX descriptor queue */ 508/* This creates an entry in the RX descriptor queue */
@@ -526,30 +527,32 @@ efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
526 */ 527 */
527void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) 528void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
528{ 529{
530 struct efx_nic *efx = rx_queue->efx;
529 efx_dword_t reg; 531 efx_dword_t reg;
530 unsigned write_ptr; 532 unsigned write_ptr;
531 533
532 while (rx_queue->notified_count != rx_queue->added_count) { 534 while (rx_queue->notified_count != rx_queue->added_count) {
533		efx_build_rx_desc(rx_queue,
534				  rx_queue->notified_count &
535				  EFX_RXQ_MASK);
535		efx_build_rx_desc(
536			rx_queue,
537			rx_queue->notified_count & rx_queue->ptr_mask);
536 ++rx_queue->notified_count; 538 ++rx_queue->notified_count;
537 } 539 }
538 540
539 wmb(); 541 wmb();
540	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
542	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
541 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); 543 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
542	efx_writed_page(rx_queue->efx, &reg,
543			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
544	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
545			efx_rx_queue_index(rx_queue));
544} 546}
545 547
546int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) 548int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
547{ 549{
548 struct efx_nic *efx = rx_queue->efx; 550 struct efx_nic *efx = rx_queue->efx;
549	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
550		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
551	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
552					EFX_RXQ_SIZE * sizeof(efx_qword_t));
551	unsigned entries;
552
553	entries = rx_queue->ptr_mask + 1;
554	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
555					entries * sizeof(efx_qword_t));
553} 556}
554 557
555void efx_nic_init_rx(struct efx_rx_queue *rx_queue) 558void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
@@ -561,7 +564,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
561 564
562 netif_dbg(efx, hw, efx->net_dev, 565 netif_dbg(efx, hw, efx->net_dev,
563 "RX queue %d ring in special buffers %d-%d\n", 566 "RX queue %d ring in special buffers %d-%d\n",
564		  rx_queue->queue, rx_queue->rxd.index,
567		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
565 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 568 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
566 569
567 rx_queue->flushed = FLUSH_NONE; 570 rx_queue->flushed = FLUSH_NONE;
@@ -575,9 +578,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
575 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, 578 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
576 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 579 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
577 FRF_AZ_RX_DESCQ_EVQ_ID, 580 FRF_AZ_RX_DESCQ_EVQ_ID,
578			      rx_queue->channel->channel,
581			      efx_rx_queue_channel(rx_queue)->channel,
579 FRF_AZ_RX_DESCQ_OWNER_ID, 0, 582 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
580			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
583			      FRF_AZ_RX_DESCQ_LABEL,
584			      efx_rx_queue_index(rx_queue),
581 FRF_AZ_RX_DESCQ_SIZE, 585 FRF_AZ_RX_DESCQ_SIZE,
582 __ffs(rx_queue->rxd.entries), 586 __ffs(rx_queue->rxd.entries),
583 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , 587 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
@@ -585,7 +589,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
585 FRF_AZ_RX_DESCQ_JUMBO, !is_b0, 589 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
586 FRF_AZ_RX_DESCQ_EN, 1); 590 FRF_AZ_RX_DESCQ_EN, 1);
587 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 591 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
588 rx_queue->queue); 592 efx_rx_queue_index(rx_queue));
589} 593}
590 594
591static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) 595static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
@@ -598,7 +602,8 @@ static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
598 /* Post a flush command */ 602 /* Post a flush command */
599 EFX_POPULATE_OWORD_2(rx_flush_descq, 603 EFX_POPULATE_OWORD_2(rx_flush_descq,
600 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, 604 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
601 FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue); 605 FRF_AZ_RX_FLUSH_DESCQ,
606 efx_rx_queue_index(rx_queue));
602 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); 607 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
603} 608}
604 609
@@ -613,7 +618,7 @@ void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
613 /* Remove RX descriptor ring from card */ 618 /* Remove RX descriptor ring from card */
614 EFX_ZERO_OWORD(rx_desc_ptr); 619 EFX_ZERO_OWORD(rx_desc_ptr);
615 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 620 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
616 rx_queue->queue); 621 efx_rx_queue_index(rx_queue));
617 622
618 /* Unpin RX descriptor ring */ 623 /* Unpin RX descriptor ring */
619 efx_fini_special_buffer(efx, &rx_queue->rxd); 624 efx_fini_special_buffer(efx, &rx_queue->rxd);
@@ -680,15 +685,17 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
680 /* Transmit completion */ 685 /* Transmit completion */
681 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 686 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
682 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 687 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
683 tx_queue = &efx->tx_queue[tx_ev_q_label]; 688 tx_queue = efx_channel_get_tx_queue(
689 channel, tx_ev_q_label % EFX_TXQ_TYPES);
684 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & 690 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
685 EFX_TXQ_MASK); 691 tx_queue->ptr_mask);
686 channel->irq_mod_score += tx_packets; 692 channel->irq_mod_score += tx_packets;
687 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 693 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
688 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 694 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
689 /* Rewrite the FIFO write pointer */ 695 /* Rewrite the FIFO write pointer */
690 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 696 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
691 tx_queue = &efx->tx_queue[tx_ev_q_label]; 697 tx_queue = efx_channel_get_tx_queue(
698 channel, tx_ev_q_label % EFX_TXQ_TYPES);
692 699
693 if (efx_dev_registered(efx)) 700 if (efx_dev_registered(efx))
694 netif_tx_lock(efx->net_dev); 701 netif_tx_lock(efx->net_dev);
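
Both branches now recover the TX queue from its owning channel: the event's queue label is reduced modulo EFX_TXQ_TYPES to a per-channel queue type, the inverse of numbering queues as channel * EFX_TXQ_TYPES + type. A hedged sketch of that mapping (the TXQ_TYPES value below is assumed for illustration, not taken from sfc headers):

	#include <assert.h>

	#define TXQ_TYPES 2	/* assumed per-channel queue count */

	static unsigned int global_txq(unsigned int channel, unsigned int type)
	{
		return channel * TXQ_TYPES + type;
	}

	int main(void)
	{
		unsigned int q = global_txq(3, 1);

		/* The decomposition used when demuxing an event: */
		assert(q / TXQ_TYPES == 3);	/* owning channel */
		assert(q % TXQ_TYPES == 1);	/* queue type within the channel */
		return 0;
	}
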
@@ -714,6 +721,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
714 bool *rx_ev_pkt_ok, 721 bool *rx_ev_pkt_ok,
715 bool *discard) 722 bool *discard)
716{ 723{
724 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
717 struct efx_nic *efx = rx_queue->efx; 725 struct efx_nic *efx = rx_queue->efx;
718 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 726 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
719 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 727 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
@@ -746,14 +754,14 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
746 /* Count errors that are not in MAC stats. Ignore expected 754 /* Count errors that are not in MAC stats. Ignore expected
747 * checksum errors during self-test. */ 755 * checksum errors during self-test. */
748 if (rx_ev_frm_trunc) 756 if (rx_ev_frm_trunc)
749 ++rx_queue->channel->n_rx_frm_trunc; 757 ++channel->n_rx_frm_trunc;
750 else if (rx_ev_tobe_disc) 758 else if (rx_ev_tobe_disc)
751 ++rx_queue->channel->n_rx_tobe_disc; 759 ++channel->n_rx_tobe_disc;
752 else if (!efx->loopback_selftest) { 760 else if (!efx->loopback_selftest) {
753 if (rx_ev_ip_hdr_chksum_err) 761 if (rx_ev_ip_hdr_chksum_err)
754 ++rx_queue->channel->n_rx_ip_hdr_chksum_err; 762 ++channel->n_rx_ip_hdr_chksum_err;
755 else if (rx_ev_tcp_udp_chksum_err) 763 else if (rx_ev_tcp_udp_chksum_err)
756 ++rx_queue->channel->n_rx_tcp_udp_chksum_err; 764 ++channel->n_rx_tcp_udp_chksum_err;
757 } 765 }
758 766
759 /* The frame must be discarded if any of these are true. */ 767 /* The frame must be discarded if any of these are true. */
@@ -769,7 +777,7 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
769 netif_dbg(efx, rx_err, efx->net_dev, 777 netif_dbg(efx, rx_err, efx->net_dev,
770 " RX queue %d unexpected RX event " 778 " RX queue %d unexpected RX event "
771 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", 779 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
772 rx_queue->queue, EFX_QWORD_VAL(*event), 780 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
773 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 781 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
774 rx_ev_ip_hdr_chksum_err ? 782 rx_ev_ip_hdr_chksum_err ?
775 " [IP_HDR_CHKSUM_ERR]" : "", 783 " [IP_HDR_CHKSUM_ERR]" : "",
@@ -791,8 +799,8 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
791 struct efx_nic *efx = rx_queue->efx; 799 struct efx_nic *efx = rx_queue->efx;
792 unsigned expected, dropped; 800 unsigned expected, dropped;
793 801
794 expected = rx_queue->removed_count & EFX_RXQ_MASK; 802 expected = rx_queue->removed_count & rx_queue->ptr_mask;
795 dropped = (index - expected) & EFX_RXQ_MASK; 803 dropped = (index - expected) & rx_queue->ptr_mask;
796 netif_info(efx, rx_err, efx->net_dev, 804 netif_info(efx, rx_err, efx->net_dev,
797 "dropped %d events (index=%d expected=%d)\n", 805 "dropped %d events (index=%d expected=%d)\n",
798 dropped, index, expected); 806 dropped, index, expected);
@@ -827,10 +835,10 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
827 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 835 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
828 channel->channel); 836 channel->channel);
829 837
830 rx_queue = &efx->rx_queue[channel->channel]; 838 rx_queue = efx_channel_get_rx_queue(channel);
831 839
832 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 840 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
833 expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK; 841 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
834 if (unlikely(rx_ev_desc_ptr != expected_ptr)) 842 if (unlikely(rx_ev_desc_ptr != expected_ptr))
835 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 843 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
836 844
@@ -879,7 +887,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
879 /* The queue must be empty, so we won't receive any rx 887 /* The queue must be empty, so we won't receive any rx
880 * events, so efx_process_channel() won't refill the 888 * events, so efx_process_channel() won't refill the
881 * queue. Refill it here */ 889 * queue. Refill it here */
882 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); 890 efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
883 else 891 else
884 netif_dbg(efx, hw, efx->net_dev, "channel %d received " 892 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
885 "generated event "EFX_QWORD_FMT"\n", 893 "generated event "EFX_QWORD_FMT"\n",
@@ -997,6 +1005,7 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
997 1005
998int efx_nic_process_eventq(struct efx_channel *channel, int budget) 1006int efx_nic_process_eventq(struct efx_channel *channel, int budget)
999{ 1007{
1008 struct efx_nic *efx = channel->efx;
1000 unsigned int read_ptr; 1009 unsigned int read_ptr;
1001 efx_qword_t event, *p_event; 1010 efx_qword_t event, *p_event;
1002 int ev_code; 1011 int ev_code;
@@ -1021,7 +1030,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1021 EFX_SET_QWORD(*p_event); 1030 EFX_SET_QWORD(*p_event);
1022 1031
1023 /* Increment read pointer */ 1032 /* Increment read pointer */
1024 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK; 1033 read_ptr = (read_ptr + 1) & channel->eventq_mask;
1025 1034
1026 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1035 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1027 1036
@@ -1033,7 +1042,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1033 break; 1042 break;
1034 case FSE_AZ_EV_CODE_TX_EV: 1043 case FSE_AZ_EV_CODE_TX_EV:
1035 tx_packets += efx_handle_tx_event(channel, &event); 1044 tx_packets += efx_handle_tx_event(channel, &event);
1036 if (tx_packets >= EFX_TXQ_SIZE) { 1045 if (tx_packets > efx->txq_entries) {
1037 spent = budget; 1046 spent = budget;
1038 goto out; 1047 goto out;
1039 } 1048 }
@@ -1068,10 +1077,11 @@ out:
1068int efx_nic_probe_eventq(struct efx_channel *channel) 1077int efx_nic_probe_eventq(struct efx_channel *channel)
1069{ 1078{
1070 struct efx_nic *efx = channel->efx; 1079 struct efx_nic *efx = channel->efx;
1071 BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 || 1080 unsigned entries;
1072 EFX_EVQ_SIZE & EFX_EVQ_MASK); 1081
1082 entries = channel->eventq_mask + 1;
1073 return efx_alloc_special_buffer(efx, &channel->eventq, 1083 return efx_alloc_special_buffer(efx, &channel->eventq,
1074 EFX_EVQ_SIZE * sizeof(efx_qword_t)); 1084 entries * sizeof(efx_qword_t));
1075} 1085}
1076 1086
1077void efx_nic_init_eventq(struct efx_channel *channel) 1087void efx_nic_init_eventq(struct efx_channel *channel)
@@ -1163,11 +1173,11 @@ void efx_nic_generate_fill_event(struct efx_channel *channel)
1163 1173
1164static void efx_poll_flush_events(struct efx_nic *efx) 1174static void efx_poll_flush_events(struct efx_nic *efx)
1165{ 1175{
1166 struct efx_channel *channel = &efx->channel[0]; 1176 struct efx_channel *channel = efx_get_channel(efx, 0);
1167 struct efx_tx_queue *tx_queue; 1177 struct efx_tx_queue *tx_queue;
1168 struct efx_rx_queue *rx_queue; 1178 struct efx_rx_queue *rx_queue;
1169 unsigned int read_ptr = channel->eventq_read_ptr; 1179 unsigned int read_ptr = channel->eventq_read_ptr;
1170 unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK; 1180 unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
1171 1181
1172 do { 1182 do {
1173 efx_qword_t *event = efx_event(channel, read_ptr); 1183 efx_qword_t *event = efx_event(channel, read_ptr);
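
The end_ptr arithmetic above deserves a note: (read_ptr - 1) & eventq_mask is the slot just behind the starting cursor, so the do/while walks one slot short of a full lap, visiting every other entry of the ring exactly once before read_ptr meets end_ptr. A stand-alone check of that loop shape:

	#include <assert.h>

	int main(void)
	{
		const unsigned int mask = 7;		/* 8-entry ring */
		unsigned int read_ptr = 3, visited = 0;
		const unsigned int end_ptr = (read_ptr - 1) & mask;

		do {
			visited++;			/* process slot read_ptr */
			read_ptr = (read_ptr + 1) & mask;
		} while (read_ptr != end_ptr);

		assert(visited == mask);	/* 7 of 8 slots: a lap minus one */
		return 0;
	}
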
@@ -1185,7 +1195,9 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1185 ev_queue = EFX_QWORD_FIELD(*event, 1195 ev_queue = EFX_QWORD_FIELD(*event,
1186 FSF_AZ_DRIVER_EV_SUBDATA); 1196 FSF_AZ_DRIVER_EV_SUBDATA);
1187 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) { 1197 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
1188 tx_queue = efx->tx_queue + ev_queue; 1198 tx_queue = efx_get_tx_queue(
1199 efx, ev_queue / EFX_TXQ_TYPES,
1200 ev_queue % EFX_TXQ_TYPES);
1189 tx_queue->flushed = FLUSH_DONE; 1201 tx_queue->flushed = FLUSH_DONE;
1190 } 1202 }
1191 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && 1203 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
@@ -1195,7 +1207,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1195 ev_failed = EFX_QWORD_FIELD( 1207 ev_failed = EFX_QWORD_FIELD(
1196 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1208 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1197 if (ev_queue < efx->n_rx_channels) { 1209 if (ev_queue < efx->n_rx_channels) {
1198 rx_queue = efx->rx_queue + ev_queue; 1210 rx_queue = efx_get_rx_queue(efx, ev_queue);
1199 rx_queue->flushed = 1211 rx_queue->flushed =
1200 ev_failed ? FLUSH_FAILED : FLUSH_DONE; 1212 ev_failed ? FLUSH_FAILED : FLUSH_DONE;
1201 } 1213 }
@@ -1205,7 +1217,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1205 * it's ok to throw away every non-flush event */ 1217 * it's ok to throw away every non-flush event */
1206 EFX_SET_QWORD(*event); 1218 EFX_SET_QWORD(*event);
1207 1219
1208 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK; 1220 read_ptr = (read_ptr + 1) & channel->eventq_mask;
1209 } while (read_ptr != end_ptr); 1221 } while (read_ptr != end_ptr);
1210 1222
1211 channel->eventq_read_ptr = read_ptr; 1223 channel->eventq_read_ptr = read_ptr;
@@ -1216,6 +1228,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1216 * serialise them */ 1228 * serialise them */
1217int efx_nic_flush_queues(struct efx_nic *efx) 1229int efx_nic_flush_queues(struct efx_nic *efx)
1218{ 1230{
1231 struct efx_channel *channel;
1219 struct efx_rx_queue *rx_queue; 1232 struct efx_rx_queue *rx_queue;
1220 struct efx_tx_queue *tx_queue; 1233 struct efx_tx_queue *tx_queue;
1221 int i, tx_pending, rx_pending; 1234 int i, tx_pending, rx_pending;
@@ -1224,29 +1237,35 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1224 efx->type->prepare_flush(efx); 1237 efx->type->prepare_flush(efx);
1225 1238
1226 /* Flush all tx queues in parallel */ 1239 /* Flush all tx queues in parallel */
1227 efx_for_each_tx_queue(tx_queue, efx) 1240 efx_for_each_channel(channel, efx) {
1228 efx_flush_tx_queue(tx_queue); 1241 efx_for_each_channel_tx_queue(tx_queue, channel)
1242 efx_flush_tx_queue(tx_queue);
1243 }
1229 1244
1230 /* The hardware supports four concurrent rx flushes, each of which may 1245 /* The hardware supports four concurrent rx flushes, each of which may
1231 * need to be retried if there is an outstanding descriptor fetch */ 1246 * need to be retried if there is an outstanding descriptor fetch */
1232 for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) { 1247 for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
1233 rx_pending = tx_pending = 0; 1248 rx_pending = tx_pending = 0;
1234 efx_for_each_rx_queue(rx_queue, efx) { 1249 efx_for_each_channel(channel, efx) {
1235 if (rx_queue->flushed == FLUSH_PENDING) 1250 efx_for_each_channel_rx_queue(rx_queue, channel) {
1236 ++rx_pending; 1251 if (rx_queue->flushed == FLUSH_PENDING)
1237 } 1252 ++rx_pending;
1238 efx_for_each_rx_queue(rx_queue, efx) {
1239 if (rx_pending == EFX_RX_FLUSH_COUNT)
1240 break;
1241 if (rx_queue->flushed == FLUSH_FAILED ||
1242 rx_queue->flushed == FLUSH_NONE) {
1243 efx_flush_rx_queue(rx_queue);
1244 ++rx_pending;
1245 } 1253 }
1246 } 1254 }
1247 efx_for_each_tx_queue(tx_queue, efx) { 1255 efx_for_each_channel(channel, efx) {
1248 if (tx_queue->flushed != FLUSH_DONE) 1256 efx_for_each_channel_rx_queue(rx_queue, channel) {
1249 ++tx_pending; 1257 if (rx_pending == EFX_RX_FLUSH_COUNT)
1258 break;
1259 if (rx_queue->flushed == FLUSH_FAILED ||
1260 rx_queue->flushed == FLUSH_NONE) {
1261 efx_flush_rx_queue(rx_queue);
1262 ++rx_pending;
1263 }
1264 }
1265 efx_for_each_channel_tx_queue(tx_queue, channel) {
1266 if (tx_queue->flushed != FLUSH_DONE)
1267 ++tx_pending;
1268 }
1250 } 1269 }
1251 1270
1252 if (rx_pending == 0 && tx_pending == 0) 1271 if (rx_pending == 0 && tx_pending == 0)
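
The restructuring above swaps the old per-NIC queue walks for nested channel iteration but keeps the flush algorithm itself: all TX flushes are posted up front, while RX flushes are throttled because, per the comment, the hardware handles at most four concurrently. A reduced sketch of that throttling pattern, with invented types and an assumed in-flight limit:

	enum flush_state { FLUSH_NONE, FLUSH_PENDING, FLUSH_DONE, FLUSH_FAILED };

	#define RX_FLUSH_COUNT 4	/* assumed hardware in-flight limit */

	struct rxq { enum flush_state flushed; };

	static void post_flush(struct rxq *q) { q->flushed = FLUSH_PENDING; }

	/* One polling pass: count what is in flight, then top up to the limit */
	static unsigned int flush_pass(struct rxq *qs, unsigned int nqueues)
	{
		unsigned int pending = 0, i;

		for (i = 0; i < nqueues; i++)
			if (qs[i].flushed == FLUSH_PENDING)
				pending++;

		for (i = 0; i < nqueues && pending < RX_FLUSH_COUNT; i++) {
			if (qs[i].flushed == FLUSH_NONE ||
			    qs[i].flushed == FLUSH_FAILED) {
				post_flush(&qs[i]);
				pending++;
			}
		}
		return pending;	/* zero once every queue has completed */
	}
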
@@ -1258,19 +1277,21 @@ int efx_nic_flush_queues(struct efx_nic *efx)
1258 1277
1259 /* Mark the queues as all flushed. We're going to return failure 1278 /* Mark the queues as all flushed. We're going to return failure
1260 * leading to a reset, or fake up success anyway */ 1279 * leading to a reset, or fake up success anyway */
1261 efx_for_each_tx_queue(tx_queue, efx) { 1280 efx_for_each_channel(channel, efx) {
1262 if (tx_queue->flushed != FLUSH_DONE) 1281 efx_for_each_channel_tx_queue(tx_queue, channel) {
1263 netif_err(efx, hw, efx->net_dev, 1282 if (tx_queue->flushed != FLUSH_DONE)
1264 "tx queue %d flush command timed out\n", 1283 netif_err(efx, hw, efx->net_dev,
1265 tx_queue->queue); 1284 "tx queue %d flush command timed out\n",
1266 tx_queue->flushed = FLUSH_DONE; 1285 tx_queue->queue);
1267 } 1286 tx_queue->flushed = FLUSH_DONE;
1268 efx_for_each_rx_queue(rx_queue, efx) { 1287 }
1269 if (rx_queue->flushed != FLUSH_DONE) 1288 efx_for_each_channel_rx_queue(rx_queue, channel) {
1270 netif_err(efx, hw, efx->net_dev, 1289 if (rx_queue->flushed != FLUSH_DONE)
1271 "rx queue %d flush command timed out\n", 1290 netif_err(efx, hw, efx->net_dev,
1272 rx_queue->queue); 1291 "rx queue %d flush command timed out\n",
1273 rx_queue->flushed = FLUSH_DONE; 1292 efx_rx_queue_index(rx_queue));
1293 rx_queue->flushed = FLUSH_DONE;
1294 }
1274 } 1295 }
1275 1296
1276 return -ETIMEDOUT; 1297 return -ETIMEDOUT;
@@ -1457,7 +1478,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1457 */ 1478 */
1458static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) 1479static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1459{ 1480{
1460 struct efx_channel *channel = dev_id; 1481 struct efx_channel *channel = *(struct efx_channel **)dev_id;
1461 struct efx_nic *efx = channel->efx; 1482 struct efx_nic *efx = channel->efx;
1462 efx_oword_t *int_ker = efx->irq_status.addr; 1483 efx_oword_t *int_ker = efx->irq_status.addr;
1463 int syserr; 1484 int syserr;
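
The dev_id convention changes here: request_irq() is now handed the address of the channel's slot in efx->channel[] rather than the channel itself, so the handler loads through one extra level of indirection. Presumably the slot address stays stable even when the channel structure it points at is reallocated elsewhere in this series; that motivation is an inference, not stated in the patch. A reduced illustration with an assumed layout:

	#include <assert.h>

	struct channel { int irq; };

	struct nic {
		struct channel *channel[4];	/* stable slots, contents may change */
	};

	/* What the handler does with its cookie */
	static struct channel *cookie_to_channel(void *dev_id)
	{
		return *(struct channel **)dev_id;
	}

	int main(void)
	{
		struct channel a = { .irq = 10 }, b = { .irq = 10 };
		struct nic nic = { .channel = { &a } };
		void *cookie = &nic.channel[0];	/* what request_irq() would get */

		assert(cookie_to_channel(cookie) == &a);
		nic.channel[0] = &b;			/* channel reallocated */
		assert(cookie_to_channel(cookie) == &b);	/* cookie still valid */
		return 0;
	}
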
@@ -1532,7 +1553,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1532 efx_for_each_channel(channel, efx) { 1553 efx_for_each_channel(channel, efx) {
1533 rc = request_irq(channel->irq, efx_msi_interrupt, 1554 rc = request_irq(channel->irq, efx_msi_interrupt,
1534 IRQF_PROBE_SHARED, /* Not shared */ 1555 IRQF_PROBE_SHARED, /* Not shared */
1535 channel->name, channel); 1556 efx->channel_name[channel->channel],
1557 &efx->channel[channel->channel]);
1536 if (rc) { 1558 if (rc) {
1537 netif_err(efx, drv, efx->net_dev, 1559 netif_err(efx, drv, efx->net_dev,
1538 "failed to hook IRQ %d\n", channel->irq); 1560 "failed to hook IRQ %d\n", channel->irq);
@@ -1544,7 +1566,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
1544 1566
1545 fail2: 1567 fail2:
1546 efx_for_each_channel(channel, efx) 1568 efx_for_each_channel(channel, efx)
1547 free_irq(channel->irq, channel); 1569 free_irq(channel->irq, &efx->channel[channel->channel]);
1548 fail1: 1570 fail1:
1549 return rc; 1571 return rc;
1550} 1572}
@@ -1557,7 +1579,7 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
1557 /* Disable MSI/MSI-X interrupts */ 1579 /* Disable MSI/MSI-X interrupts */
1558 efx_for_each_channel(channel, efx) { 1580 efx_for_each_channel(channel, efx) {
1559 if (channel->irq) 1581 if (channel->irq)
1560 free_irq(channel->irq, channel); 1582 free_irq(channel->irq, &efx->channel[channel->channel]);
1561 } 1583 }
1562 1584
1563 /* ACK legacy interrupt */ 1585 /* ACK legacy interrupt */
@@ -1827,8 +1849,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1827 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), 1849 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
1828 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), 1850 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
1829 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), 1851 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
1830 /* The register buffer is allocated with slab, so we can't 1852 /* We can't reasonably read all of the buffer table (up to 8MB!).
1831 * reasonably read all of the buffer table (up to 8MB!).
1832 * However this driver will only use a few entries. Reading 1853 * However this driver will only use a few entries. Reading
1833 * 1K entries allows for some expansion of queue count and 1854 * 1K entries allows for some expansion of queue count and
1834 * size before we need to change the version. */ 1855 * size before we need to change the version. */
@@ -1836,7 +1857,6 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1836 A, A, 8, 1024), 1857 A, A, 8, 1024),
1837 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, 1858 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
1838 B, Z, 8, 1024), 1859 B, Z, 8, 1024),
1839 /* RX_FILTER_TBL{0,1} is huge and not used by this driver */
1840 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), 1860 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
1841 REGISTER_TABLE_BB_CZ(TIMER_TBL), 1861 REGISTER_TABLE_BB_CZ(TIMER_TBL),
1842 REGISTER_TABLE_BB_CZ(TX_PACE_TBL), 1862 REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -1846,6 +1866,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1846 REGISTER_TABLE_CZ(MC_TREG_SMEM), 1866 REGISTER_TABLE_CZ(MC_TREG_SMEM),
1847 /* MSIX_PBA_TABLE is not mapped */ 1867 /* MSIX_PBA_TABLE is not mapped */
1848 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ 1868 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
1869 REGISTER_TABLE_BZ(RX_FILTER_TBL0),
1849}; 1870};
1850 1871
1851size_t efx_nic_get_regs_len(struct efx_nic *efx) 1872size_t efx_nic_get_regs_len(struct efx_nic *efx)
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 5bc26137257b..1dab609757fb 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -11,17 +11,12 @@
11#define EFX_PHY_H 11#define EFX_PHY_H
12 12
13/**************************************************************************** 13/****************************************************************************
14 * 10Xpress (SFX7101 and SFT9001) PHYs 14 * 10Xpress (SFX7101) PHY
15 */ 15 */
16extern struct efx_phy_operations falcon_sfx7101_phy_ops; 16extern struct efx_phy_operations falcon_sfx7101_phy_ops;
17extern struct efx_phy_operations falcon_sft9001_phy_ops;
18 17
19extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 18extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
20 19
21/* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed
22 * to boot due to corrupt flash, or some other negative error code. */
23extern int sft9001_wait_boot(struct efx_nic *efx);
24
25/**************************************************************************** 20/****************************************************************************
26 * AMCC/Quake QT202x PHYs 21 * AMCC/Quake QT202x PHYs
27 */ 22 */
@@ -42,6 +37,17 @@ extern struct efx_phy_operations falcon_qt202x_phy_ops;
42extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state); 37extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
43 38
44/**************************************************************************** 39/****************************************************************************
40* Transwitch CX4 retimer
41*/
42extern struct efx_phy_operations falcon_txc_phy_ops;
43
44#define TXC_GPIO_DIR_INPUT 0
45#define TXC_GPIO_DIR_OUTPUT 1
46
47extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
48extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
49
50/****************************************************************************
45 * Siena managed PHYs 51 * Siena managed PHYs
46 */ 52 */
47extern struct efx_phy_operations efx_mcdi_phy_ops; 53extern struct efx_phy_operations efx_mcdi_phy_ops;
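
The new Transwitch block exports a phy_operations table plus two GPIO helpers. A hypothetical caller, using only the declarations added above (the pin number and level are invented, and this usage is a sketch, not taken from the driver):

	struct efx_nic;	/* opaque here */

	extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
	extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);

	#define TXC_GPIO_DIR_OUTPUT 1

	/* Drive an assumed board-specific pin high: direction first, then value */
	static void txc_pin_high(struct efx_nic *efx, int pin)
	{
		falcon_txc_set_gpio_dir(efx, pin, TXC_GPIO_DIR_OUTPUT);
		falcon_txc_set_gpio_val(efx, pin, 1);
	}
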
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 18a3be428348..96430ed81c36 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -2893,6 +2893,20 @@
2893#define FRF_AB_XX_FORCE_SIG_WIDTH 8 2893#define FRF_AB_XX_FORCE_SIG_WIDTH 8
2894#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff 2894#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
2895 2895
2896/* RX_MAC_FILTER_TBL0 */
2897/* RMFT_DEST_MAC is wider than 32 bits */
2898#define FRF_CZ_RMFT_DEST_MAC_LO_LBN 12
2899#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
2900#define FRF_CZ_RMFT_DEST_MAC_HI_LBN 44
2901#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH 16
2902
2903/* TX_MAC_FILTER_TBL0 */
2904/* TMFT_SRC_MAC is wider than 32 bits */
2905#define FRF_CZ_TMFT_SRC_MAC_LO_LBN 12
2906#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
2909
2896/* DRIVER_EV */ 2910/* DRIVER_EV */
2897/* Sub-fields of an RX flush completion event */ 2911/* Sub-fields of an RX flush completion event */
2898#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 2912#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
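
These definitions follow the register headers' LBN ("lowest bit number") / WIDTH convention, and the comments explain the split: a 48-bit MAC address exceeds a single 32-bit field, so it is declared as a 32-bit _LO part at bit 12 plus a 16-bit _HI part at bit 44. A stand-alone sketch of reassembling such a field, assuming we hold the register's low 64 bits in one integer:

	#include <assert.h>
	#include <stdint.h>

	#define RMFT_DEST_MAC_LO_LBN   12
	#define RMFT_DEST_MAC_LO_WIDTH 32
	#define RMFT_DEST_MAC_HI_LBN   44
	#define RMFT_DEST_MAC_HI_WIDTH 16

	static uint64_t rmft_dest_mac(uint64_t reg)
	{
		uint64_t lo = (reg >> RMFT_DEST_MAC_LO_LBN) &
			      ((1ULL << RMFT_DEST_MAC_LO_WIDTH) - 1);
		uint64_t hi = (reg >> RMFT_DEST_MAC_HI_LBN) &
			      ((1ULL << RMFT_DEST_MAC_HI_WIDTH) - 1);

		return lo | (hi << RMFT_DEST_MAC_LO_WIDTH);	/* 48-bit MAC */
	}

	int main(void)
	{
		uint64_t mac = 0x0002c9a1b2c3ULL;	/* example address */
		uint64_t reg = ((mac & 0xffffffffULL) << 12) |
			       ((mac >> 32) << 44);

		assert(rmft_dest_mac(reg) == mac);
		return 0;
	}
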
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 799c461ce7b8..6d0959b5158e 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -133,7 +133,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
133 unsigned index, count; 133 unsigned index, count;
134 134
135 for (count = 0; count < EFX_RX_BATCH; ++count) { 135 for (count = 0; count < EFX_RX_BATCH; ++count) {
136 index = rx_queue->added_count & EFX_RXQ_MASK; 136 index = rx_queue->added_count & rx_queue->ptr_mask;
137 rx_buf = efx_rx_buffer(rx_queue, index); 137 rx_buf = efx_rx_buffer(rx_queue, index);
138 138
139 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); 139 rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
@@ -208,7 +208,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
208 dma_addr += sizeof(struct efx_rx_page_state); 208 dma_addr += sizeof(struct efx_rx_page_state);
209 209
210 split: 210 split:
211 index = rx_queue->added_count & EFX_RXQ_MASK; 211 index = rx_queue->added_count & rx_queue->ptr_mask;
212 rx_buf = efx_rx_buffer(rx_queue, index); 212 rx_buf = efx_rx_buffer(rx_queue, index);
213 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 213 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
214 rx_buf->skb = NULL; 214 rx_buf->skb = NULL;
@@ -285,7 +285,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
285 * we'd like to insert an additional descriptor whilst leaving 285 * we'd like to insert an additional descriptor whilst leaving
286 * EFX_RXD_HEAD_ROOM for the non-recycle path */ 286 * EFX_RXD_HEAD_ROOM for the non-recycle path */
287 fill_level = (rx_queue->added_count - rx_queue->removed_count + 2); 287 fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
288 if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) { 288 if (unlikely(fill_level > rx_queue->max_fill)) {
289 /* We could place "state" on a list, and drain the list in 289 /* We could place "state" on a list, and drain the list in
290 * efx_fast_push_rx_descriptors(). For now, this will do. */ 290 * efx_fast_push_rx_descriptors(). For now, this will do. */
291 return; 291 return;
@@ -294,7 +294,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
294 ++state->refcnt; 294 ++state->refcnt;
295 get_page(rx_buf->page); 295 get_page(rx_buf->page);
296 296
297 index = rx_queue->added_count & EFX_RXQ_MASK; 297 index = rx_queue->added_count & rx_queue->ptr_mask;
298 new_buf = efx_rx_buffer(rx_queue, index); 298 new_buf = efx_rx_buffer(rx_queue, index);
299 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); 299 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
300 new_buf->skb = NULL; 300 new_buf->skb = NULL;
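
The recycling path above leans on a small address trick: each page is carved into two half-page RX buffers, so XOR-ing a buffer's DMA address with PAGE_SIZE >> 1 flips it to the other half, and the small EFX_PAGE_IP_ALIGN offset never disturbs that bit. A plain-integer check of the identity (page size and alignment values are illustrative):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SZ   4096u	/* illustrative page size */
	#define IP_ALIGN  2u	/* stand-in for EFX_PAGE_IP_ALIGN */

	int main(void)
	{
		uint64_t first = 0x10000 + IP_ALIGN;		/* buffer in 1st half */
		uint64_t second = first ^ (PAGE_SZ >> 1);	/* flip to 2nd half */

		assert(second == first + PAGE_SZ / 2);
		assert((second ^ (PAGE_SZ >> 1)) == first);	/* and back again */
		return 0;
	}
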
@@ -311,7 +311,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
311 struct efx_rx_buffer *rx_buf) 311 struct efx_rx_buffer *rx_buf)
312{ 312{
313 struct efx_nic *efx = channel->efx; 313 struct efx_nic *efx = channel->efx;
314 struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel]; 314 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
315 struct efx_rx_buffer *new_buf; 315 struct efx_rx_buffer *new_buf;
316 unsigned index; 316 unsigned index;
317 317
@@ -319,7 +319,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
319 page_count(rx_buf->page) == 1) 319 page_count(rx_buf->page) == 1)
320 efx_resurrect_rx_buffer(rx_queue, rx_buf); 320 efx_resurrect_rx_buffer(rx_queue, rx_buf);
321 321
322 index = rx_queue->added_count & EFX_RXQ_MASK; 322 index = rx_queue->added_count & rx_queue->ptr_mask;
323 new_buf = efx_rx_buffer(rx_queue, index); 323 new_buf = efx_rx_buffer(rx_queue, index);
324 324
325 memcpy(new_buf, rx_buf, sizeof(*new_buf)); 325 memcpy(new_buf, rx_buf, sizeof(*new_buf));
@@ -341,13 +341,13 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
341 */ 341 */
342void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) 342void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
343{ 343{
344 struct efx_channel *channel = rx_queue->channel; 344 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
345 unsigned fill_level; 345 unsigned fill_level;
346 int space, rc = 0; 346 int space, rc = 0;
347 347
348 /* Calculate current fill level, and exit if we don't need to fill */ 348 /* Calculate current fill level, and exit if we don't need to fill */
349 fill_level = (rx_queue->added_count - rx_queue->removed_count); 349 fill_level = (rx_queue->added_count - rx_queue->removed_count);
350 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE); 350 EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
351 if (fill_level >= rx_queue->fast_fill_trigger) 351 if (fill_level >= rx_queue->fast_fill_trigger)
352 goto out; 352 goto out;
353 353
@@ -364,7 +364,8 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
364 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 364 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
365 "RX queue %d fast-filling descriptor ring from" 365 "RX queue %d fast-filling descriptor ring from"
366 " level %d to level %d using %s allocation\n", 366 " level %d to level %d using %s allocation\n",
367 rx_queue->queue, fill_level, rx_queue->fast_fill_limit, 367 efx_rx_queue_index(rx_queue), fill_level,
368 rx_queue->fast_fill_limit,
368 channel->rx_alloc_push_pages ? "page" : "skb"); 369 channel->rx_alloc_push_pages ? "page" : "skb");
369 370
370 do { 371 do {
@@ -382,7 +383,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
382 383
383 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 384 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
384 "RX queue %d fast-filled descriptor ring " 385 "RX queue %d fast-filled descriptor ring "
385 "to level %d\n", rx_queue->queue, 386 "to level %d\n", efx_rx_queue_index(rx_queue),
386 rx_queue->added_count - rx_queue->removed_count); 387 rx_queue->added_count - rx_queue->removed_count);
387 388
388 out: 389 out:
@@ -393,7 +394,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
393void efx_rx_slow_fill(unsigned long context) 394void efx_rx_slow_fill(unsigned long context)
394{ 395{
395 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; 396 struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
396 struct efx_channel *channel = rx_queue->channel; 397 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
397 398
398 /* Post an event to cause NAPI to run and refill the queue */ 399 /* Post an event to cause NAPI to run and refill the queue */
399 efx_nic_generate_fill_event(channel); 400 efx_nic_generate_fill_event(channel);
@@ -421,7 +422,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
421 netif_err(efx, rx_err, efx->net_dev, 422 netif_err(efx, rx_err, efx->net_dev,
422 " RX queue %d seriously overlength " 423 " RX queue %d seriously overlength "
423 "RX event (0x%x > 0x%x+0x%x). Leaking\n", 424 "RX event (0x%x > 0x%x+0x%x). Leaking\n",
424 rx_queue->queue, len, max_len, 425 efx_rx_queue_index(rx_queue), len, max_len,
425 efx->type->rx_buffer_padding); 426 efx->type->rx_buffer_padding);
426 /* If this buffer was skb-allocated, then the meta 427 /* If this buffer was skb-allocated, then the meta
427 * data at the end of the skb will be trashed. So 428 * data at the end of the skb will be trashed. So
@@ -434,10 +435,10 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
434 netif_err(efx, rx_err, efx->net_dev, 435 netif_err(efx, rx_err, efx->net_dev,
435 " RX queue %d overlength RX event " 436 " RX queue %d overlength RX event "
436 "(0x%x > 0x%x)\n", 437 "(0x%x > 0x%x)\n",
437 rx_queue->queue, len, max_len); 438 efx_rx_queue_index(rx_queue), len, max_len);
438 } 439 }
439 440
440 rx_queue->channel->n_rx_overlength++; 441 efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
441} 442}
442 443
443/* Pass a received packet up through the generic LRO stack 444/* Pass a received packet up through the generic LRO stack
@@ -507,7 +508,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
507 unsigned int len, bool checksummed, bool discard) 508 unsigned int len, bool checksummed, bool discard)
508{ 509{
509 struct efx_nic *efx = rx_queue->efx; 510 struct efx_nic *efx = rx_queue->efx;
510 struct efx_channel *channel = rx_queue->channel; 511 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
511 struct efx_rx_buffer *rx_buf; 512 struct efx_rx_buffer *rx_buf;
512 bool leak_packet = false; 513 bool leak_packet = false;
513 514
@@ -528,7 +529,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
528 529
529 netif_vdbg(efx, rx_status, efx->net_dev, 530 netif_vdbg(efx, rx_status, efx->net_dev,
530 "RX queue %d received id %x at %llx+%x %s%s\n", 531 "RX queue %d received id %x at %llx+%x %s%s\n",
531 rx_queue->queue, index, 532 efx_rx_queue_index(rx_queue), index,
532 (unsigned long long)rx_buf->dma_addr, len, 533 (unsigned long long)rx_buf->dma_addr, len,
533 (checksummed ? " [SUMMED]" : ""), 534 (checksummed ? " [SUMMED]" : ""),
534 (discard ? " [DISCARD]" : "")); 535 (discard ? " [DISCARD]" : ""));
@@ -560,12 +561,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
560 */ 561 */
561 rx_buf->len = len; 562 rx_buf->len = len;
562out: 563out:
563 if (rx_queue->channel->rx_pkt) 564 if (channel->rx_pkt)
564 __efx_rx_packet(rx_queue->channel, 565 __efx_rx_packet(channel,
565 rx_queue->channel->rx_pkt, 566 channel->rx_pkt, channel->rx_pkt_csummed);
566 rx_queue->channel->rx_pkt_csummed); 567 channel->rx_pkt = rx_buf;
567 rx_queue->channel->rx_pkt = rx_buf; 568 channel->rx_pkt_csummed = checksummed;
568 rx_queue->channel->rx_pkt_csummed = checksummed;
569} 569}
570 570
571/* Handle a received packet. Second half: Touches packet payload. */ 571/* Handle a received packet. Second half: Touches packet payload. */
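
The rewritten tail of efx_rx_packet() keeps the existing one-deep pipeline, now through the cached channel pointer: each call delivers the previously parked rx_pkt to __efx_rx_packet() and parks the new buffer, so the payload-touching second half runs one event behind the descriptor handling. The shape of that idiom, with invented types:

	struct pkt;

	struct chan {
		struct pkt *rx_pkt;	/* parked packet, delivered next call */
	};

	extern void deliver(struct chan *ch, struct pkt *p);	/* second half */

	static void rx_packet(struct chan *ch, struct pkt *new_pkt)
	{
		if (ch->rx_pkt)
			deliver(ch, ch->rx_pkt);	/* previous packet */
		ch->rx_pkt = new_pkt;			/* park the current one */
	}
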
@@ -615,7 +615,7 @@ void __efx_rx_packet(struct efx_channel *channel,
615 EFX_BUG_ON_PARANOID(!skb); 615 EFX_BUG_ON_PARANOID(!skb);
616 616
617 /* Set the SKB flags */ 617 /* Set the SKB flags */
618 skb->ip_summed = CHECKSUM_NONE; 618 skb_checksum_none_assert(skb);
619 619
620 /* Pass the packet up */ 620 /* Pass the packet up */
621 netif_receive_skb(skb); 621 netif_receive_skb(skb);
@@ -650,15 +650,22 @@ void efx_rx_strategy(struct efx_channel *channel)
650int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) 650int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
651{ 651{
652 struct efx_nic *efx = rx_queue->efx; 652 struct efx_nic *efx = rx_queue->efx;
653 unsigned int rxq_size; 653 unsigned int entries;
654 int rc; 654 int rc;
655 655
656 /* Create the smallest power-of-two aligned ring */
657 entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
658 EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
659 rx_queue->ptr_mask = entries - 1;
660
656 netif_dbg(efx, probe, efx->net_dev, 661 netif_dbg(efx, probe, efx->net_dev,
657 "creating RX queue %d\n", rx_queue->queue); 662 "creating RX queue %d size %#x mask %#x\n",
663 efx_rx_queue_index(rx_queue), efx->rxq_entries,
664 rx_queue->ptr_mask);
658 665
659 /* Allocate RX buffers */ 666 /* Allocate RX buffers */
660 rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer); 667 rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
661 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); 668 GFP_KERNEL);
662 if (!rx_queue->buffer) 669 if (!rx_queue->buffer)
663 return -ENOMEM; 670 return -ENOMEM;
664 671
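
The probe path now derives ring geometry at runtime: the requested rxq_entries is rounded up to a power of two (the comment's "smallest power-of-two aligned ring"), clamped against the DMA queue limits, and ptr_mask becomes entries - 1. A user-space analogue, where the MIN/MAX values are placeholders rather than the sfc constants:

	#include <assert.h>

	#define MIN_DMAQ_SIZE 512u	/* placeholder limits */
	#define MAX_DMAQ_SIZE 4096u

	static unsigned int roundup_pow2(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	static unsigned int ptr_mask_for(unsigned int requested)
	{
		unsigned int entries = roundup_pow2(requested);

		if (entries < MIN_DMAQ_SIZE)
			entries = MIN_DMAQ_SIZE;
		assert(entries <= MAX_DMAQ_SIZE);
		return entries - 1;
	}

	int main(void)
	{
		assert(ptr_mask_for(1000) == 1023);	/* rounded up to 1024 */
		assert(ptr_mask_for(100) == 511);	/* clamped to the minimum */
		return 0;
	}
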
@@ -672,20 +679,20 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
672 679
673void efx_init_rx_queue(struct efx_rx_queue *rx_queue) 680void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
674{ 681{
682 struct efx_nic *efx = rx_queue->efx;
675 unsigned int max_fill, trigger, limit; 683 unsigned int max_fill, trigger, limit;
676 684
677 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 685 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
678 "initialising RX queue %d\n", rx_queue->queue); 686 "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
679 687
680 /* Initialise ptr fields */ 688 /* Initialise ptr fields */
681 rx_queue->added_count = 0; 689 rx_queue->added_count = 0;
682 rx_queue->notified_count = 0; 690 rx_queue->notified_count = 0;
683 rx_queue->removed_count = 0; 691 rx_queue->removed_count = 0;
684 rx_queue->min_fill = -1U; 692 rx_queue->min_fill = -1U;
685 rx_queue->min_overfill = -1U;
686 693
687 /* Initialise limit fields */ 694 /* Initialise limit fields */
688 max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM; 695 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
689 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; 696 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
690 limit = max_fill * min(rx_refill_limit, 100U) / 100U; 697 limit = max_fill * min(rx_refill_limit, 100U) / 100U;
691 698
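
The fill limits are plain percentage arithmetic over the runtime ring size: max_fill leaves EFX_RXD_HEAD_ROOM descriptors spare, and the trigger and limit are rx_refill_threshold and rx_refill_limit percent of that, each clamped to 100. Worked numerically with assumed parameter values:

	#include <assert.h>

	static unsigned int pct_of(unsigned int base, unsigned int pct)
	{
		if (pct > 100)		/* mirrors the min(x, 100U) clamp */
			pct = 100;
		return base * pct / 100;
	}

	int main(void)
	{
		unsigned int rxq_entries = 1024, headroom = 2;	/* assumed values */
		unsigned int max_fill = rxq_entries - headroom;	/* 1022 */

		assert(pct_of(max_fill, 90) == 919);	/* e.g. 90% refill trigger */
		assert(pct_of(max_fill, 95) == 970);	/* e.g. 95% fast-fill limit */
		return 0;
	}
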
@@ -703,14 +710,14 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
703 struct efx_rx_buffer *rx_buf; 710 struct efx_rx_buffer *rx_buf;
704 711
705 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 712 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
706 "shutting down RX queue %d\n", rx_queue->queue); 713 "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
707 714
708 del_timer_sync(&rx_queue->slow_fill); 715 del_timer_sync(&rx_queue->slow_fill);
709 efx_nic_fini_rx(rx_queue); 716 efx_nic_fini_rx(rx_queue);
710 717
711 /* Release RX buffers NB start at index 0 not current HW ptr */ 718 /* Release RX buffers NB start at index 0 not current HW ptr */
712 if (rx_queue->buffer) { 719 if (rx_queue->buffer) {
713 for (i = 0; i <= EFX_RXQ_MASK; i++) { 720 for (i = 0; i <= rx_queue->ptr_mask; i++) {
714 rx_buf = efx_rx_buffer(rx_queue, i); 721 rx_buf = efx_rx_buffer(rx_queue, i);
715 efx_fini_rx_buffer(rx_queue, rx_buf); 722 efx_fini_rx_buffer(rx_queue, rx_buf);
716 } 723 }
@@ -720,7 +727,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
720void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) 727void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
721{ 728{
722 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 729 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
723 "destroying RX queue %d\n", rx_queue->queue); 730 "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
724 731
725 efx_nic_remove_rx(rx_queue); 732 efx_nic_remove_rx(rx_queue);
726 733
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 85f015f005d5..da4473b71058 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -506,7 +506,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
506 506
507 for (i = 0; i < 3; i++) { 507 for (i = 0; i < 3; i++) {
508 /* Determine how many packets to send */ 508 /* Determine how many packets to send */
509 state->packet_count = EFX_TXQ_SIZE / 3; 509 state->packet_count = efx->txq_entries / 3;
510 state->packet_count = min(1 << (i << 2), state->packet_count); 510 state->packet_count = min(1 << (i << 2), state->packet_count);
511 state->skbs = kzalloc(sizeof(state->skbs[0]) * 511 state->skbs = kzalloc(sizeof(state->skbs[0]) *
512 state->packet_count, GFP_KERNEL); 512 state->packet_count, GFP_KERNEL);
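
The three loopback passes scale geometrically: 1 << (i << 2) is 16**i, so the passes send 1, 16 and 256 packets, each capped at a third of the now runtime-sized TX ring. A quick check of that arithmetic (the default ring size is assumed):

	#include <assert.h>

	static unsigned int pass_packets(unsigned int i, unsigned int txq_entries)
	{
		unsigned int cap = txq_entries / 3;
		unsigned int want = 1u << (i << 2);	/* 16 ** i */

		return want < cap ? want : cap;
	}

	int main(void)
	{
		/* txq_entries = 1024 is an assumed default ring size */
		assert(pass_packets(0, 1024) == 1);
		assert(pass_packets(1, 1024) == 16);
		assert(pass_packets(2, 1024) == 256);
		assert(pass_packets(2, 512) == 170);	/* capped at 512 / 3 */
		return 0;
	}
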
@@ -567,7 +567,7 @@ static int efx_wait_for_link(struct efx_nic *efx)
567 efx->type->monitor(efx); 567 efx->type->monitor(efx);
568 mutex_unlock(&efx->mac_lock); 568 mutex_unlock(&efx->mac_lock);
569 } else { 569 } else {
570 struct efx_channel *channel = &efx->channel[0]; 570 struct efx_channel *channel = efx_get_channel(efx, 0);
571 if (channel->work_pending) 571 if (channel->work_pending)
572 efx_process_channel_now(channel); 572 efx_process_channel_now(channel);
573 } 573 }
@@ -594,6 +594,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
594{ 594{
595 enum efx_loopback_mode mode; 595 enum efx_loopback_mode mode;
596 struct efx_loopback_state *state; 596 struct efx_loopback_state *state;
597 struct efx_channel *channel = efx_get_channel(efx, 0);
597 struct efx_tx_queue *tx_queue; 598 struct efx_tx_queue *tx_queue;
598 int rc = 0; 599 int rc = 0;
599 600
@@ -634,7 +635,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
634 } 635 }
635 636
636 /* Test both types of TX queue */ 637 /* Test both types of TX queue */
637 efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) { 638 efx_for_each_channel_tx_queue(tx_queue, channel) {
638 state->offload_csum = (tx_queue->queue & 639 state->offload_csum = (tx_queue->queue &
639 EFX_TXQ_TYPE_OFFLOAD); 640 EFX_TXQ_TYPE_OFFLOAD);
640 rc = efx_test_loopback(tx_queue, 641 rc = efx_test_loopback(tx_queue,
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 3fab030f8ab5..2115f95ddc88 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -450,7 +450,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
450 mac_stats->rx_bad_bytes); 450 mac_stats->rx_bad_bytes);
451 MAC_STAT(rx_packets, RX_PKTS); 451 MAC_STAT(rx_packets, RX_PKTS);
452 MAC_STAT(rx_good, RX_GOOD_PKTS); 452 MAC_STAT(rx_good, RX_GOOD_PKTS);
453 mac_stats->rx_bad = mac_stats->rx_packets - mac_stats->rx_good; 453 MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
454 MAC_STAT(rx_pause, RX_PAUSE_PKTS); 454 MAC_STAT(rx_pause, RX_PAUSE_PKTS);
455 MAC_STAT(rx_control, RX_CONTROL_PKTS); 455 MAC_STAT(rx_control, RX_CONTROL_PKTS);
456 MAC_STAT(rx_unicast, RX_UNICAST_PKTS); 456 MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
@@ -651,6 +651,6 @@ struct efx_nic_type siena_a0_nic_type = {
651 .tx_dc_base = 0x88000, 651 .tx_dc_base = 0x88000,
652 .rx_dc_base = 0x68000, 652 .rx_dc_base = 0x68000,
653 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 653 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
654 NETIF_F_RXHASH), 654 NETIF_F_RXHASH | NETIF_F_NTUPLE),
655 .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT, 655 .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
656}; 656};
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 6791be90c2fe..1bc6c48c96ee 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -19,10 +19,7 @@
19#include "workarounds.h" 19#include "workarounds.h"
20#include "selftest.h" 20#include "selftest.h"
21 21
22/* We expect these MMDs to be in the package. SFT9001 also has a 22/* We expect these MMDs to be in the package. */
23 * clause 22 extension MMD, but since it doesn't have all the generic
24 * MMD registers it is pointless to include it here.
25 */
26#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \ 23#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
27 MDIO_DEVS_PCS | \ 24 MDIO_DEVS_PCS | \
28 MDIO_DEVS_PHYXS | \ 25 MDIO_DEVS_PHYXS | \
@@ -33,12 +30,6 @@
33 (1 << LOOPBACK_PMAPMD) | \ 30 (1 << LOOPBACK_PMAPMD) | \
34 (1 << LOOPBACK_PHYXS_WS)) 31 (1 << LOOPBACK_PHYXS_WS))
35 32
36#define SFT9001_LOOPBACKS ((1 << LOOPBACK_GPHY) | \
37 (1 << LOOPBACK_PHYXS) | \
38 (1 << LOOPBACK_PCS) | \
39 (1 << LOOPBACK_PMAPMD) | \
40 (1 << LOOPBACK_PHYXS_WS))
41
42/* We complain if we fail to see the link partner as 10G capable this many 33/* We complain if we fail to see the link partner as 10G capable this many
43 * times in a row (must be > 1 as sampling the autoneg. registers is racy) 34 * times in a row (must be > 1 as sampling the autoneg. registers is racy)
44 */ 35 */
@@ -50,9 +41,8 @@
50#define PMA_PMD_EXT_GMII_EN_WIDTH 1 41#define PMA_PMD_EXT_GMII_EN_WIDTH 1
51#define PMA_PMD_EXT_CLK_OUT_LBN 2 42#define PMA_PMD_EXT_CLK_OUT_LBN 2
52#define PMA_PMD_EXT_CLK_OUT_WIDTH 1 43#define PMA_PMD_EXT_CLK_OUT_WIDTH 1
53#define PMA_PMD_LNPGA_POWERDOWN_LBN 8 /* SFX7101 only */ 44#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
54#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1 45#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
55#define PMA_PMD_EXT_CLK312_LBN 8 /* SFT9001 only */
56#define PMA_PMD_EXT_CLK312_WIDTH 1 46#define PMA_PMD_EXT_CLK312_WIDTH 1
57#define PMA_PMD_EXT_LPOWER_LBN 12 47#define PMA_PMD_EXT_LPOWER_LBN 12
58#define PMA_PMD_EXT_LPOWER_WIDTH 1 48#define PMA_PMD_EXT_LPOWER_WIDTH 1
@@ -84,7 +74,6 @@
84#define PMA_PMD_LED_FLASH (3) 74#define PMA_PMD_LED_FLASH (3)
85#define PMA_PMD_LED_MASK 3 75#define PMA_PMD_LED_MASK 3
86/* All LEDs under hardware control */ 76/* All LEDs under hardware control */
87#define SFT9001_PMA_PMD_LED_DEFAULT 0
88/* Green and Amber under hardware control, Red off */ 77/* Green and Amber under hardware control, Red off */
89#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) 78#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
90 79
@@ -98,31 +87,7 @@
98#define PMA_PMD_SPEED_LBN 4 87#define PMA_PMD_SPEED_LBN 4
99#define PMA_PMD_SPEED_WIDTH 4 88#define PMA_PMD_SPEED_WIDTH 4
100 89
101/* Cable diagnostics - SFT9001 only */ 90/* Misc register defines */
102#define PMA_PMD_CDIAG_CTRL_REG 49213
103#define CDIAG_CTRL_IMMED_LBN 15
104#define CDIAG_CTRL_BRK_LINK_LBN 12
105#define CDIAG_CTRL_IN_PROG_LBN 11
106#define CDIAG_CTRL_LEN_UNIT_LBN 10
107#define CDIAG_CTRL_LEN_METRES 1
108#define PMA_PMD_CDIAG_RES_REG 49174
109#define CDIAG_RES_A_LBN 12
110#define CDIAG_RES_B_LBN 8
111#define CDIAG_RES_C_LBN 4
112#define CDIAG_RES_D_LBN 0
113#define CDIAG_RES_WIDTH 4
114#define CDIAG_RES_OPEN 2
115#define CDIAG_RES_OK 1
116#define CDIAG_RES_INVALID 0
117/* Set of 4 registers for pairs A-D */
118#define PMA_PMD_CDIAG_LEN_REG 49175
119
120/* Serdes control registers - SFT9001 only */
121#define PMA_PMD_CSERDES_CTRL_REG 64258
122/* Set the 156.25 MHz output to 312.5 MHz to drive Falcon's XMAC */
123#define PMA_PMD_CSERDES_DEFAULT 0x000f
124
125/* Misc register defines - SFX7101 only */
126#define PCS_CLOCK_CTRL_REG 55297 91#define PCS_CLOCK_CTRL_REG 55297
127#define PLL312_RST_N_LBN 2 92#define PLL312_RST_N_LBN 2
128 93
@@ -185,121 +150,17 @@ struct tenxpress_phy_data {
185 int bad_lp_tries; 150 int bad_lp_tries;
186}; 151};
187 152
188static ssize_t show_phy_short_reach(struct device *dev,
189 struct device_attribute *attr, char *buf)
190{
191 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
192 int reg;
193
194 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR);
195 return sprintf(buf, "%d\n", !!(reg & MDIO_PMA_10GBT_TXPWR_SHORT));
196}
197
198static ssize_t set_phy_short_reach(struct device *dev,
199 struct device_attribute *attr,
200 const char *buf, size_t count)
201{
202 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
203 int rc;
204
205 rtnl_lock();
206 if (efx->state != STATE_RUNNING) {
207 rc = -EBUSY;
208 } else {
209 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
210 MDIO_PMA_10GBT_TXPWR_SHORT,
211 count != 0 && *buf != '0');
212 rc = efx_reconfigure_port(efx);
213 }
214 rtnl_unlock();
215
216 return rc < 0 ? rc : (ssize_t)count;
217}
218
219static DEVICE_ATTR(phy_short_reach, 0644, show_phy_short_reach,
220 set_phy_short_reach);
221
222int sft9001_wait_boot(struct efx_nic *efx)
223{
224 unsigned long timeout = jiffies + HZ + 1;
225 int boot_stat;
226
227 for (;;) {
228 boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS,
229 PCS_BOOT_STATUS_REG);
230 if (boot_stat >= 0) {
231 netif_dbg(efx, hw, efx->net_dev,
232 "PHY boot status = %#x\n", boot_stat);
233 switch (boot_stat &
234 ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
235 (3 << PCS_BOOT_PROGRESS_LBN) |
236 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN) |
237 (1 << PCS_BOOT_CODE_STARTED_LBN))) {
238 case ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
239 (PCS_BOOT_PROGRESS_CHECKSUM <<
240 PCS_BOOT_PROGRESS_LBN)):
241 case ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
242 (PCS_BOOT_PROGRESS_INIT <<
243 PCS_BOOT_PROGRESS_LBN) |
244 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN)):
245 return -EINVAL;
246 case ((PCS_BOOT_PROGRESS_WAIT_MDIO <<
247 PCS_BOOT_PROGRESS_LBN) |
248 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN)):
249 return (efx->phy_mode & PHY_MODE_SPECIAL) ?
250 0 : -EIO;
251 case ((PCS_BOOT_PROGRESS_JUMP <<
252 PCS_BOOT_PROGRESS_LBN) |
253 (1 << PCS_BOOT_CODE_STARTED_LBN)):
254 case ((PCS_BOOT_PROGRESS_JUMP <<
255 PCS_BOOT_PROGRESS_LBN) |
256 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN) |
257 (1 << PCS_BOOT_CODE_STARTED_LBN)):
258 return (efx->phy_mode & PHY_MODE_SPECIAL) ?
259 -EIO : 0;
260 default:
261 if (boot_stat & (1 << PCS_BOOT_FATAL_ERROR_LBN))
262 return -EIO;
263 break;
264 }
265 }
266
267 if (time_after_eq(jiffies, timeout))
268 return -ETIMEDOUT;
269
270 msleep(50);
271 }
272}
273
274static int tenxpress_init(struct efx_nic *efx) 153static int tenxpress_init(struct efx_nic *efx)
275{ 154{
276 int reg; 155 /* Enable 312.5 MHz clock */
277 156 efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
278 if (efx->phy_type == PHY_TYPE_SFX7101) { 157 1 << CLK312_EN_LBN);
279 /* Enable 312.5 MHz clock */
280 efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
281 1 << CLK312_EN_LBN);
282 } else {
283 /* Enable 312.5 MHz clock and GMII */
284 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
285 reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
286 (1 << PMA_PMD_EXT_CLK_OUT_LBN) |
287 (1 << PMA_PMD_EXT_CLK312_LBN) |
288 (1 << PMA_PMD_EXT_ROBUST_LBN));
289
290 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
291 efx_mdio_set_flag(efx, MDIO_MMD_C22EXT,
292 GPHY_XCONTROL_REG, 1 << GPHY_ISOLATE_LBN,
293 false);
294 }
295 158
296 /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */ 159 /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
297 if (efx->phy_type == PHY_TYPE_SFX7101) { 160 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
298 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG, 161 1 << PMA_PMA_LED_ACTIVITY_LBN, true);
299 1 << PMA_PMA_LED_ACTIVITY_LBN, true); 162 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
300 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, 163 SFX7101_PMA_PMD_LED_DEFAULT);
301 SFX7101_PMA_PMD_LED_DEFAULT);
302 }
303 164
304 return 0; 165 return 0;
305} 166}
@@ -307,7 +168,6 @@ static int tenxpress_init(struct efx_nic *efx)
307static int tenxpress_phy_probe(struct efx_nic *efx) 168static int tenxpress_phy_probe(struct efx_nic *efx)
308{ 169{
309 struct tenxpress_phy_data *phy_data; 170 struct tenxpress_phy_data *phy_data;
310 int rc;
311 171
312 /* Allocate phy private storage */ 172 /* Allocate phy private storage */
313 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 173 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
@@ -316,42 +176,15 @@ static int tenxpress_phy_probe(struct efx_nic *efx)
316 efx->phy_data = phy_data; 176 efx->phy_data = phy_data;
317 phy_data->phy_mode = efx->phy_mode; 177 phy_data->phy_mode = efx->phy_mode;
318 178
319 /* Create any special files */ 179 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
320 if (efx->phy_type == PHY_TYPE_SFT9001B) { 180 efx->mdio.mode_support = MDIO_SUPPORTS_C45;
321 rc = device_create_file(&efx->pci_dev->dev,
322 &dev_attr_phy_short_reach);
323 if (rc)
324 goto fail;
325 }
326
327 if (efx->phy_type == PHY_TYPE_SFX7101) {
328 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
329 efx->mdio.mode_support = MDIO_SUPPORTS_C45;
330
331 efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
332 181
333 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | 182 efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
334 ADVERTISED_10000baseT_Full);
335 } else {
336 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
337 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
338 183
339 efx->loopback_modes = (SFT9001_LOOPBACKS | 184 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
340 FALCON_XMAC_LOOPBACKS | 185 ADVERTISED_10000baseT_Full);
341 FALCON_GMAC_LOOPBACKS);
342
343 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
344 ADVERTISED_10000baseT_Full |
345 ADVERTISED_1000baseT_Full |
346 ADVERTISED_100baseT_Full);
347 }
348 186
349 return 0; 187 return 0;
350
351fail:
352 kfree(efx->phy_data);
353 efx->phy_data = NULL;
354 return rc;
355} 188}
356 189
357static int tenxpress_phy_init(struct efx_nic *efx) 190static int tenxpress_phy_init(struct efx_nic *efx)
@@ -361,16 +194,6 @@ static int tenxpress_phy_init(struct efx_nic *efx)
361 falcon_board(efx)->type->init_phy(efx); 194 falcon_board(efx)->type->init_phy(efx);
362 195
363 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { 196 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
364 if (efx->phy_type == PHY_TYPE_SFT9001A) {
365 int reg;
366 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
367 PMA_PMD_XCONTROL_REG);
368 reg |= (1 << PMA_PMD_EXT_SSR_LBN);
369 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
370 PMA_PMD_XCONTROL_REG, reg);
371 mdelay(200);
372 }
373
374 rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); 197 rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
375 if (rc < 0) 198 if (rc < 0)
376 return rc; 199 return rc;
@@ -403,7 +226,7 @@ static int tenxpress_special_reset(struct efx_nic *efx)
403{ 226{
404 int rc, reg; 227 int rc, reg;
405 228
406 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so 229 /* The XGMAC clock is driven from the SFX7101 312MHz clock, so
407 * a special software reset can glitch the XGMAC sufficiently for stats 230 * a special software reset can glitch the XGMAC sufficiently for stats
408 * requests to fail. */ 231 * requests to fail. */
409 falcon_stop_nic_stats(efx); 232 falcon_stop_nic_stats(efx);
@@ -484,53 +307,18 @@ static bool sfx7101_link_ok(struct efx_nic *efx)
484 MDIO_DEVS_PHYXS); 307 MDIO_DEVS_PHYXS);
485} 308}
486 309
487static bool sft9001_link_ok(struct efx_nic *efx, struct ethtool_cmd *ecmd)
488{
489 u32 reg;
490
491 if (efx_phy_mode_disabled(efx->phy_mode))
492 return false;
493 else if (efx->loopback_mode == LOOPBACK_GPHY)
494 return true;
495 else if (efx->loopback_mode)
496 return efx_mdio_links_ok(efx,
497 MDIO_DEVS_PMAPMD |
498 MDIO_DEVS_PHYXS);
499
500 /* We must use the same definition of link state as LASI,
501 * otherwise we can miss a link state transition
502 */
503 if (ecmd->speed == 10000) {
504 reg = efx_mdio_read(efx, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT1);
505 return reg & MDIO_PCS_10GBRT_STAT1_BLKLK;
506 } else {
507 reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_STATUS_REG);
508 return reg & (1 << C22EXT_STATUS_LINK_LBN);
509 }
510}
511
512static void tenxpress_ext_loopback(struct efx_nic *efx) 310static void tenxpress_ext_loopback(struct efx_nic *efx)
513{ 311{
514 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1, 312 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
515 1 << LOOPBACK_NEAR_LBN, 313 1 << LOOPBACK_NEAR_LBN,
516 efx->loopback_mode == LOOPBACK_PHYXS); 314 efx->loopback_mode == LOOPBACK_PHYXS);
517 if (efx->phy_type != PHY_TYPE_SFX7101)
518 efx_mdio_set_flag(efx, MDIO_MMD_C22EXT, GPHY_XCONTROL_REG,
519 1 << GPHY_LOOPBACK_NEAR_LBN,
520 efx->loopback_mode == LOOPBACK_GPHY);
521} 315}
522 316
523static void tenxpress_low_power(struct efx_nic *efx) 317static void tenxpress_low_power(struct efx_nic *efx)
524{ 318{
525 if (efx->phy_type == PHY_TYPE_SFX7101) 319 efx_mdio_set_mmds_lpower(
526 efx_mdio_set_mmds_lpower( 320 efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
527 efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER), 321 TENXPRESS_REQUIRED_DEVS);
528 TENXPRESS_REQUIRED_DEVS);
529 else
530 efx_mdio_set_flag(
531 efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG,
532 1 << PMA_PMD_EXT_LPOWER_LBN,
533 !!(efx->phy_mode & PHY_MODE_LOW_POWER));
534} 322}
535 323
536static int tenxpress_phy_reconfigure(struct efx_nic *efx) 324static int tenxpress_phy_reconfigure(struct efx_nic *efx)
@@ -550,12 +338,7 @@ static int tenxpress_phy_reconfigure(struct efx_nic *efx)
550 338
551 if (loop_reset || phy_mode_change) { 339 if (loop_reset || phy_mode_change) {
552 tenxpress_special_reset(efx); 340 tenxpress_special_reset(efx);
553 341 falcon_reset_xaui(efx);
554 /* Reset XAUI if we were in 10G, and are staying
555 * in 10G. If we're moving into and out of 10G
556 * then xaui will be reset anyway */
557 if (EFX_IS10G(efx))
558 falcon_reset_xaui(efx);
559 } 342 }
560 343
561 tenxpress_low_power(efx); 344 tenxpress_low_power(efx);
@@ -578,29 +361,12 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
578{ 361{
579 struct efx_link_state old_state = efx->link_state; 362 struct efx_link_state old_state = efx->link_state;
580 363
581 if (efx->phy_type == PHY_TYPE_SFX7101) { 364 efx->link_state.up = sfx7101_link_ok(efx);
582 efx->link_state.up = sfx7101_link_ok(efx); 365 efx->link_state.speed = 10000;
583 efx->link_state.speed = 10000; 366 efx->link_state.fd = true;
584 efx->link_state.fd = true; 367 efx->link_state.fc = efx_mdio_get_pause(efx);
585 efx->link_state.fc = efx_mdio_get_pause(efx);
586
587 sfx7101_check_bad_lp(efx, efx->link_state.up);
588 } else {
589 struct ethtool_cmd ecmd;
590
591 /* Check the LASI alarm first */
592 if (efx->loopback_mode == LOOPBACK_NONE &&
593 !(efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT) &
594 MDIO_PMA_LASI_LSALARM))
595 return false;
596 368
597 tenxpress_get_settings(efx, &ecmd); 369 sfx7101_check_bad_lp(efx, efx->link_state.up);
598
599 efx->link_state.up = sft9001_link_ok(efx, &ecmd);
600 efx->link_state.speed = ecmd.speed;
601 efx->link_state.fd = (ecmd.duplex == DUPLEX_FULL);
602 efx->link_state.fc = efx_mdio_get_pause(efx);
603 }
604 370
605 return !efx_link_state_equal(&efx->link_state, &old_state); 371 return !efx_link_state_equal(&efx->link_state, &old_state);
606} 372}
@@ -621,10 +387,6 @@ static void sfx7101_phy_fini(struct efx_nic *efx)
621 387
622static void tenxpress_phy_remove(struct efx_nic *efx) 388static void tenxpress_phy_remove(struct efx_nic *efx)
623{ 389{
624 if (efx->phy_type == PHY_TYPE_SFT9001B)
625 device_remove_file(&efx->pci_dev->dev,
626 &dev_attr_phy_short_reach);
627
628 kfree(efx->phy_data); 390 kfree(efx->phy_data);
629 efx->phy_data = NULL; 391 efx->phy_data = NULL;
630} 392}
@@ -647,10 +409,7 @@ void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
647 (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN); 409 (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
648 break; 410 break;
649 default: 411 default:
650 if (efx->phy_type == PHY_TYPE_SFX7101) 412 reg = SFX7101_PMA_PMD_LED_DEFAULT;
651 reg = SFX7101_PMA_PMD_LED_DEFAULT;
652 else
653 reg = SFT9001_PMA_PMD_LED_DEFAULT;
654 break; 413 break;
655 } 414 }
656 415
@@ -685,102 +444,12 @@ sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
685 return rc; 444 return rc;
686} 445}
687 446
688static const char *const sft9001_test_names[] = {
689 "bist",
690 "cable.pairA.status",
691 "cable.pairB.status",
692 "cable.pairC.status",
693 "cable.pairD.status",
694 "cable.pairA.length",
695 "cable.pairB.length",
696 "cable.pairC.length",
697 "cable.pairD.length",
698};
699
700static const char *sft9001_test_name(struct efx_nic *efx, unsigned int index)
701{
702 if (index < ARRAY_SIZE(sft9001_test_names))
703 return sft9001_test_names[index];
704 return NULL;
705}
706
707static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
708{
709 int rc = 0, rc2, i, ctrl_reg, res_reg;
710
711 /* Initialise cable diagnostic results to unknown failure */
712 for (i = 1; i < 9; ++i)
713 results[i] = -1;
714
715 /* Run cable diagnostics; wait up to 5 seconds for them to complete.
716 * A cable fault is not a self-test failure, but a timeout is. */
717 ctrl_reg = ((1 << CDIAG_CTRL_IMMED_LBN) |
718 (CDIAG_CTRL_LEN_METRES << CDIAG_CTRL_LEN_UNIT_LBN));
719 if (flags & ETH_TEST_FL_OFFLINE) {
720 /* Break the link in order to run full diagnostics. We
721 * must reset the PHY to resume normal service. */
722 ctrl_reg |= (1 << CDIAG_CTRL_BRK_LINK_LBN);
723 }
724 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_CTRL_REG,
725 ctrl_reg);
726 i = 0;
727 while (efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_CTRL_REG) &
728 (1 << CDIAG_CTRL_IN_PROG_LBN)) {
729 if (++i == 50) {
730 rc = -ETIMEDOUT;
731 goto out;
732 }
733 msleep(100);
734 }
735 res_reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_RES_REG);
736 for (i = 0; i < 4; i++) {
737 int pair_res =
738 (res_reg >> (CDIAG_RES_A_LBN - i * CDIAG_RES_WIDTH))
739 & ((1 << CDIAG_RES_WIDTH) - 1);
740 int len_reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
741 PMA_PMD_CDIAG_LEN_REG + i);
742 if (pair_res == CDIAG_RES_OK)
743 results[1 + i] = 1;
744 else if (pair_res == CDIAG_RES_INVALID)
745 results[1 + i] = -1;
746 else
747 results[1 + i] = -pair_res;
748 if (pair_res != CDIAG_RES_INVALID &&
749 pair_res != CDIAG_RES_OPEN &&
750 len_reg != 0xffff)
751 results[5 + i] = len_reg;
752 }
753
754out:
755 if (flags & ETH_TEST_FL_OFFLINE) {
756 /* Reset, running the BIST and then resuming normal service. */
757 rc2 = tenxpress_special_reset(efx);
758 results[0] = rc2 ? -1 : 1;
759 if (!rc)
760 rc = rc2;
761
762 efx_mdio_an_reconfigure(efx);
763 }
764
765 return rc;
766}
767
768static void 447static void
769tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 448tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
770{ 449{
771 u32 adv = 0, lpa = 0; 450 u32 adv = 0, lpa = 0;
772 int reg; 451 int reg;
773 452
774 if (efx->phy_type != PHY_TYPE_SFX7101) {
775 reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_CTRL);
776 if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN))
777 adv |= ADVERTISED_1000baseT_Full;
778 reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_STATUS);
779 if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
780 lpa |= ADVERTISED_1000baseT_Half;
781 if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
782 lpa |= ADVERTISED_1000baseT_Full;
783 }
784 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL); 453 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
785 if (reg & MDIO_AN_10GBT_CTRL_ADV10G) 454 if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
786 adv |= ADVERTISED_10000baseT_Full; 455 adv |= ADVERTISED_10000baseT_Full;
@@ -790,23 +459,9 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
790 459
791 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa); 460 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
792 461
793 if (efx->phy_type != PHY_TYPE_SFX7101) {
794 ecmd->supported |= (SUPPORTED_100baseT_Full |
795 SUPPORTED_1000baseT_Full);
796 if (ecmd->speed != SPEED_10000) {
797 ecmd->eth_tp_mdix =
798 (efx_mdio_read(efx, MDIO_MMD_PMAPMD,
799 PMA_PMD_XSTATUS_REG) &
800 (1 << PMA_PMD_XSTAT_MDIX_LBN))
801 ? ETH_TP_MDI_X : ETH_TP_MDI;
802 }
803 }
804
805 /* In loopback, the PHY automatically brings up the correct interface, 462 /* In loopback, the PHY automatically brings up the correct interface,
806 * but doesn't advertise the correct speed. So override it */ 463 * but doesn't advertise the correct speed. So override it */
807 if (efx->loopback_mode == LOOPBACK_GPHY) 464 if (LOOPBACK_EXTERNAL(efx))
808 ecmd->speed = SPEED_1000;
809 else if (LOOPBACK_EXTERNAL(efx))
810 ecmd->speed = SPEED_10000; 465 ecmd->speed = SPEED_10000;
811} 466}
812 467
@@ -825,16 +480,6 @@ static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
825 advertising & ADVERTISED_10000baseT_Full); 480 advertising & ADVERTISED_10000baseT_Full);
826} 481}
827 482
828static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
829{
830 efx_mdio_set_flag(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_CTRL,
831 1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
832 advertising & ADVERTISED_1000baseT_Full);
833 efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
834 MDIO_AN_10GBT_CTRL_ADV10G,
835 advertising & ADVERTISED_10000baseT_Full);
836}
837
838struct efx_phy_operations falcon_sfx7101_phy_ops = { 483struct efx_phy_operations falcon_sfx7101_phy_ops = {
839 .probe = tenxpress_phy_probe, 484 .probe = tenxpress_phy_probe,
840 .init = tenxpress_phy_init, 485 .init = tenxpress_phy_init,
@@ -849,18 +494,3 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
849 .test_name = sfx7101_test_name, 494 .test_name = sfx7101_test_name,
850 .run_tests = sfx7101_run_tests, 495 .run_tests = sfx7101_run_tests,
851}; 496};
852
853struct efx_phy_operations falcon_sft9001_phy_ops = {
854 .probe = tenxpress_phy_probe,
855 .init = tenxpress_phy_init,
856 .reconfigure = tenxpress_phy_reconfigure,
857 .poll = tenxpress_phy_poll,
858 .fini = efx_port_dummy_op_void,
859 .remove = tenxpress_phy_remove,
860 .get_settings = tenxpress_get_settings,
861 .set_settings = tenxpress_set_settings,
862 .set_npage_adv = sft9001_set_npage_adv,
863 .test_alive = efx_mdio_test_alive,
864 .test_name = sft9001_test_name,
865 .run_tests = sft9001_run_tests,
866};
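
With the SFT9001 paths gone, only the SFX7101 branch of the poll logic survives above. As a rough illustration of the snapshot-and-compare pattern that tenxpress_phy_poll() now reduces to (simplified, hypothetical types; not the driver's actual structures):

#include <stdbool.h>

/* Minimal sketch, assuming a boiled-down link-state struct; the real
 * driver compares efx->link_state against a saved copy the same way. */
struct link_state { bool up; unsigned int speed; bool fd; };

static bool phy_poll(struct link_state *ls, bool (*link_ok)(void))
{
	struct link_state old = *ls;	/* snapshot before refreshing */

	ls->up = link_ok();		/* stands in for sfx7101_link_ok() */
	ls->speed = 10000;		/* SFX7101 is 10GBASE-T only */
	ls->fd = true;

	/* Report a change so the caller can raise a link event */
	return ls->up != old.up || ls->speed != old.speed || ls->fd != old.fd;
}
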
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index c6942da2c99a..11726989fe2d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -28,7 +28,7 @@
28 * The tx_queue descriptor ring fill-level must fall below this value 28 * The tx_queue descriptor ring fill-level must fall below this value
29 * before we restart the netif queue 29 * before we restart the netif queue
30 */ 30 */
31#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u) 31#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
32 32
33/* We need to be able to nest calls to netif_tx_stop_queue(), partly 33/* We need to be able to nest calls to netif_tx_stop_queue(), partly
34 * because of the 2 hardware queues associated with each core queue, 34 * because of the 2 hardware queues associated with each core queue,
@@ -37,8 +37,9 @@
37void efx_stop_queue(struct efx_channel *channel) 37void efx_stop_queue(struct efx_channel *channel)
38{ 38{
39 struct efx_nic *efx = channel->efx; 39 struct efx_nic *efx = channel->efx;
40 struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
40 41
41 if (!channel->tx_queue) 42 if (!tx_queue)
42 return; 43 return;
43 44
44 spin_lock_bh(&channel->tx_stop_lock); 45 spin_lock_bh(&channel->tx_stop_lock);
@@ -46,9 +47,8 @@ void efx_stop_queue(struct efx_channel *channel)
46 47
47 atomic_inc(&channel->tx_stop_count); 48 atomic_inc(&channel->tx_stop_count);
48 netif_tx_stop_queue( 49 netif_tx_stop_queue(
49 netdev_get_tx_queue( 50 netdev_get_tx_queue(efx->net_dev,
50 efx->net_dev, 51 tx_queue->queue / EFX_TXQ_TYPES));
51 channel->tx_queue->queue / EFX_TXQ_TYPES));
52 52
53 spin_unlock_bh(&channel->tx_stop_lock); 53 spin_unlock_bh(&channel->tx_stop_lock);
54} 54}
@@ -57,8 +57,9 @@ void efx_stop_queue(struct efx_channel *channel)
57void efx_wake_queue(struct efx_channel *channel) 57void efx_wake_queue(struct efx_channel *channel)
58{ 58{
59 struct efx_nic *efx = channel->efx; 59 struct efx_nic *efx = channel->efx;
60 struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
60 61
61 if (!channel->tx_queue) 62 if (!tx_queue)
62 return; 63 return;
63 64
64 local_bh_disable(); 65 local_bh_disable();
@@ -66,9 +67,8 @@ void efx_wake_queue(struct efx_channel *channel)
66 &channel->tx_stop_lock)) { 67 &channel->tx_stop_lock)) {
67 netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n"); 68 netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
68 netif_tx_wake_queue( 69 netif_tx_wake_queue(
69 netdev_get_tx_queue( 70 netdev_get_tx_queue(efx->net_dev,
70 efx->net_dev, 71 tx_queue->queue / EFX_TXQ_TYPES));
71 channel->tx_queue->queue / EFX_TXQ_TYPES));
72 spin_unlock(&channel->tx_stop_lock); 72 spin_unlock(&channel->tx_stop_lock);
73 } 73 }
74 local_bh_enable(); 74 local_bh_enable();
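
The nesting requirement described in the comment above boils down to a counted stop: each efx_stop_queue() bumps tx_stop_count, and efx_wake_queue() only restarts the netif queue once the count returns to zero. A stand-alone sketch of that scheme, with locking elided and illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

struct channel { atomic_int tx_stop_count; bool netif_stopped; };

static void stop_queue(struct channel *ch)
{
	/* Every stop is counted, so stops may nest freely */
	atomic_fetch_add(&ch->tx_stop_count, 1);
	ch->netif_stopped = true;		/* netif_tx_stop_queue() */
}

static void wake_queue(struct channel *ch)
{
	/* Mirrors atomic_dec_and_lock(): only the final decrement wakes */
	if (atomic_fetch_sub(&ch->tx_stop_count, 1) == 1)
		ch->netif_stopped = false;	/* netif_tx_wake_queue() */
}
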
@@ -207,7 +207,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
207 } 207 }
208 208
209 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 209 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
210 q_space = EFX_TXQ_MASK - 1 - fill_level; 210 q_space = efx->txq_entries - 1 - fill_level;
211 211
212 /* Map for DMA. Use pci_map_single rather than pci_map_page 212 /* Map for DMA. Use pci_map_single rather than pci_map_page
213 * since this is more efficient on machines with sparse 213 * since this is more efficient on machines with sparse
@@ -244,14 +244,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
244 &tx_queue->read_count; 244 &tx_queue->read_count;
245 fill_level = (tx_queue->insert_count 245 fill_level = (tx_queue->insert_count
246 - tx_queue->old_read_count); 246 - tx_queue->old_read_count);
247 q_space = EFX_TXQ_MASK - 1 - fill_level; 247 q_space = efx->txq_entries - 1 - fill_level;
248 if (unlikely(q_space-- <= 0)) 248 if (unlikely(q_space-- <= 0))
249 goto stop; 249 goto stop;
250 smp_mb(); 250 smp_mb();
251 --tx_queue->stopped; 251 --tx_queue->stopped;
252 } 252 }
253 253
254 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK; 254 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
255 buffer = &tx_queue->buffer[insert_ptr]; 255 buffer = &tx_queue->buffer[insert_ptr];
256 efx_tsoh_free(tx_queue, buffer); 256 efx_tsoh_free(tx_queue, buffer);
257 EFX_BUG_ON_PARANOID(buffer->tsoh); 257 EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -320,7 +320,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
320 /* Work backwards until we hit the original insert pointer value */ 320 /* Work backwards until we hit the original insert pointer value */
321 while (tx_queue->insert_count != tx_queue->write_count) { 321 while (tx_queue->insert_count != tx_queue->write_count) {
322 --tx_queue->insert_count; 322 --tx_queue->insert_count;
323 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK; 323 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
324 buffer = &tx_queue->buffer[insert_ptr]; 324 buffer = &tx_queue->buffer[insert_ptr];
325 efx_dequeue_buffer(tx_queue, buffer); 325 efx_dequeue_buffer(tx_queue, buffer);
326 buffer->len = 0; 326 buffer->len = 0;
@@ -350,8 +350,8 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
350 struct efx_nic *efx = tx_queue->efx; 350 struct efx_nic *efx = tx_queue->efx;
351 unsigned int stop_index, read_ptr; 351 unsigned int stop_index, read_ptr;
352 352
353 stop_index = (index + 1) & EFX_TXQ_MASK; 353 stop_index = (index + 1) & tx_queue->ptr_mask;
354 read_ptr = tx_queue->read_count & EFX_TXQ_MASK; 354 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
355 355
356 while (read_ptr != stop_index) { 356 while (read_ptr != stop_index) {
357 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; 357 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -368,7 +368,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
368 buffer->len = 0; 368 buffer->len = 0;
369 369
370 ++tx_queue->read_count; 370 ++tx_queue->read_count;
371 read_ptr = tx_queue->read_count & EFX_TXQ_MASK; 371 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
372 } 372 }
373} 373}
374 374
@@ -390,9 +390,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
390 if (unlikely(efx->port_inhibited)) 390 if (unlikely(efx->port_inhibited))
391 return NETDEV_TX_BUSY; 391 return NETDEV_TX_BUSY;
392 392
393 tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)]; 393 tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
394 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) 394 skb->ip_summed == CHECKSUM_PARTIAL ?
395 tx_queue += EFX_TXQ_TYPE_OFFLOAD; 395 EFX_TXQ_TYPE_OFFLOAD : 0);
396 396
397 return efx_enqueue_skb(tx_queue, skb); 397 return efx_enqueue_skb(tx_queue, skb);
398} 398}
@@ -402,7 +402,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
402 unsigned fill_level; 402 unsigned fill_level;
403 struct efx_nic *efx = tx_queue->efx; 403 struct efx_nic *efx = tx_queue->efx;
404 404
405 EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK); 405 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
406 406
407 efx_dequeue_buffers(tx_queue, index); 407 efx_dequeue_buffers(tx_queue, index);
408 408
@@ -412,7 +412,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
412 smp_mb(); 412 smp_mb();
413 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) { 413 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
414 fill_level = tx_queue->insert_count - tx_queue->read_count; 414 fill_level = tx_queue->insert_count - tx_queue->read_count;
415 if (fill_level < EFX_TXQ_THRESHOLD) { 415 if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
416 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 416 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
417 417
418 /* Do this under netif_tx_lock(), to avoid racing 418 /* Do this under netif_tx_lock(), to avoid racing
@@ -430,18 +430,24 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
430int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) 430int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
431{ 431{
432 struct efx_nic *efx = tx_queue->efx; 432 struct efx_nic *efx = tx_queue->efx;
433 unsigned int txq_size; 433 unsigned int entries;
434 int i, rc; 434 int i, rc;
435 435
436 netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n", 436 /* Create the smallest power-of-two aligned ring */
437 tx_queue->queue); 437 entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
438 EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
439 tx_queue->ptr_mask = entries - 1;
440
441 netif_dbg(efx, probe, efx->net_dev,
442 "creating TX queue %d size %#x mask %#x\n",
443 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
438 444
439 /* Allocate software ring */ 445 /* Allocate software ring */
440 txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer); 446 tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
441 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); 447 GFP_KERNEL);
442 if (!tx_queue->buffer) 448 if (!tx_queue->buffer)
443 return -ENOMEM; 449 return -ENOMEM;
444 for (i = 0; i <= EFX_TXQ_MASK; ++i) 450 for (i = 0; i <= tx_queue->ptr_mask; ++i)
445 tx_queue->buffer[i].continuation = true; 451 tx_queue->buffer[i].continuation = true;
446 452
447 /* Allocate hardware ring */ 453 /* Allocate hardware ring */
@@ -481,7 +487,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
481 487
482 /* Free any buffers left in the ring */ 488 /* Free any buffers left in the ring */
483 while (tx_queue->read_count != tx_queue->write_count) { 489 while (tx_queue->read_count != tx_queue->write_count) {
484 buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK]; 490 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
485 efx_dequeue_buffer(tx_queue, buffer); 491 efx_dequeue_buffer(tx_queue, buffer);
486 buffer->continuation = true; 492 buffer->continuation = true;
487 buffer->len = 0; 493 buffer->len = 0;
@@ -741,7 +747,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
741 747
742 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 748 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
743 /* -1 as there is no way to represent all descriptors used */ 749 /* -1 as there is no way to represent all descriptors used */
744 q_space = EFX_TXQ_MASK - 1 - fill_level; 750 q_space = efx->txq_entries - 1 - fill_level;
745 751
746 while (1) { 752 while (1) {
747 if (unlikely(q_space-- <= 0)) { 753 if (unlikely(q_space-- <= 0)) {
@@ -757,7 +763,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
757 *(volatile unsigned *)&tx_queue->read_count; 763 *(volatile unsigned *)&tx_queue->read_count;
758 fill_level = (tx_queue->insert_count 764 fill_level = (tx_queue->insert_count
759 - tx_queue->old_read_count); 765 - tx_queue->old_read_count);
760 q_space = EFX_TXQ_MASK - 1 - fill_level; 766 q_space = efx->txq_entries - 1 - fill_level;
761 if (unlikely(q_space-- <= 0)) { 767 if (unlikely(q_space-- <= 0)) {
762 *final_buffer = NULL; 768 *final_buffer = NULL;
763 return 1; 769 return 1;
@@ -766,13 +772,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
766 --tx_queue->stopped; 772 --tx_queue->stopped;
767 } 773 }
768 774
769 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK; 775 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
770 buffer = &tx_queue->buffer[insert_ptr]; 776 buffer = &tx_queue->buffer[insert_ptr];
771 ++tx_queue->insert_count; 777 ++tx_queue->insert_count;
772 778
773 EFX_BUG_ON_PARANOID(tx_queue->insert_count - 779 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
774 tx_queue->read_count > 780 tx_queue->read_count >=
775 EFX_TXQ_MASK); 781 efx->txq_entries);
776 782
777 efx_tsoh_free(tx_queue, buffer); 783 efx_tsoh_free(tx_queue, buffer);
778 EFX_BUG_ON_PARANOID(buffer->len); 784 EFX_BUG_ON_PARANOID(buffer->len);
@@ -813,7 +819,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
813{ 819{
814 struct efx_tx_buffer *buffer; 820 struct efx_tx_buffer *buffer;
815 821
816 buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK]; 822 buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
817 efx_tsoh_free(tx_queue, buffer); 823 efx_tsoh_free(tx_queue, buffer);
818 EFX_BUG_ON_PARANOID(buffer->len); 824 EFX_BUG_ON_PARANOID(buffer->len);
819 EFX_BUG_ON_PARANOID(buffer->unmap_len); 825 EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -838,7 +844,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
838 while (tx_queue->insert_count != tx_queue->write_count) { 844 while (tx_queue->insert_count != tx_queue->write_count) {
839 --tx_queue->insert_count; 845 --tx_queue->insert_count;
840 buffer = &tx_queue->buffer[tx_queue->insert_count & 846 buffer = &tx_queue->buffer[tx_queue->insert_count &
841 EFX_TXQ_MASK]; 847 tx_queue->ptr_mask];
842 efx_tsoh_free(tx_queue, buffer); 848 efx_tsoh_free(tx_queue, buffer);
843 EFX_BUG_ON_PARANOID(buffer->skb); 849 EFX_BUG_ON_PARANOID(buffer->skb);
844 if (buffer->unmap_len) { 850 if (buffer->unmap_len) {
@@ -1168,7 +1174,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1168 unsigned i; 1174 unsigned i;
1169 1175
1170 if (tx_queue->buffer) { 1176 if (tx_queue->buffer) {
1171 for (i = 0; i <= EFX_TXQ_MASK; ++i) 1177 for (i = 0; i <= tx_queue->ptr_mask; ++i)
1172 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1178 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1173 } 1179 }
1174 1180
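
The thread running through the tx.c hunks above is the move from the compile-time EFX_TXQ_MASK to a per-queue ptr_mask derived from a power-of-two ring size. A self-contained sketch of the arithmetic (the constant and helper are illustrative, not the driver's):

#include <assert.h>

#define MIN_DMAQ_SIZE 512u	/* assumed floor, for illustration only */

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;
	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int entries = roundup_pow2(1000);	/* -> 1024 */
	if (entries < MIN_DMAQ_SIZE)
		entries = MIN_DMAQ_SIZE;
	unsigned int ptr_mask = entries - 1;		/* 0x3ff */

	/* Free-running counters wrap; masking yields the ring slot */
	assert((1025u & ptr_mask) == 1u);
	/* Fill level is a plain difference of the wrapping counters */
	assert((unsigned int)(1030u - 1025u) == 5u);
	return 0;
}
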
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c
new file mode 100644
index 000000000000..351794a79215
--- /dev/null
+++ b/drivers/net/sfc/txc43128_phy.c
@@ -0,0 +1,560 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10/*
11 * Driver for Transwitch/Mysticom CX4 retimer
12 * see www.transwitch.com, part is TXC-43128
13 */
14
15#include <linux/delay.h>
16#include <linux/slab.h>
17#include "efx.h"
18#include "mdio_10g.h"
19#include "phy.h"
20#include "nic.h"
21
22/* We expect these MMDs to be in the package */
23#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \
24 MDIO_DEVS_PMAPMD | \
25 MDIO_DEVS_PHYXS)
26
27#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
28 (1 << LOOPBACK_PMAPMD) | \
29 (1 << LOOPBACK_PHYXS_WS))
30
31/**************************************************************************
32 *
33 * Compile-time config
34 *
35 **************************************************************************
36 */
37#define TXCNAME "TXC43128"
38/* Total length of time we'll wait for the PHY to come out of reset (ms) */
39#define TXC_MAX_RESET_TIME 500
40/* Interval between checks (ms) */
41#define TXC_RESET_WAIT 10
42/* How long to run BIST (us) */
43#define TXC_BIST_DURATION 50
44
45/**************************************************************************
46 *
47 * Register definitions
48 *
49 **************************************************************************
50 */
51
52/* Command register */
53#define TXC_GLRGS_GLCMD 0xc004
54/* Useful bits in command register */
55/* Lane power-down */
56#define TXC_GLCMD_L01PD_LBN 5
57#define TXC_GLCMD_L23PD_LBN 6
58/* Limited SW reset: preserves configuration but
59 * initiates a logic reset. Self-clearing */
60#define TXC_GLCMD_LMTSWRST_LBN 14
61
62/* Signal Quality Control */
63#define TXC_GLRGS_GSGQLCTL 0xc01a
64/* Enable bit */
65#define TXC_GSGQLCT_SGQLEN_LBN 15
66/* Lane selection */
67#define TXC_GSGQLCT_LNSL_LBN 13
68#define TXC_GSGQLCT_LNSL_WIDTH 2
69
70/* Analog TX control */
71#define TXC_ALRGS_ATXCTL 0xc040
72/* Lane power-down */
73#define TXC_ATXCTL_TXPD3_LBN 15
74#define TXC_ATXCTL_TXPD2_LBN 14
75#define TXC_ATXCTL_TXPD1_LBN 13
76#define TXC_ATXCTL_TXPD0_LBN 12
77
78/* Amplitude on lanes 0, 1 */
79#define TXC_ALRGS_ATXAMP0 0xc041
80/* Amplitude on lanes 2, 3 */
81#define TXC_ALRGS_ATXAMP1 0xc042
82/* Bit position of value for lane 0 (or 2) */
83#define TXC_ATXAMP_LANE02_LBN 3
84/* Bit position of value for lane 1 (or 3) */
85#define TXC_ATXAMP_LANE13_LBN 11
86
87#define TXC_ATXAMP_1280_mV 0
88#define TXC_ATXAMP_1200_mV 8
89#define TXC_ATXAMP_1120_mV 12
90#define TXC_ATXAMP_1060_mV 14
91#define TXC_ATXAMP_0820_mV 25
92#define TXC_ATXAMP_0720_mV 26
93#define TXC_ATXAMP_0580_mV 27
94#define TXC_ATXAMP_0440_mV 28
95
96#define TXC_ATXAMP_0820_BOTH \
97 ((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
98 | (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
99
100#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */
101
102/* Preemphasis on lanes 0, 1 */
103#define TXC_ALRGS_ATXPRE0 0xc043
104/* Preemphasis on lanes 2, 3 */
105#define TXC_ALRGS_ATXPRE1 0xc044
106
107#define TXC_ATXPRE_NONE 0
108#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */
109
110#define TXC_ALRGS_ARXCTL 0xc045
111/* Lane power-down */
112#define TXC_ARXCTL_RXPD3_LBN 15
113#define TXC_ARXCTL_RXPD2_LBN 14
114#define TXC_ARXCTL_RXPD1_LBN 13
115#define TXC_ARXCTL_RXPD0_LBN 12
116
117/* Main control */
118#define TXC_MRGS_CTL 0xc340
119/* Bits in main control */
120#define TXC_MCTL_RESET_LBN 15 /* Self clear */
121#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */
122#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */
123
124/* GPIO output */
125#define TXC_GPIO_OUTPUT 0xc346
126#define TXC_GPIO_DIR 0xc348
127
128/* Vendor-specific BIST registers */
129#define TXC_BIST_CTL 0xc280
130#define TXC_BIST_TXFRMCNT 0xc281
131#define TXC_BIST_RX0FRMCNT 0xc282
132#define TXC_BIST_RX1FRMCNT 0xc283
133#define TXC_BIST_RX2FRMCNT 0xc284
134#define TXC_BIST_RX3FRMCNT 0xc285
135#define TXC_BIST_RX0ERRCNT 0xc286
136#define TXC_BIST_RX1ERRCNT 0xc287
137#define TXC_BIST_RX2ERRCNT 0xc288
138#define TXC_BIST_RX3ERRCNT 0xc289
139
 140/* BIST type (controls bit pattern in test) */
141#define TXC_BIST_CTRL_TYPE_LBN 10
142#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */
143#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */
144#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */
145#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */
146/* Set this to 1 for 10 bit and 0 for 8 bit */
147#define TXC_BIST_CTRL_B10EN_LBN 12
148/* Enable BIST (write 0 to disable) */
149#define TXC_BIST_CTRL_ENAB_LBN 13
150/* Stop BIST (self-clears when stop complete) */
151#define TXC_BIST_CTRL_STOP_LBN 14
152/* Start BIST (cleared by writing 1 to STOP) */
153#define TXC_BIST_CTRL_STRT_LBN 15
154
155/* Mt. Diablo test configuration */
156#define TXC_MTDIABLO_CTRL 0xc34f
157#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10
158
159struct txc43128_data {
160 unsigned long bug10934_timer;
161 enum efx_phy_mode phy_mode;
162 enum efx_loopback_mode loopback_mode;
163};
164
165/* The PHY sometimes needs a reset to bring the link back up. So long as
166 * it reports link down, we reset it every 5 seconds.
167 */
168#define BUG10934_RESET_INTERVAL (5 * HZ)
169
170/* Perform a reset that doesn't clear configuration changes */
171static void txc_reset_logic(struct efx_nic *efx);
172
173/* Set the output value of a gpio */
174void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
175{
176 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
177}
178
179/* Set up the GPIO direction register */
180void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
181{
182 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
183}
184
185/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
 186 * global reset (it's less clear what reset of other MMDs does). */
187static int txc_reset_phy(struct efx_nic *efx)
188{
189 int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
190 TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
191 TXC_RESET_WAIT);
192 if (rc < 0)
193 goto fail;
194
195 /* Check that all the MMDs we expect are present and responding. */
196 rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0);
197 if (rc < 0)
198 goto fail;
199
200 return 0;
201
202fail:
203 netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n");
204 return rc;
205}
206
207/* Run a single BIST on one MMD */
208static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
209{
210 int ctrl, bctl;
211 int lane;
212 int rc = 0;
213
 214	/* Put the PMA under test into loopback via the Mt Diablo reg, per the app note */
215 ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
216 ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
217 efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
218
219 /* The BIST app. note lists these as 3 distinct steps. */
220 /* Set the BIST type */
221 bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
222 efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
223
224 /* Set the BSTEN bit in the BIST Control register to enable */
225 bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
226 efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
227
228 /* Set the BSTRT bit in the BIST Control register */
229 efx_mdio_write(efx, mmd, TXC_BIST_CTL,
230 bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
231
232 /* Wait. */
233 udelay(TXC_BIST_DURATION);
234
235 /* Set the BSTOP bit in the BIST Control register */
236 bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
237 efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
238
239 /* The STOP bit should go off when things have stopped */
240 while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
241 bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);
242
243 /* Check all the error counts are 0 and all the frame counts are
244 non-zero */
245 for (lane = 0; lane < 4; lane++) {
246 int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
247 if (count != 0) {
248 netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
249 "Lane %d had %d errs\n", lane, count);
250 rc = -EIO;
251 }
252 count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
253 if (count == 0) {
254 netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
255 "Lane %d got 0 frames\n", lane);
256 rc = -EIO;
257 }
258 }
259
260 if (rc == 0)
261 netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
262
263 /* Disable BIST */
264 efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
265
266 /* Turn off loopback */
267 ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
268 efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
269
270 return rc;
271}
272
273static int txc_bist(struct efx_nic *efx)
274{
275 return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
276}
277
278/* Push the non-configurable defaults into the PHY. This must be
279 * done after every full reset */
280static void txc_apply_defaults(struct efx_nic *efx)
281{
282 int mctrl;
283
284 /* Turn amplitude down and preemphasis off on the host side
285 * (PHY<->MAC) as this is believed less likely to upset Falcon
286 * and no adverse effects have been noted. It probably also
287 * saves a picowatt or two */
288
289 /* Turn off preemphasis */
290 efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
291 efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);
292
293 /* Turn down the amplitude */
294 efx_mdio_write(efx, MDIO_MMD_PHYXS,
295 TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
296 efx_mdio_write(efx, MDIO_MMD_PHYXS,
297 TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);
298
299 /* Set the line side amplitude and preemphasis to the databook
300 * defaults as an erratum causes them to be 0 on at least some
 301	 * PHY revs */
302 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
303 TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
304 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
305 TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
306 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
307 TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
308 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
309 TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);
310
311 /* Set up the LEDs */
312 mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);
313
314 /* Set the Green and Red LEDs to their default modes */
315 mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
316 efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);
317
318 /* Databook recommends doing this after configuration changes */
319 txc_reset_logic(efx);
320
321 falcon_board(efx)->type->init_phy(efx);
322}
323
324static int txc43128_phy_probe(struct efx_nic *efx)
325{
326 struct txc43128_data *phy_data;
327
328 /* Allocate phy private storage */
329 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
330 if (!phy_data)
331 return -ENOMEM;
332 efx->phy_data = phy_data;
333 phy_data->phy_mode = efx->phy_mode;
334
335 efx->mdio.mmds = TXC_REQUIRED_DEVS;
336 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
337
338 efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
339
340 return 0;
341}
342
343/* Initialisation entry point for this PHY driver */
344static int txc43128_phy_init(struct efx_nic *efx)
345{
346 int rc;
347
348 rc = txc_reset_phy(efx);
349 if (rc < 0)
350 return rc;
351
352 rc = txc_bist(efx);
353 if (rc < 0)
354 return rc;
355
356 txc_apply_defaults(efx);
357
358 return 0;
359}
360
361/* Set the lane power down state in the global registers */
362static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
363{
364 int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
365 int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
366
367 if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
368 ctl &= ~pd;
369 else
370 ctl |= pd;
371
372 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
373}
374
375/* Set the lane power down state in the analog control registers */
376static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
377{
378 int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
379 | (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
380 int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
381 | (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
382 int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
383 int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
384
385 if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
386 txctl &= ~txpd;
387 rxctl &= ~rxpd;
388 } else {
389 txctl |= txpd;
390 rxctl |= rxpd;
391 }
392
393 efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
394 efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
395}
396
397static void txc_set_power(struct efx_nic *efx)
398{
399 /* According to the data book, all the MMDs can do low power */
400 efx_mdio_set_mmds_lpower(efx,
401 !!(efx->phy_mode & PHY_MODE_LOW_POWER),
402 TXC_REQUIRED_DEVS);
403
404 /* Global register bank is in PCS, PHY XS. These control the host
405 * side and line side settings respectively. */
406 txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
407 txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);
408
409 /* Analog register bank in PMA/PMD, PHY XS */
410 txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
411 txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
412}
413
414static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
415{
416 int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
417 int tries = 50;
418
419 val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
420 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
421 while (tries--) {
422 val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
423 if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
424 break;
425 udelay(1);
426 }
427 if (!tries)
428 netif_info(efx, hw, efx->net_dev,
429 TXCNAME " Logic reset timed out!\n");
430}
431
432/* Perform a logic reset. This preserves the configuration registers
433 * and is needed for some configuration changes to take effect */
434static void txc_reset_logic(struct efx_nic *efx)
435{
436 /* The data sheet claims we can do the logic reset on either the
437 * PCS or the PHYXS and the result is a reset of both host- and
438 * line-side logic. */
439 txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
440}
441
442static bool txc43128_phy_read_link(struct efx_nic *efx)
443{
444 return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
445}
446
447static int txc43128_phy_reconfigure(struct efx_nic *efx)
448{
449 struct txc43128_data *phy_data = efx->phy_data;
450 enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
451 bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);
452
453 if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
454 txc_reset_phy(efx);
455 txc_apply_defaults(efx);
456 falcon_reset_xaui(efx);
457 mode_change &= ~PHY_MODE_TX_DISABLED;
458 }
459
460 efx_mdio_transmit_disable(efx);
461 efx_mdio_phy_reconfigure(efx);
462 if (mode_change & PHY_MODE_LOW_POWER)
463 txc_set_power(efx);
464
465 /* The data sheet claims this is required after every reconfiguration
466 * (note at end of 7.1), but we mustn't do it when nothing changes as
467 * it glitches the link, and reconfigure gets called on link change,
468 * so we get an IRQ storm on link up. */
469 if (loop_change || mode_change)
470 txc_reset_logic(efx);
471
472 phy_data->phy_mode = efx->phy_mode;
473 phy_data->loopback_mode = efx->loopback_mode;
474
475 return 0;
476}
477
478static void txc43128_phy_fini(struct efx_nic *efx)
479{
480 /* Disable link events */
481 efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
482}
483
484static void txc43128_phy_remove(struct efx_nic *efx)
485{
486 kfree(efx->phy_data);
487 efx->phy_data = NULL;
488}
489
490/* Periodic callback: this exists mainly to poll link status as we
491 * don't use LASI interrupts */
492static bool txc43128_phy_poll(struct efx_nic *efx)
493{
494 struct txc43128_data *data = efx->phy_data;
495 bool was_up = efx->link_state.up;
496
497 efx->link_state.up = txc43128_phy_read_link(efx);
498 efx->link_state.speed = 10000;
499 efx->link_state.fd = true;
500 efx->link_state.fc = efx->wanted_fc;
501
502 if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) {
503 data->bug10934_timer = jiffies;
504 } else {
505 if (time_after_eq(jiffies, (data->bug10934_timer +
506 BUG10934_RESET_INTERVAL))) {
507 data->bug10934_timer = jiffies;
508 txc_reset_logic(efx);
509 }
510 }
511
512 return efx->link_state.up != was_up;
513}
514
515static const char *txc43128_test_names[] = {
516 "bist"
517};
518
519static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
520{
521 if (index < ARRAY_SIZE(txc43128_test_names))
522 return txc43128_test_names[index];
523 return NULL;
524}
525
526static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
527{
528 int rc;
529
530 if (!(flags & ETH_TEST_FL_OFFLINE))
531 return 0;
532
533 rc = txc_reset_phy(efx);
534 if (rc < 0)
535 return rc;
536
537 rc = txc_bist(efx);
538 txc_apply_defaults(efx);
539 results[0] = rc ? -1 : 1;
540 return rc;
541}
542
543static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
544{
545 mdio45_ethtool_gset(&efx->mdio, ecmd);
546}
547
548struct efx_phy_operations falcon_txc_phy_ops = {
549 .probe = txc43128_phy_probe,
550 .init = txc43128_phy_init,
551 .reconfigure = txc43128_phy_reconfigure,
552 .poll = txc43128_phy_poll,
553 .fini = txc43128_phy_fini,
554 .remove = txc43128_phy_remove,
555 .get_settings = txc43128_get_settings,
556 .set_settings = efx_mdio_set_settings,
557 .test_alive = efx_mdio_test_alive,
558 .run_tests = txc43128_run_tests,
559 .test_name = txc43128_test_name,
560};
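
txc_reset_logic_mmd() above is an instance of a common MDIO idiom: set a self-clearing bit, then poll until the hardware clears it or a retry budget runs out. A generic sketch of that pattern, with hypothetical mdio_read/mdio_write/udelay stand-ins for the efx_mdio helpers:

/* Hypothetical stand-ins; the driver uses efx_mdio_read/efx_mdio_write */
extern int mdio_read(int mmd, int reg);
extern void mdio_write(int mmd, int reg, int val);
extern void udelay(unsigned long usecs);

static int wait_self_clearing_bit(int mmd, int reg, int bit_lbn, int tries)
{
	int val = mdio_read(mmd, reg);

	mdio_write(mmd, reg, val | (1 << bit_lbn));	/* trigger the reset */
	while (tries--) {
		if (!(mdio_read(mmd, reg) & (1 << bit_lbn)))
			return 0;	/* hardware cleared the bit: done */
		udelay(1);
	}
	return -1;			/* timed out, as the driver logs */
}
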
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 782e45a613d6..e0d63083c3a8 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -19,9 +19,7 @@
19#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) 19#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
20#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0) 20#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
21#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) 21#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
22#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx) 22#define EFX_WORKAROUND_10G(efx) 1
23#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
24 (efx)->phy_type == PHY_TYPE_SFT9001B)
25 23
26/* XAUI resets if link not detected */ 24/* XAUI resets if link not detected */
27#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS 25#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
@@ -58,9 +56,4 @@
58/* Leak overlength packets rather than free */ 56/* Leak overlength packets rather than free */
59#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A 57#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
60 58
61/* Need to send XNP pages for 100BaseT */
62#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001
63/* Don't restart AN in near-side loopback */
64#define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001
65
66#endif /* EFX_WORKAROUNDS_H */ 59#endif /* EFX_WORKAROUNDS_H */
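
The workaround macros above all share one shape: a predicate over the NIC, evaluated at the call site to gate a fix. A toy, self-contained mock of the pattern (the revision model and the EFX_WORKAROUND_ALWAYS expansion are assumptions, not quotes from the header):

#include <stdio.h>

enum { REV_FALCON_A1 = 1, REV_FALCON_B0 = 2 };
struct efx_nic { int rev; };

#define efx_nic_rev(efx)             ((efx)->rev)
#define EFX_WORKAROUND_ALWAYS(efx)   1
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= REV_FALCON_A1)
#define EFX_WORKAROUND_5147          EFX_WORKAROUND_ALWAYS

int main(void)
{
	struct efx_nic nic = { REV_FALCON_B0 };

	if (EFX_WORKAROUND_5147(&nic))
		printf("always: reset XAUI if link not detected\n");
	if (!EFX_WORKAROUND_FALCON_A(&nic))
		printf("B0 silicon: A-rev-only fixes skipped\n");
	return 0;
}
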
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 79fd02bc69fd..50259dfec583 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -798,7 +798,7 @@ static int sh_eth_rx(struct net_device *ndev)
798 skb->dev = ndev; 798 skb->dev = ndev;
799 sh_eth_set_receive_align(skb); 799 sh_eth_set_receive_align(skb);
800 800
801 skb->ip_summed = CHECKSUM_NONE; 801 skb_checksum_none_assert(skb);
802 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 802 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
803 } 803 }
804 if (entry >= RX_RING_SIZE - 1) 804 if (entry >= RX_RING_SIZE - 1)
@@ -1031,7 +1031,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
1031 mdp->duplex = -1; 1031 mdp->duplex = -1;
1032 1032
1033	/* Try to connect to the PHY */ 1033	/* Try to connect to the PHY */
1034 phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link, 1034 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1035 0, PHY_INTERFACE_MODE_MII); 1035 0, PHY_INTERFACE_MODE_MII);
1036 if (IS_ERR(phydev)) { 1036 if (IS_ERR(phydev)) {
1037 dev_err(&ndev->dev, "phy_connect failed\n"); 1037 dev_err(&ndev->dev, "phy_connect failed\n");
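
The sh_eth hunk swaps an explicit skb->ip_summed = CHECKSUM_NONE store for skb_checksum_none_assert(). As best we can tell, that helper only asserts the field is already CHECKSUM_NONE (freshly allocated skbs are zero-initialised), roughly along these lines (a paraphrase of the skbuff.h helper of this era, not the verbatim definition):

/* Approximate shape of the helper; the check compiles away unless DEBUG */
static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}
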
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index bbbded76ff14..581836867098 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -832,7 +832,7 @@ static u16 __devinit read_eeprom(long ioaddr, int location)
832 outl(0, ee_addr); 832 outl(0, ee_addr);
833 eeprom_delay(); 833 eeprom_delay();
834 834
835 return (retval); 835 return retval;
836} 836}
837 837
838/* Read and write the MII management registers using software-generated 838/* Read and write the MII management registers using software-generated
@@ -1042,7 +1042,7 @@ sis900_open(struct net_device *net_dev)
1042 init_timer(&sis_priv->timer); 1042 init_timer(&sis_priv->timer);
1043 sis_priv->timer.expires = jiffies + HZ; 1043 sis_priv->timer.expires = jiffies + HZ;
1044 sis_priv->timer.data = (unsigned long)net_dev; 1044 sis_priv->timer.data = (unsigned long)net_dev;
1045 sis_priv->timer.function = &sis900_timer; 1045 sis_priv->timer.function = sis900_timer;
1046 add_timer(&sis_priv->timer); 1046 add_timer(&sis_priv->timer);
1047 1047
1048 return 0; 1048 return 0;
@@ -2247,9 +2247,9 @@ static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
2247 2247
2248 	/* leave the 8 or 7 most significant bits */ 2248 	/* leave the 8 or 7 most significant bits */
2249 if ((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV)) 2249 if ((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
2250 return ((int)(crc >> 24)); 2250 return (int)(crc >> 24);
2251 else 2251 else
2252 return ((int)(crc >> 25)); 2252 return (int)(crc >> 25);
2253} 2253}
2254 2254
2255/** 2255/**
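
For context on the crc >> 24 versus crc >> 25 split just above: sis900_mcast_bitnr() keeps the 8 or 7 most significant bits of the Ethernet CRC as the multicast hash bit number, depending on chip revision. A self-contained sketch, with a bitwise CRC standing in for the kernel's ether_crc() (which shifts the CRC register MSB-first while consuming each octet LSB-first):

#include <stdint.h>

static uint32_t crc32_msb(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffffu;

	while (len-- > 0) {
		uint8_t octet = *data++;
		for (int bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (octet & 1u)) ? 0x04c11db7u : 0);
	}
	return crc;
}

/* Keep the 8 (newer revisions) or 7 (older) most significant bits */
static unsigned int mcast_bitnr(const uint8_t addr[6], int new_rev)
{
	uint32_t crc = crc32_msb(6, addr);
	return new_rev ? crc >> 24 : crc >> 25;
}
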
diff --git a/drivers/net/skfp/cfm.c b/drivers/net/skfp/cfm.c
index 5310d39b5737..e395ace3120b 100644
--- a/drivers/net/skfp/cfm.c
+++ b/drivers/net/skfp/cfm.c
@@ -542,8 +542,8 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
542 */ 542 */
543int cfm_get_mac_input(struct s_smc *smc) 543int cfm_get_mac_input(struct s_smc *smc)
544{ 544{
545 return((smc->mib.fddiSMTCF_State == SC10_C_WRAP_B || 545 return (smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
546 smc->mib.fddiSMTCF_State == SC5_THRU_B) ? PB : PA) ; 546 smc->mib.fddiSMTCF_State == SC5_THRU_B) ? PB : PA;
547} 547}
548 548
549/* 549/*
@@ -553,8 +553,8 @@ int cfm_get_mac_input(struct s_smc *smc)
553 */ 553 */
554int cfm_get_mac_output(struct s_smc *smc) 554int cfm_get_mac_output(struct s_smc *smc)
555{ 555{
556 return((smc->mib.fddiSMTCF_State == SC10_C_WRAP_B || 556 return (smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
557 smc->mib.fddiSMTCF_State == SC4_THRU_A) ? PB : PA) ; 557 smc->mib.fddiSMTCF_State == SC4_THRU_A) ? PB : PA;
558} 558}
559 559
560static char path_iso[] = { 560static char path_iso[] = {
@@ -623,5 +623,5 @@ int cem_build_path(struct s_smc *smc, char *to, int path_index)
623 623
624 LINT_USE(path_index); 624 LINT_USE(path_index);
625 625
626 return(len) ; 626 return len;
627} 627}
diff --git a/drivers/net/skfp/drvfbi.c b/drivers/net/skfp/drvfbi.c
index c77cc14b3227..07da97c303d6 100644
--- a/drivers/net/skfp/drvfbi.c
+++ b/drivers/net/skfp/drvfbi.c
@@ -267,7 +267,7 @@ void timer_irq(struct s_smc *smc)
267int pcm_get_s_port(struct s_smc *smc) 267int pcm_get_s_port(struct s_smc *smc)
268{ 268{
269 SK_UNUSED(smc) ; 269 SK_UNUSED(smc) ;
270 return(PS) ; 270 return PS;
271} 271}
272 272
273/* 273/*
@@ -366,7 +366,7 @@ void sm_pm_bypass_req(struct s_smc *smc, int mode)
366 */ 366 */
367int sm_pm_bypass_present(struct s_smc *smc) 367int sm_pm_bypass_present(struct s_smc *smc)
368{ 368{
369 return( (inp(ADDR(B0_DAS)) & DAS_BYP_ST) ? TRUE: FALSE) ; 369 return (inp(ADDR(B0_DAS)) & DAS_BYP_ST) ? TRUE : FALSE;
370} 370}
371 371
372void plc_clear_irq(struct s_smc *smc, int p) 372void plc_clear_irq(struct s_smc *smc, int p)
@@ -483,9 +483,9 @@ static int is_equal_num(char comp1[], char comp2[], int num)
483 483
484 for (i = 0 ; i < num ; i++) { 484 for (i = 0 ; i < num ; i++) {
485 if (comp1[i] != comp2[i]) 485 if (comp1[i] != comp2[i])
486 return (0) ; 486 return 0;
487 } 487 }
488 return (1) ; 488 return 1;
489} /* is_equal_num */ 489} /* is_equal_num */
490 490
491 491
@@ -522,18 +522,18 @@ int set_oi_id_def(struct s_smc *smc)
522 i++ ; 522 i++ ;
523 break ; /* entry ok */ 523 break ; /* entry ok */
524 default: 524 default:
525 return (1) ; /* invalid oi_status */ 525 return 1; /* invalid oi_status */
526 } 526 }
527 } 527 }
528 528
529 if (i == 0) 529 if (i == 0)
530 return (2) ; 530 return 2;
531 if (!act_entries) 531 if (!act_entries)
532 return (3) ; 532 return 3;
533 533
534 /* ok, we have a valid OEM data base with an active entry */ 534 /* ok, we have a valid OEM data base with an active entry */
535 smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[sel_id] ; 535 smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[sel_id] ;
536 return (0) ; 536 return 0;
537} 537}
538#endif /* MULT_OEM */ 538#endif /* MULT_OEM */
539 539
diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c
index e8387d25f24a..8639a0884f5c 100644
--- a/drivers/net/skfp/ess.c
+++ b/drivers/net/skfp/ess.c
@@ -135,7 +135,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
135 */ 135 */
136 if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) { 136 if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) {
137 DB_ESS("ESS: RAF frame error, parameter type not found\n",0,0) ; 137 DB_ESS("ESS: RAF frame error, parameter type not found\n",0,0) ;
138 return(fs) ; 138 return fs;
139 } 139 }
140 msg_res_type = ((struct smt_p_0015 *)p)->res_type ; 140 msg_res_type = ((struct smt_p_0015 *)p)->res_type ;
141 141
@@ -147,7 +147,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
147 * error in frame: para ESS command was not found 147 * error in frame: para ESS command was not found
148 */ 148 */
149 DB_ESS("ESS: RAF frame error, parameter command not found\n",0,0); 149 DB_ESS("ESS: RAF frame error, parameter command not found\n",0,0);
150 return(fs) ; 150 return fs;
151 } 151 }
152 152
153 DB_ESSN(2,"fc %x ft %x\n",sm->smt_class,sm->smt_type) ; 153 DB_ESSN(2,"fc %x ft %x\n",sm->smt_class,sm->smt_type) ;
@@ -175,12 +175,12 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
175 * local and no static allocation is used 175 * local and no static allocation is used
176 */ 176 */
177 if (!local || smc->mib.fddiESSPayload) 177 if (!local || smc->mib.fddiESSPayload)
178 return(fs) ; 178 return fs;
179 179
180 p = (void *) sm_to_para(smc,sm,SMT_P0019) ; 180 p = (void *) sm_to_para(smc,sm,SMT_P0019) ;
181 for (i = 0; i < 5; i++) { 181 for (i = 0; i < 5; i++) {
182 if (((struct smt_p_0019 *)p)->alloc_addr.a[i]) { 182 if (((struct smt_p_0019 *)p)->alloc_addr.a[i]) {
183 return(fs) ; 183 return fs;
184 } 184 }
185 } 185 }
186 186
@@ -199,10 +199,10 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
199 sm->smt_dest = smt_sba_da ; 199 sm->smt_dest = smt_sba_da ;
200 200
201 if (smc->ess.local_sba_active) 201 if (smc->ess.local_sba_active)
202 return(fs | I_INDICATOR) ; 202 return fs | I_INDICATOR;
203 203
204 if (!(db = smt_get_mbuf(smc))) 204 if (!(db = smt_get_mbuf(smc)))
205 return(fs) ; 205 return fs;
206 206
207 db->sm_len = mb->sm_len ; 207 db->sm_len = mb->sm_len ;
208 db->sm_off = mb->sm_off ; 208 db->sm_off = mb->sm_off ;
@@ -212,7 +212,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
212 (struct smt_header *)(db->sm_data+db->sm_off), 212 (struct smt_header *)(db->sm_data+db->sm_off),
213 "RAF") ; 213 "RAF") ;
214 smt_send_frame(smc,db,FC_SMT_INFO,0) ; 214 smt_send_frame(smc,db,FC_SMT_INFO,0) ;
215 return(fs) ; 215 return fs;
216 } 216 }
217 217
218 /* 218 /*
@@ -221,7 +221,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
221 */ 221 */
222 if (smt_check_para(smc,sm,plist_raf_alc_res)) { 222 if (smt_check_para(smc,sm,plist_raf_alc_res)) {
223 DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ; 223 DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
224 return(fs) ; 224 return fs;
225 } 225 }
226 226
227 /* 227 /*
@@ -242,7 +242,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
242 (sm->smt_tid != smc->ess.alloc_trans_id)) { 242 (sm->smt_tid != smc->ess.alloc_trans_id)) {
243 243
244 DB_ESS("ESS: Allocation Responce not accepted\n",0,0) ; 244 DB_ESS("ESS: Allocation Responce not accepted\n",0,0) ;
245 return(fs) ; 245 return fs;
246 } 246 }
247 247
248 /* 248 /*
@@ -268,7 +268,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
268 */ 268 */
269 (void)process_bw_alloc(smc,(long)payload,(long)overhead) ; 269 (void)process_bw_alloc(smc,(long)payload,(long)overhead) ;
270 270
271 return(fs) ; 271 return fs;
272 /* end of Process Allocation Request */ 272 /* end of Process Allocation Request */
273 273
274 /* 274 /*
@@ -280,7 +280,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
280 */ 280 */
281 if (sm->smt_type != SMT_REQUEST) { 281 if (sm->smt_type != SMT_REQUEST) {
282 DB_ESS("ESS: Do not process Change Responses\n",0,0) ; 282 DB_ESS("ESS: Do not process Change Responses\n",0,0) ;
283 return(fs) ; 283 return fs;
284 } 284 }
285 285
286 /* 286 /*
@@ -288,7 +288,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
288 */ 288 */
289 if (smt_check_para(smc,sm,plist_raf_chg_req)) { 289 if (smt_check_para(smc,sm,plist_raf_chg_req)) {
290 DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ; 290 DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
291 return(fs) ; 291 return fs;
292 } 292 }
293 293
294 /* 294 /*
@@ -300,7 +300,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
300 if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index 300 if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index
301 != PRIMARY_RING) || (msg_res_type != SYNC_BW)) { 301 != PRIMARY_RING) || (msg_res_type != SYNC_BW)) {
302 DB_ESS("ESS: RAF frame with para problem, ignoring\n",0,0) ; 302 DB_ESS("ESS: RAF frame with para problem, ignoring\n",0,0) ;
303 return(fs) ; 303 return fs;
304 } 304 }
305 305
306 /* 306 /*
@@ -319,14 +319,14 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
319 * process the bandwidth allocation 319 * process the bandwidth allocation
320 */ 320 */
321 if(!process_bw_alloc(smc,(long)payload,(long)overhead)) 321 if(!process_bw_alloc(smc,(long)payload,(long)overhead))
322 return(fs) ; 322 return fs;
323 323
324 /* 324 /*
325 * send an RAF Change Reply 325 * send an RAF Change Reply
326 */ 326 */
327 ess_send_response(smc,sm,CHANGE_ALLOCATION) ; 327 ess_send_response(smc,sm,CHANGE_ALLOCATION) ;
328 328
329 return(fs) ; 329 return fs;
330 /* end of Process Change Request */ 330 /* end of Process Change Request */
331 331
332 /* 332 /*
@@ -338,7 +338,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
338 */ 338 */
339 if (sm->smt_type != SMT_REQUEST) { 339 if (sm->smt_type != SMT_REQUEST) {
340 DB_ESS("ESS: Do not process a Report Reply\n",0,0) ; 340 DB_ESS("ESS: Do not process a Report Reply\n",0,0) ;
341 return(fs) ; 341 return fs;
342 } 342 }
343 343
344 DB_ESSN(2,"ESS: Report Request from %s\n", 344 DB_ESSN(2,"ESS: Report Request from %s\n",
@@ -349,7 +349,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
349 */ 349 */
350 if (msg_res_type != SYNC_BW) { 350 if (msg_res_type != SYNC_BW) {
351 DB_ESS("ESS: ignoring RAF with para problem\n",0,0) ; 351 DB_ESS("ESS: ignoring RAF with para problem\n",0,0) ;
352 return(fs) ; 352 return fs;
353 } 353 }
354 354
355 /* 355 /*
@@ -357,7 +357,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
357 */ 357 */
358 ess_send_response(smc,sm,REPORT_ALLOCATION) ; 358 ess_send_response(smc,sm,REPORT_ALLOCATION) ;
359 359
360 return(fs) ; 360 return fs;
361 /* end of Process Report Request */ 361 /* end of Process Report Request */
362 362
363 default: 363 default:
@@ -368,7 +368,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
368 break ; 368 break ;
369 } 369 }
370 370
371 return(fs) ; 371 return fs;
372} 372}
373 373
374/* 374/*
@@ -418,17 +418,17 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
418 */ 418 */
419/* if (smt_set_obj(smc,SMT_P320F,payload,S_SET)) { 419/* if (smt_set_obj(smc,SMT_P320F,payload,S_SET)) {
420 DB_ESS("ESS: SMT does not accept the payload value\n",0,0) ; 420 DB_ESS("ESS: SMT does not accept the payload value\n",0,0) ;
421 return(FALSE) ; 421 return FALSE;
422 } 422 }
423 if (smt_set_obj(smc,SMT_P3210,overhead,S_SET)) { 423 if (smt_set_obj(smc,SMT_P3210,overhead,S_SET)) {
424 DB_ESS("ESS: SMT does not accept the overhead value\n",0,0) ; 424 DB_ESS("ESS: SMT does not accept the overhead value\n",0,0) ;
425 return(FALSE) ; 425 return FALSE;
426 } */ 426 } */
427 427
428 	/* preliminary */ 428 	/* preliminary */
429 if (payload > MAX_PAYLOAD || overhead > 5000) { 429 if (payload > MAX_PAYLOAD || overhead > 5000) {
430 DB_ESS("ESS: payload / overhead not accepted\n",0,0) ; 430 DB_ESS("ESS: payload / overhead not accepted\n",0,0) ;
431 return(FALSE) ; 431 return FALSE;
432 } 432 }
433 433
434 /* 434 /*
@@ -468,7 +468,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
468 468
469 ess_config_fifo(smc) ; 469 ess_config_fifo(smc) ;
470 set_formac_tsync(smc,smc->ess.sync_bw) ; 470 set_formac_tsync(smc,smc->ess.sync_bw) ;
471 return(TRUE) ; 471 return TRUE;
472} 472}
473 473
474static void ess_send_response(struct s_smc *smc, struct smt_header *sm, 474static void ess_send_response(struct s_smc *smc, struct smt_header *sm,
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 9d8d1ac48176..ca4e7bb6a5a8 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -112,8 +112,8 @@ static u_long mac_get_tneg(struct s_smc *smc)
112 u_long tneg ; 112 u_long tneg ;
113 113
114 tneg = (u_long)((long)inpw(FM_A(FM_TNEG))<<5) ; 114 tneg = (u_long)((long)inpw(FM_A(FM_TNEG))<<5) ;
115 return((u_long)((tneg + ((inpw(FM_A(FM_TMRS))>>10)&0x1f)) | 115 return (u_long)((tneg + ((inpw(FM_A(FM_TMRS))>>10)&0x1f)) |
116 0xffe00000L)) ; 116 0xffe00000L) ;
117} 117}
118 118
119void mac_update_counter(struct s_smc *smc) 119void mac_update_counter(struct s_smc *smc)
@@ -163,7 +163,7 @@ static u_long read_mdr(struct s_smc *smc, unsigned int addr)
163 /* is used */ 163 /* is used */
164 p = (u_long)inpw(FM_A(FM_MDRU))<<16 ; 164 p = (u_long)inpw(FM_A(FM_MDRU))<<16 ;
165 p += (u_long)inpw(FM_A(FM_MDRL)) ; 165 p += (u_long)inpw(FM_A(FM_MDRL)) ;
166 return(p) ; 166 return p;
167} 167}
168#endif 168#endif
169 169
@@ -887,7 +887,7 @@ int init_fplus(struct s_smc *smc)
887 /* make sure all PCI settings are correct */ 887 /* make sure all PCI settings are correct */
888 mac_do_pci_fix(smc) ; 888 mac_do_pci_fix(smc) ;
889 889
890 return(init_mac(smc,1)) ; 890 return init_mac(smc, 1);
891 /* enable_formac(smc) ; */ 891 /* enable_formac(smc) ; */
892} 892}
893 893
@@ -989,7 +989,7 @@ static int init_mac(struct s_smc *smc, int all)
989 } 989 }
990 smc->hw.hw_state = STARTED ; 990 smc->hw.hw_state = STARTED ;
991 991
992 return(0) ; 992 return 0;
993} 993}
994 994
995 995
@@ -1049,7 +1049,7 @@ void sm_ma_control(struct s_smc *smc, int mode)
1049 1049
1050int sm_mac_get_tx_state(struct s_smc *smc) 1050int sm_mac_get_tx_state(struct s_smc *smc)
1051{ 1051{
1052 return((inpw(FM_A(FM_STMCHN))>>4)&7) ; 1052 return (inpw(FM_A(FM_STMCHN))>>4) & 7;
1053} 1053}
1054 1054
1055/* 1055/*
@@ -1084,9 +1084,9 @@ static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
1084 } 1084 }
1085 if (memcmp((char *)&tb->a,(char *)own,6)) 1085 if (memcmp((char *)&tb->a,(char *)own,6))
1086 continue ; 1086 continue ;
1087 return(tb) ; 1087 return tb;
1088 } 1088 }
1089 return(slot) ; /* return first free or NULL */ 1089 return slot; /* return first free or NULL */
1090} 1090}
1091 1091
1092/* 1092/*
@@ -1152,12 +1152,12 @@ int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
1152 */ 1152 */
1153 if (can & 0x80) { 1153 if (can & 0x80) {
1154 if (smc->hw.fp.smt_slots_used >= SMT_MAX_MULTI) { 1154 if (smc->hw.fp.smt_slots_used >= SMT_MAX_MULTI) {
1155 return(1) ; 1155 return 1;
1156 } 1156 }
1157 } 1157 }
1158 else { 1158 else {
1159 if (smc->hw.fp.os_slots_used >= FPMAX_MULTICAST-SMT_MAX_MULTI) { 1159 if (smc->hw.fp.os_slots_used >= FPMAX_MULTICAST-SMT_MAX_MULTI) {
1160 return(1) ; 1160 return 1;
1161 } 1161 }
1162 } 1162 }
1163 1163
@@ -1165,7 +1165,7 @@ int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
1165 * find empty slot 1165 * find empty slot
1166 */ 1166 */
1167 if (!(tb = mac_get_mc_table(smc,addr,&own,0,can & ~0x80))) 1167 if (!(tb = mac_get_mc_table(smc,addr,&own,0,can & ~0x80)))
1168 return(1) ; 1168 return 1;
1169 tb->n++ ; 1169 tb->n++ ;
1170 tb->a = own ; 1170 tb->a = own ;
1171 tb->perm = (can & 0x80) ? 1 : 0 ; 1171 tb->perm = (can & 0x80) ? 1 : 0 ;
@@ -1175,7 +1175,7 @@ int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
1175 else 1175 else
1176 smc->hw.fp.os_slots_used++ ; 1176 smc->hw.fp.os_slots_used++ ;
1177 1177
1178 return(0) ; 1178 return 0;
1179} 1179}
1180 1180
1181/* 1181/*
diff --git a/drivers/net/skfp/hwmtm.c b/drivers/net/skfp/hwmtm.c
index d322f1b702ac..af5a755e269d 100644
--- a/drivers/net/skfp/hwmtm.c
+++ b/drivers/net/skfp/hwmtm.c
@@ -232,16 +232,16 @@ u_int mac_drv_check_space(void)
232#ifdef COMMON_MB_POOL 232#ifdef COMMON_MB_POOL
233 call_count++ ; 233 call_count++ ;
234 if (call_count == 1) { 234 if (call_count == 1) {
235 return(EXT_VIRT_MEM) ; 235 return EXT_VIRT_MEM;
236 } 236 }
237 else { 237 else {
238 return(EXT_VIRT_MEM_2) ; 238 return EXT_VIRT_MEM_2;
239 } 239 }
240#else 240#else
241 return (EXT_VIRT_MEM) ; 241 return EXT_VIRT_MEM;
242#endif 242#endif
243#else 243#else
244 return (0) ; 244 return 0;
245#endif 245#endif
246} 246}
247 247
@@ -271,7 +271,7 @@ int mac_drv_init(struct s_smc *smc)
271 if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *) 271 if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
272 mac_drv_get_desc_mem(smc,(u_int) 272 mac_drv_get_desc_mem(smc,(u_int)
273 (RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) { 273 (RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
274 return(1) ; /* no space, the hwm module can't work */ 274 return 1; /* no space, the hwm module can't work */
275 } 275 }
276 276
277 /* 277 /*
@@ -283,18 +283,18 @@ int mac_drv_init(struct s_smc *smc)
283#ifndef COMMON_MB_POOL 283#ifndef COMMON_MB_POOL
284 if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc, 284 if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
285 MAX_MBUF*sizeof(SMbuf)))) { 285 MAX_MBUF*sizeof(SMbuf)))) {
286 return(1) ; /* no space, the hwm module can't work */ 286 return 1; /* no space, the hwm module can't work */
287 } 287 }
288#else 288#else
289 if (!mb_start) { 289 if (!mb_start) {
290 if (!(mb_start = (SMbuf *) mac_drv_get_space(smc, 290 if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
291 MAX_MBUF*sizeof(SMbuf)))) { 291 MAX_MBUF*sizeof(SMbuf)))) {
292 return(1) ; /* no space, the hwm module can't work */ 292 return 1; /* no space, the hwm module can't work */
293 } 293 }
294 } 294 }
295#endif 295#endif
296#endif 296#endif
297 return (0) ; 297 return 0;
298} 298}
299 299
300/* 300/*
@@ -349,7 +349,7 @@ static u_long init_descr_ring(struct s_smc *smc,
349 DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ; 349 DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
350 d1++; 350 d1++;
351 } 351 }
352 return(phys) ; 352 return phys;
353} 353}
354 354
355static void init_txd_ring(struct s_smc *smc) 355static void init_txd_ring(struct s_smc *smc)
@@ -502,7 +502,7 @@ SMbuf *smt_get_mbuf(struct s_smc *smc)
502 mb->sm_use_count = 1 ; 502 mb->sm_use_count = 1 ;
503 } 503 }
504 DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ; 504 DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ;
505 return (mb) ; /* May be NULL */ 505 return mb; /* May be NULL */
506} 506}
507 507
508void smt_free_mbuf(struct s_smc *smc, SMbuf *mb) 508void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
@@ -621,7 +621,7 @@ static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
621 t = t->txd_next ; 621 t = t->txd_next ;
622 tx_used-- ; 622 tx_used-- ;
623 } 623 }
624 return(phys) ; 624 return phys;
625} 625}
626 626
627/* 627/*
@@ -673,7 +673,7 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
673 r = r->rxd_next ; 673 r = r->rxd_next ;
674 rx_used-- ; 674 rx_used-- ;
675 } 675 }
676 return(phys) ; 676 return phys;
677} 677}
678 678
679 679
@@ -1595,7 +1595,7 @@ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
1595 } 1595 }
1596 DB_TX("frame_status = %x",frame_status,0,3) ; 1596 DB_TX("frame_status = %x",frame_status,0,3) ;
1597 NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ; 1597 NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
1598 return(frame_status) ; 1598 return frame_status;
1599} 1599}
1600 1600
1601/* 1601/*
@@ -1764,7 +1764,7 @@ static SMbuf *get_llc_rx(struct s_smc *smc)
1764 smc->os.hwm.llc_rx_pipe = mb->sm_next ; 1764 smc->os.hwm.llc_rx_pipe = mb->sm_next ;
1765 } 1765 }
1766 DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ; 1766 DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ;
1767 return(mb) ; 1767 return mb;
1768} 1768}
1769 1769
1770/* 1770/*
@@ -1797,7 +1797,7 @@ static SMbuf *get_txd_mb(struct s_smc *smc)
1797 smc->os.hwm.txd_tx_pipe = mb->sm_next ; 1797 smc->os.hwm.txd_tx_pipe = mb->sm_next ;
1798 } 1798 }
1799 DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ; 1799 DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ;
1800 return(mb) ; 1800 return mb;
1801} 1801}
1802 1802
1803/* 1803/*
diff --git a/drivers/net/skfp/hwt.c b/drivers/net/skfp/hwt.c
index 053151468f93..e6baa53307c7 100644
--- a/drivers/net/skfp/hwt.c
+++ b/drivers/net/skfp/hwt.c
@@ -179,7 +179,7 @@ u_long hwt_read(struct s_smc *smc)
179 else 179 else
180 smc->hw.t_stop = smc->hw.t_start - tr ; 180 smc->hw.t_stop = smc->hw.t_start - tr ;
181 } 181 }
182 return (smc->hw.t_stop) ; 182 return smc->hw.t_stop;
183} 183}
184 184
185#ifdef PCI 185#ifdef PCI
@@ -208,7 +208,7 @@ u_long hwt_quick_read(struct s_smc *smc)
208 outpw(ADDR(B2_TI_CRTL), TIM_START) ; 208 outpw(ADDR(B2_TI_CRTL), TIM_START) ;
209 outpd(ADDR(B2_TI_INI),interval) ; 209 outpd(ADDR(B2_TI_INI),interval) ;
210 210
211 return(time) ; 211 return time;
212} 212}
213 213
214/************************ 214/************************
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index ba45bc794d77..112d35b1bf0e 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -504,7 +504,7 @@ int sm_pm_get_ls(struct s_smc *smc, int phy)
504 504
505#ifdef CONCENTRATOR 505#ifdef CONCENTRATOR
506 if (!plc_is_installed(smc,phy)) 506 if (!plc_is_installed(smc,phy))
507 return(PC_QLS) ; 507 return PC_QLS;
508#endif 508#endif
509 509
510 state = inpw(PLC(phy,PL_STATUS_A)) & PL_LINE_ST ; 510 state = inpw(PLC(phy,PL_STATUS_A)) & PL_LINE_ST ;
@@ -528,7 +528,7 @@ int sm_pm_get_ls(struct s_smc *smc, int phy)
528 default : 528 default :
529 state = PC_LS_NONE ; 529 state = PC_LS_NONE ;
530 } 530 }
531 return(state) ; 531 return state;
532} 532}
533 533
534static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len) 534static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
@@ -547,7 +547,7 @@ static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
547#if 0 547#if 0
548 printf("PL_PCM_SIGNAL is set\n") ; 548 printf("PL_PCM_SIGNAL is set\n") ;
549#endif 549#endif
550 return(1) ; 550 return 1;
551 } 551 }
552 /* write bit[n] & length = 1 to regs */ 552 /* write bit[n] & length = 1 to regs */
553 outpw(PLC(np,PL_VECTOR_LEN),len-1) ; /* len=nr-1 */ 553 outpw(PLC(np,PL_VECTOR_LEN),len-1) ; /* len=nr-1 */
@@ -562,7 +562,7 @@ static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
562 printf("SIGNALING bit %d .. %d\n",phy->bitn,phy->bitn+len-1) ; 562 printf("SIGNALING bit %d .. %d\n",phy->bitn,phy->bitn+len-1) ;
563#endif 563#endif
564#endif 564#endif
565 return(0) ; 565 return 0;
566} 566}
567 567
568/* 568/*
@@ -1590,12 +1590,12 @@ int pcm_status_twisted(struct s_smc *smc)
1590{ 1590{
1591 int twist = 0 ; 1591 int twist = 0 ;
1592 if (smc->s.sas != SMT_DAS) 1592 if (smc->s.sas != SMT_DAS)
1593 return(0) ; 1593 return 0;
1594 if (smc->y[PA].twisted && (smc->y[PA].mib->fddiPORTPCMState == PC8_ACTIVE)) 1594 if (smc->y[PA].twisted && (smc->y[PA].mib->fddiPORTPCMState == PC8_ACTIVE))
1595 twist |= 1 ; 1595 twist |= 1 ;
1596 if (smc->y[PB].twisted && (smc->y[PB].mib->fddiPORTPCMState == PC8_ACTIVE)) 1596 if (smc->y[PB].twisted && (smc->y[PB].mib->fddiPORTPCMState == PC8_ACTIVE))
1597 twist |= 2 ; 1597 twist |= 2 ;
1598 return(twist) ; 1598 return twist;
1599} 1599}
1600 1600
1601/* 1601/*
@@ -1636,9 +1636,9 @@ int pcm_rooted_station(struct s_smc *smc)
1636 for (n = 0 ; n < NUMPHYS ; n++) { 1636 for (n = 0 ; n < NUMPHYS ; n++) {
1637 if (smc->y[n].mib->fddiPORTPCMState == PC8_ACTIVE && 1637 if (smc->y[n].mib->fddiPORTPCMState == PC8_ACTIVE &&
1638 smc->y[n].mib->fddiPORTNeighborType == TM) 1638 smc->y[n].mib->fddiPORTNeighborType == TM)
1639 return(0) ; 1639 return 0;
1640 } 1640 }
1641 return(1) ; 1641 return 1;
1642} 1642}
1643 1643
1644/* 1644/*
@@ -1915,7 +1915,7 @@ int get_pcm_state(struct s_smc *smc, int np)
1915 case PL_PC9 : pcs = PC_MAINT ; break ; 1915 case PL_PC9 : pcs = PC_MAINT ; break ;
1916 default : pcs = PC_DISABLE ; break ; 1916 default : pcs = PC_DISABLE ; break ;
1917 } 1917 }
1918 return(pcs) ; 1918 return pcs;
1919} 1919}
1920 1920
1921char *get_linestate(struct s_smc *smc, int np) 1921char *get_linestate(struct s_smc *smc, int np)
@@ -1937,7 +1937,7 @@ char *get_linestate(struct s_smc *smc, int np)
1937 default: ls = "unknown" ; break ; 1937 default: ls = "unknown" ; break ;
1938#endif 1938#endif
1939 } 1939 }
1940 return(ls) ; 1940 return ls;
1941} 1941}
1942 1942
1943char *get_pcmstate(struct s_smc *smc, int np) 1943char *get_pcmstate(struct s_smc *smc, int np)
@@ -1959,7 +1959,7 @@ char *get_pcmstate(struct s_smc *smc, int np)
1959 case PL_PC9 : pcs = "MAINT" ; break ; 1959 case PL_PC9 : pcs = "MAINT" ; break ;
1960 default : pcs = "UNKNOWN" ; break ; 1960 default : pcs = "UNKNOWN" ; break ;
1961 } 1961 }
1962 return(pcs) ; 1962 return pcs;
1963} 1963}
1964 1964
1965void list_phy(struct s_smc *smc) 1965void list_phy(struct s_smc *smc)
diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c
index a320fdb3727d..9ac4665d7411 100644
--- a/drivers/net/skfp/pmf.c
+++ b/drivers/net/skfp/pmf.c
@@ -328,7 +328,7 @@ static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
328 * build SMT header 328 * build SMT header
329 */ 329 */
330 if (!(mb = smt_get_mbuf(smc))) 330 if (!(mb = smt_get_mbuf(smc)))
331 return(mb) ; 331 return mb;
332 332
333 smt = smtod(mb, struct smt_header *) ; 333 smt = smtod(mb, struct smt_header *) ;
334 smt->smt_dest = req->smt_source ; /* DA == source of request */ 334 smt->smt_dest = req->smt_source ; /* DA == source of request */
@@ -493,7 +493,7 @@ static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
493 smt_add_para(smc,&set_pcon,(u_short) SMT_P1035,0,0) ; 493 smt_add_para(smc,&set_pcon,(u_short) SMT_P1035,0,0) ;
494 smt_add_para(smc,&set_pcon,(u_short) SMT_P1036,0,0) ; 494 smt_add_para(smc,&set_pcon,(u_short) SMT_P1036,0,0) ;
495 } 495 }
496 return(mb) ; 496 return mb;
497} 497}
498 498
499static int smt_authorize(struct s_smc *smc, struct smt_header *sm) 499static int smt_authorize(struct s_smc *smc, struct smt_header *sm)
@@ -511,7 +511,7 @@ static int smt_authorize(struct s_smc *smc, struct smt_header *sm)
511 if (i != 8) { 511 if (i != 8) {
512 if (memcmp((char *) &sm->smt_sid, 512 if (memcmp((char *) &sm->smt_sid,
513 (char *) &smc->mib.fddiPRPMFStation,8)) 513 (char *) &smc->mib.fddiPRPMFStation,8))
514 return(1) ; 514 return 1;
515 } 515 }
516 /* 516 /*
517 * check authorization parameter if passwd not zero 517 * check authorization parameter if passwd not zero
@@ -522,13 +522,13 @@ static int smt_authorize(struct s_smc *smc, struct smt_header *sm)
522 if (i != 8) { 522 if (i != 8) {
523 pa = (struct smt_para *) sm_to_para(smc,sm,SMT_P_AUTHOR) ; 523 pa = (struct smt_para *) sm_to_para(smc,sm,SMT_P_AUTHOR) ;
524 if (!pa) 524 if (!pa)
525 return(1) ; 525 return 1;
526 if (pa->p_len != 8) 526 if (pa->p_len != 8)
527 return(1) ; 527 return 1;
528 if (memcmp((char *)(pa+1),(char *)smc->mib.fddiPRPMFPasswd,8)) 528 if (memcmp((char *)(pa+1),(char *)smc->mib.fddiPRPMFPasswd,8))
529 return(1) ; 529 return 1;
530 } 530 }
531 return(0) ; 531 return 0;
532} 532}
533 533
534static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm) 534static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm)
@@ -542,9 +542,9 @@ static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm)
542 if ((smc->mib.fddiSMTSetCount.count != sc->count) || 542 if ((smc->mib.fddiSMTSetCount.count != sc->count) ||
543 memcmp((char *) smc->mib.fddiSMTSetCount.timestamp, 543 memcmp((char *) smc->mib.fddiSMTSetCount.timestamp,
544 (char *)sc->timestamp,8)) 544 (char *)sc->timestamp,8))
545 return(1) ; 545 return 1;
546 } 546 }
547 return(0) ; 547 return 0;
548} 548}
549 549
550void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, 550void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
@@ -1109,7 +1109,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1109 break ; 1109 break ;
1110 case 0x2000 : 1110 case 0x2000 :
1111 if (mac < 0 || mac >= NUMMACS) { 1111 if (mac < 0 || mac >= NUMMACS) {
1112 return(SMT_RDF_NOPARAM) ; 1112 return SMT_RDF_NOPARAM;
1113 } 1113 }
1114 mib_m = &smc->mib.m[mac] ; 1114 mib_m = &smc->mib.m[mac] ;
1115 mib_addr = (char *) mib_m ; 1115 mib_addr = (char *) mib_m ;
@@ -1118,7 +1118,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1118 break ; 1118 break ;
1119 case 0x3000 : 1119 case 0x3000 :
1120 if (path < 0 || path >= NUMPATHS) { 1120 if (path < 0 || path >= NUMPATHS) {
1121 return(SMT_RDF_NOPARAM) ; 1121 return SMT_RDF_NOPARAM;
1122 } 1122 }
1123 mib_a = &smc->mib.a[path] ; 1123 mib_a = &smc->mib.a[path] ;
1124 mib_addr = (char *) mib_a ; 1124 mib_addr = (char *) mib_a ;
@@ -1127,7 +1127,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1127 break ; 1127 break ;
1128 case 0x4000 : 1128 case 0x4000 :
1129 if (port < 0 || port >= smt_mib_phys(smc)) { 1129 if (port < 0 || port >= smt_mib_phys(smc)) {
1130 return(SMT_RDF_NOPARAM) ; 1130 return SMT_RDF_NOPARAM;
1131 } 1131 }
1132 mib_p = &smc->mib.p[port_to_mib(smc,port)] ; 1132 mib_p = &smc->mib.p[port_to_mib(smc,port)] ;
1133 mib_addr = (char *) mib_p ; 1133 mib_addr = (char *) mib_p ;
@@ -1151,22 +1151,20 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1151 case SMT_P10F9 : 1151 case SMT_P10F9 :
1152#endif 1152#endif
1153 case SMT_P20F1 : 1153 case SMT_P20F1 :
1154 if (!local) { 1154 if (!local)
1155 return(SMT_RDF_NOPARAM) ; 1155 return SMT_RDF_NOPARAM;
1156 }
1157 break ; 1156 break ;
1158 } 1157 }
1159 pt = smt_get_ptab(pa->p_type) ; 1158 pt = smt_get_ptab(pa->p_type) ;
1160 if (!pt) { 1159 if (!pt)
1161 return( (pa->p_type & 0xff00) ? SMT_RDF_NOPARAM : 1160 return (pa->p_type & 0xff00) ? SMT_RDF_NOPARAM :
1162 SMT_RDF_ILLEGAL ) ; 1161 SMT_RDF_ILLEGAL;
1163 }
1164 switch (pt->p_access) { 1162 switch (pt->p_access) {
1165 case AC_GR : 1163 case AC_GR :
1166 case AC_S : 1164 case AC_S :
1167 break ; 1165 break ;
1168 default : 1166 default :
1169 return(SMT_RDF_ILLEGAL) ; 1167 return SMT_RDF_ILLEGAL;
1170 } 1168 }
1171 to = mib_addr + pt->p_offset ; 1169 to = mib_addr + pt->p_offset ;
1172 swap = pt->p_swap ; /* pointer to swap string */ 1170 swap = pt->p_swap ; /* pointer to swap string */
@@ -1292,7 +1290,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1292 break ; 1290 break ;
1293 default : 1291 default :
1294 SMT_PANIC(smc,SMT_E0120, SMT_E0120_MSG) ; 1292 SMT_PANIC(smc,SMT_E0120, SMT_E0120_MSG) ;
1295 return(SMT_RDF_ILLEGAL) ; 1293 return SMT_RDF_ILLEGAL;
1296 } 1294 }
1297 } 1295 }
1298 /* 1296 /*
@@ -1501,15 +1499,15 @@ change_mac_para:
1501 default : 1499 default :
1502 break ; 1500 break ;
1503 } 1501 }
1504 return(0) ; 1502 return 0;
1505 1503
1506val_error: 1504val_error:
1507 /* parameter value in frame is out of range */ 1505 /* parameter value in frame is out of range */
1508 return(SMT_RDF_RANGE) ; 1506 return SMT_RDF_RANGE;
1509 1507
1510len_error: 1508len_error:
1511 /* parameter value in frame is too short */ 1509 /* parameter value in frame is too short */
1512 return(SMT_RDF_LENGTH) ; 1510 return SMT_RDF_LENGTH;
1513 1511
1514#if 0 1512#if 0
1515no_author_error: 1513no_author_error:
@@ -1518,7 +1516,7 @@ no_author_error:
1518 * because SBA denied is not a valid return code in the 1516 * because SBA denied is not a valid return code in the
1519 * PMF protocol. 1517 * PMF protocol.
1520 */ 1518 */
1521 return(SMT_RDF_AUTHOR) ; 1519 return SMT_RDF_AUTHOR;
1522#endif 1520#endif
1523} 1521}
1524 1522
@@ -1527,7 +1525,7 @@ static const struct s_p_tab *smt_get_ptab(u_short para)
1527 const struct s_p_tab *pt ; 1525 const struct s_p_tab *pt ;
1528 for (pt = p_tab ; pt->p_num && pt->p_num != para ; pt++) 1526 for (pt = p_tab ; pt->p_num && pt->p_num != para ; pt++)
1529 ; 1527 ;
1530 return(pt->p_num ? pt : NULL) ; 1528 return pt->p_num ? pt : NULL;
1531} 1529}
1532 1530
1533static int smt_mib_phys(struct s_smc *smc) 1531static int smt_mib_phys(struct s_smc *smc)
@@ -1535,11 +1533,11 @@ static int smt_mib_phys(struct s_smc *smc)
1535#ifdef CONCENTRATOR 1533#ifdef CONCENTRATOR
1536 SK_UNUSED(smc) ; 1534 SK_UNUSED(smc) ;
1537 1535
1538 return(NUMPHYS) ; 1536 return NUMPHYS;
1539#else 1537#else
1540 if (smc->s.sas == SMT_SAS) 1538 if (smc->s.sas == SMT_SAS)
1541 return(1) ; 1539 return 1;
1542 return(NUMPHYS) ; 1540 return NUMPHYS;
1543#endif 1541#endif
1544} 1542}
1545 1543
@@ -1548,11 +1546,11 @@ static int port_to_mib(struct s_smc *smc, int p)
1548#ifdef CONCENTRATOR 1546#ifdef CONCENTRATOR
1549 SK_UNUSED(smc) ; 1547 SK_UNUSED(smc) ;
1550 1548
1551 return(p) ; 1549 return p;
1552#else 1550#else
1553 if (smc->s.sas == SMT_SAS) 1551 if (smc->s.sas == SMT_SAS)
1554 return(PS) ; 1552 return PS;
1555 return(p) ; 1553 return p;
1556#endif 1554#endif
1557} 1555}
1558 1556
diff --git a/drivers/net/skfp/queue.c b/drivers/net/skfp/queue.c
index 09adb3d68b7c..c1a0df455a59 100644
--- a/drivers/net/skfp/queue.c
+++ b/drivers/net/skfp/queue.c
@@ -128,7 +128,7 @@ u_short smt_online(struct s_smc *smc, int on)
128{ 128{
129 queue_event(smc,EVENT_ECM,on ? EC_CONNECT : EC_DISCONNECT) ; 129 queue_event(smc,EVENT_ECM,on ? EC_CONNECT : EC_DISCONNECT) ;
130 ev_dispatcher(smc) ; 130 ev_dispatcher(smc) ;
131 return(smc->mib.fddiSMTCF_State) ; 131 return smc->mib.fddiSMTCF_State;
132} 132}
133 133
134/* 134/*
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 31b2dabf094c..ba2e8339fe90 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -209,7 +209,7 @@ static int skfp_init_one(struct pci_dev *pdev,
209 void __iomem *mem; 209 void __iomem *mem;
210 int err; 210 int err;
211 211
212 pr_debug(KERN_INFO "entering skfp_init_one\n"); 212 pr_debug("entering skfp_init_one\n");
213 213
214 if (num_boards == 0) 214 if (num_boards == 0)
215 printk("%s\n", boot_msg); 215 printk("%s\n", boot_msg);
@@ -385,7 +385,7 @@ static int skfp_driver_init(struct net_device *dev)
385 skfddi_priv *bp = &smc->os; 385 skfddi_priv *bp = &smc->os;
386 int err = -EIO; 386 int err = -EIO;
387 387
388 pr_debug(KERN_INFO "entering skfp_driver_init\n"); 388 pr_debug("entering skfp_driver_init\n");
389 389
390 // set the io address in private structures 390 // set the io address in private structures
391 bp->base_addr = dev->base_addr; 391 bp->base_addr = dev->base_addr;
@@ -405,7 +405,7 @@ static int skfp_driver_init(struct net_device *dev)
405 405
406 // Determine the required size of the 'shared' memory area. 406 // Determine the required size of the 'shared' memory area.
407 bp->SharedMemSize = mac_drv_check_space(); 407 bp->SharedMemSize = mac_drv_check_space();
408 pr_debug(KERN_INFO "Memory for HWM: %ld\n", bp->SharedMemSize); 408 pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
409 if (bp->SharedMemSize > 0) { 409 if (bp->SharedMemSize > 0) {
410 bp->SharedMemSize += 16; // for descriptor alignment 410 bp->SharedMemSize += 16; // for descriptor alignment
411 411
@@ -429,18 +429,18 @@ static int skfp_driver_init(struct net_device *dev)
429 429
430 card_stop(smc); // Reset adapter. 430 card_stop(smc); // Reset adapter.
431 431
432 pr_debug(KERN_INFO "mac_drv_init()..\n"); 432 pr_debug("mac_drv_init()..\n");
433 if (mac_drv_init(smc) != 0) { 433 if (mac_drv_init(smc) != 0) {
434 pr_debug(KERN_INFO "mac_drv_init() failed.\n"); 434 pr_debug("mac_drv_init() failed\n");
435 goto fail; 435 goto fail;
436 } 436 }
437 read_address(smc, NULL); 437 read_address(smc, NULL);
438 pr_debug(KERN_INFO "HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a); 438 pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
439 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6); 439 memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
440 440
441 smt_reset_defaults(smc, 0); 441 smt_reset_defaults(smc, 0);
442 442
443 return (0); 443 return 0;
444 444
445fail: 445fail:
446 if (bp->SharedMemAddr) { 446 if (bp->SharedMemAddr) {
@@ -485,7 +485,7 @@ static int skfp_open(struct net_device *dev)
485 struct s_smc *smc = netdev_priv(dev); 485 struct s_smc *smc = netdev_priv(dev);
486 int err; 486 int err;
487 487
488 pr_debug(KERN_INFO "entering skfp_open\n"); 488 pr_debug("entering skfp_open\n");
489 /* Register IRQ - support shared interrupts by passing device ptr */ 489 /* Register IRQ - support shared interrupts by passing device ptr */
490 err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED, 490 err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
491 dev->name, dev); 491 dev->name, dev);
@@ -516,7 +516,7 @@ static int skfp_open(struct net_device *dev)
516 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC); 516 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
517 517
518 netif_start_queue(dev); 518 netif_start_queue(dev);
519 return (0); 519 return 0;
520} // skfp_open 520} // skfp_open
521 521
522 522
@@ -565,7 +565,7 @@ static int skfp_close(struct net_device *dev)
565 skb_queue_purge(&bp->SendSkbQueue); 565 skb_queue_purge(&bp->SendSkbQueue);
566 bp->QueueSkb = MAX_TX_QUEUE_LEN; 566 bp->QueueSkb = MAX_TX_QUEUE_LEN;
567 567
568 return (0); 568 return 0;
569} // skfp_close 569} // skfp_close
570 570
571 571
@@ -794,7 +794,7 @@ static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
794 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; 794 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
795 795
796#endif 796#endif
797 return ((struct net_device_stats *) &bp->os.MacStat); 797 return (struct net_device_stats *)&bp->os.MacStat;
798} // ctl_get_stat 798} // ctl_get_stat
799 799
800 800
@@ -856,12 +856,12 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
856 /* Enable promiscuous mode, if necessary */ 856 /* Enable promiscuous mode, if necessary */
857 if (dev->flags & IFF_PROMISC) { 857 if (dev->flags & IFF_PROMISC) {
858 mac_drv_rx_mode(smc, RX_ENABLE_PROMISC); 858 mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
859 pr_debug(KERN_INFO "PROMISCUOUS MODE ENABLED\n"); 859 pr_debug("PROMISCUOUS MODE ENABLED\n");
860 } 860 }
861 /* Else, update multicast address table */ 861 /* Else, update multicast address table */
862 else { 862 else {
863 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC); 863 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
864 pr_debug(KERN_INFO "PROMISCUOUS MODE DISABLED\n"); 864 pr_debug("PROMISCUOUS MODE DISABLED\n");
865 865
866 // Reset all MC addresses 866 // Reset all MC addresses
867 mac_clear_multicast(smc); 867 mac_clear_multicast(smc);
@@ -869,7 +869,7 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
869 869
870 if (dev->flags & IFF_ALLMULTI) { 870 if (dev->flags & IFF_ALLMULTI) {
871 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI); 871 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
872 pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n"); 872 pr_debug("ENABLE ALL MC ADDRESSES\n");
873 } else if (!netdev_mc_empty(dev)) { 873 } else if (!netdev_mc_empty(dev)) {
874 if (netdev_mc_count(dev) <= FPMAX_MULTICAST) { 874 if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
875 /* use exact filtering */ 875 /* use exact filtering */
@@ -880,18 +880,18 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
880 (struct fddi_addr *)ha->addr, 880 (struct fddi_addr *)ha->addr,
881 1); 881 1);
882 882
883 pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n", 883 pr_debug("ENABLE MC ADDRESS: %pMF\n",
884 ha->addr); 884 ha->addr);
885 } 885 }
886 886
887 } else { // more MC addresses than HW supports 887 } else { // more MC addresses than HW supports
888 888
889 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI); 889 mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
890 pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n"); 890 pr_debug("ENABLE ALL MC ADDRESSES\n");
891 } 891 }
892 } else { // no MC addresses 892 } else { // no MC addresses
893 893
894 pr_debug(KERN_INFO "DISABLE ALL MC ADDRESSES\n"); 894 pr_debug("DISABLE ALL MC ADDRESSES\n");
895 } 895 }
896 896
897 /* Update adapter filters */ 897 /* Update adapter filters */
@@ -932,7 +932,7 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
932 ResetAdapter(smc); 932 ResetAdapter(smc);
933 spin_unlock_irqrestore(&bp->DriverLock, Flags); 933 spin_unlock_irqrestore(&bp->DriverLock, Flags);
934 934
935 return (0); /* always return zero */ 935 return 0; /* always return zero */
936} // skfp_ctl_set_mac_address 936} // skfp_ctl_set_mac_address
937 937
938 938
@@ -1045,7 +1045,7 @@ static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
1045 struct s_smc *smc = netdev_priv(dev); 1045 struct s_smc *smc = netdev_priv(dev);
1046 skfddi_priv *bp = &smc->os; 1046 skfddi_priv *bp = &smc->os;
1047 1047
1048 pr_debug(KERN_INFO "skfp_send_pkt\n"); 1048 pr_debug("skfp_send_pkt\n");
1049 1049
1050 /* 1050 /*
1051 * Verify that incoming transmit request is OK 1051 * Verify that incoming transmit request is OK
@@ -1114,13 +1114,13 @@ static void send_queued_packets(struct s_smc *smc)
1114 1114
1115 int frame_status; // HWM tx frame status. 1115 int frame_status; // HWM tx frame status.
1116 1116
1117 pr_debug(KERN_INFO "send queued packets\n"); 1117 pr_debug("send queued packets\n");
1118 for (;;) { 1118 for (;;) {
1119 // send first buffer from queue 1119 // send first buffer from queue
1120 skb = skb_dequeue(&bp->SendSkbQueue); 1120 skb = skb_dequeue(&bp->SendSkbQueue);
1121 1121
1122 if (!skb) { 1122 if (!skb) {
1123 pr_debug(KERN_INFO "queue empty\n"); 1123 pr_debug("queue empty\n");
1124 return; 1124 return;
1125 } // queue empty ! 1125 } // queue empty !
1126 1126
@@ -1232,7 +1232,7 @@ static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
1232static void ResetAdapter(struct s_smc *smc) 1232static void ResetAdapter(struct s_smc *smc)
1233{ 1233{
1234 1234
1235 pr_debug(KERN_INFO "[fddi: ResetAdapter]\n"); 1235 pr_debug("[fddi: ResetAdapter]\n");
1236 1236
1237 // Stop the adapter. 1237 // Stop the adapter.
1238 1238
@@ -1278,7 +1278,7 @@ void llc_restart_tx(struct s_smc *smc)
1278{ 1278{
1279 skfddi_priv *bp = &smc->os; 1279 skfddi_priv *bp = &smc->os;
1280 1280
1281 pr_debug(KERN_INFO "[llc_restart_tx]\n"); 1281 pr_debug("[llc_restart_tx]\n");
1282 1282
1283 // Try to send queued packets 1283 // Try to send queued packets
1284 spin_unlock(&bp->DriverLock); 1284 spin_unlock(&bp->DriverLock);
@@ -1308,21 +1308,21 @@ void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
1308{ 1308{
1309 void *virt; 1309 void *virt;
1310 1310
1311 pr_debug(KERN_INFO "mac_drv_get_space (%d bytes), ", size); 1311 pr_debug("mac_drv_get_space (%d bytes), ", size);
1312 virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap); 1312 virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
1313 1313
1314 if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) { 1314 if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
1315 printk("Unexpected SMT memory size requested: %d\n", size); 1315 printk("Unexpected SMT memory size requested: %d\n", size);
1316 return (NULL); 1316 return NULL;
1317 } 1317 }
1318 smc->os.SharedMemHeap += size; // Move heap pointer. 1318 smc->os.SharedMemHeap += size; // Move heap pointer.
1319 1319
1320 pr_debug(KERN_INFO "mac_drv_get_space end\n"); 1320 pr_debug("mac_drv_get_space end\n");
1321 pr_debug(KERN_INFO "virt addr: %lx\n", (ulong) virt); 1321 pr_debug("virt addr: %lx\n", (ulong) virt);
1322 pr_debug(KERN_INFO "bus addr: %lx\n", (ulong) 1322 pr_debug("bus addr: %lx\n", (ulong)
1323 (smc->os.SharedMemDMA + 1323 (smc->os.SharedMemDMA +
1324 ((char *) virt - (char *)smc->os.SharedMemAddr))); 1324 ((char *) virt - (char *)smc->os.SharedMemAddr)));
1325 return (virt); 1325 return virt;
1326} // mac_drv_get_space 1326} // mac_drv_get_space
1327 1327
1328 1328
@@ -1349,7 +1349,7 @@ void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1349 1349
1350 char *virt; 1350 char *virt;
1351 1351
1352 pr_debug(KERN_INFO "mac_drv_get_desc_mem\n"); 1352 pr_debug("mac_drv_get_desc_mem\n");
1353 1353
1354 // Descriptor memory must be aligned on 16-byte boundary. 1354 // Descriptor memory must be aligned on 16-byte boundary.
1355 1355
@@ -1363,9 +1363,9 @@ void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1363 1363
1364 if (!mac_drv_get_space(smc, size)) { 1364 if (!mac_drv_get_space(smc, size)) {
1365 printk("fddi: Unable to align descriptor memory.\n"); 1365 printk("fddi: Unable to align descriptor memory.\n");
1366 return (NULL); 1366 return NULL;
1367 } 1367 }
1368 return (virt + size); 1368 return virt + size;
1369} // mac_drv_get_desc_mem 1369} // mac_drv_get_desc_mem
1370 1370
1371 1371
@@ -1384,8 +1384,8 @@ void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1384 ************************/ 1384 ************************/
1385unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt) 1385unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1386{ 1386{
1387 return (smc->os.SharedMemDMA + 1387 return smc->os.SharedMemDMA +
1388 ((char *) virt - (char *)smc->os.SharedMemAddr)); 1388 ((char *) virt - (char *)smc->os.SharedMemAddr);
1389} // mac_drv_virt2phys 1389} // mac_drv_virt2phys
1390 1390
1391 1391
@@ -1419,8 +1419,8 @@ unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1419 ************************/ 1419 ************************/
1420u_long dma_master(struct s_smc * smc, void *virt, int len, int flag) 1420u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
1421{ 1421{
1422 return (smc->os.SharedMemDMA + 1422 return smc->os.SharedMemDMA +
1423 ((char *) virt - (char *)smc->os.SharedMemAddr)); 1423 ((char *) virt - (char *)smc->os.SharedMemAddr);
1424} // dma_master 1424} // dma_master
1425 1425
1426 1426
@@ -1493,7 +1493,7 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
1493{ 1493{
1494 struct sk_buff *skb; 1494 struct sk_buff *skb;
1495 1495
1496 pr_debug(KERN_INFO "entering mac_drv_tx_complete\n"); 1496 pr_debug("entering mac_drv_tx_complete\n");
1497 // Check if this TxD points to a skb 1497 // Check if this TxD points to a skb
1498 1498
1499 if (!(skb = txd->txd_os.skb)) { 1499 if (!(skb = txd->txd_os.skb)) {
@@ -1513,7 +1513,7 @@ void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
1513 // free the skb 1513 // free the skb
1514 dev_kfree_skb_irq(skb); 1514 dev_kfree_skb_irq(skb);
1515 1515
1516 pr_debug(KERN_INFO "leaving mac_drv_tx_complete\n"); 1516 pr_debug("leaving mac_drv_tx_complete\n");
1517} // mac_drv_tx_complete 1517} // mac_drv_tx_complete
1518 1518
1519 1519
@@ -1580,7 +1580,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1580 unsigned short ri; 1580 unsigned short ri;
1581 u_int RifLength; 1581 u_int RifLength;
1582 1582
1583 pr_debug(KERN_INFO "entering mac_drv_rx_complete (len=%d)\n", len); 1583 pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
1584 if (frag_count != 1) { // This is not allowed to happen. 1584 if (frag_count != 1) { // This is not allowed to happen.
1585 1585
1586 printk("fddi: Multi-fragment receive!\n"); 1586 printk("fddi: Multi-fragment receive!\n");
@@ -1589,7 +1589,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1589 } 1589 }
1590 skb = rxd->rxd_os.skb; 1590 skb = rxd->rxd_os.skb;
1591 if (!skb) { 1591 if (!skb) {
1592 pr_debug(KERN_INFO "No skb in rxd\n"); 1592 pr_debug("No skb in rxd\n");
1593 smc->os.MacStat.gen.rx_errors++; 1593 smc->os.MacStat.gen.rx_errors++;
1594 goto RequeueRxd; 1594 goto RequeueRxd;
1595 } 1595 }
@@ -1619,7 +1619,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1619 else { 1619 else {
1620 int n; 1620 int n;
1621// goos: RIF removal has still to be tested 1621// goos: RIF removal has still to be tested
1622 pr_debug(KERN_INFO "RIF found\n"); 1622 pr_debug("RIF found\n");
1623 // Get RIF length from Routing Control (RC) field. 1623 // Get RIF length from Routing Control (RC) field.
1624 cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header. 1624 cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
1625 1625
@@ -1664,7 +1664,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1664 return; 1664 return;
1665 1665
1666 RequeueRxd: 1666 RequeueRxd:
1667 pr_debug(KERN_INFO "Rx: re-queue RXD.\n"); 1667 pr_debug("Rx: re-queue RXD.\n");
1668 mac_drv_requeue_rxd(smc, rxd, frag_count); 1668 mac_drv_requeue_rxd(smc, rxd, frag_count);
1669 smc->os.MacStat.gen.rx_errors++; // Count receive packets 1669 smc->os.MacStat.gen.rx_errors++; // Count receive packets
1670 // not indicated. 1670 // not indicated.
@@ -1775,7 +1775,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
1775 struct sk_buff *skb; 1775 struct sk_buff *skb;
1776 volatile struct s_smt_fp_rxd *rxd; 1776 volatile struct s_smt_fp_rxd *rxd;
1777 1777
1778 pr_debug(KERN_INFO "entering mac_drv_fill_rxd\n"); 1778 pr_debug("entering mac_drv_fill_rxd\n");
1779 1779
1780 // Walk through the list of free receive buffers, passing receive 1780 // Walk through the list of free receive buffers, passing receive
1781 // buffers to the HWM as long as RXDs are available. 1781 // buffers to the HWM as long as RXDs are available.
@@ -1783,7 +1783,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
1783 MaxFrameSize = smc->os.MaxFrameSize; 1783 MaxFrameSize = smc->os.MaxFrameSize;
1784 // Check if there is any RXD left. 1784 // Check if there is any RXD left.
1785 while (HWM_GET_RX_FREE(smc) > 0) { 1785 while (HWM_GET_RX_FREE(smc) > 0) {
1786 pr_debug(KERN_INFO ".\n"); 1786 pr_debug(".\n");
1787 1787
1788 rxd = HWM_GET_CURR_RXD(smc); 1788 rxd = HWM_GET_CURR_RXD(smc);
1789 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC); 1789 skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
@@ -1814,7 +1814,7 @@ void mac_drv_fill_rxd(struct s_smc *smc)
1814 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize, 1814 hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
1815 FIRST_FRAG | LAST_FRAG); 1815 FIRST_FRAG | LAST_FRAG);
1816 } 1816 }
1817 pr_debug(KERN_INFO "leaving mac_drv_fill_rxd\n"); 1817 pr_debug("leaving mac_drv_fill_rxd\n");
1818} // mac_drv_fill_rxd 1818} // mac_drv_fill_rxd
1819 1819
1820 1820
@@ -1904,12 +1904,12 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1904 pr_debug("fddi: Discard invalid local SMT frame\n"); 1904 pr_debug("fddi: Discard invalid local SMT frame\n");
1905 pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n", 1905 pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
1906 len, la_len, (unsigned long) look_ahead); 1906 len, la_len, (unsigned long) look_ahead);
1907 return (0); 1907 return 0;
1908 } 1908 }
1909 skb = alloc_skb(len + 3, GFP_ATOMIC); 1909 skb = alloc_skb(len + 3, GFP_ATOMIC);
1910 if (!skb) { 1910 if (!skb) {
1911 pr_debug("fddi: Local SMT: skb memory exhausted.\n"); 1911 pr_debug("fddi: Local SMT: skb memory exhausted.\n");
1912 return (0); 1912 return 0;
1913 } 1913 }
1914 skb_reserve(skb, 3); 1914 skb_reserve(skb, 3);
1915 skb_put(skb, len); 1915 skb_put(skb, len);
@@ -1919,7 +1919,7 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1919 skb->protocol = fddi_type_trans(skb, smc->os.dev); 1919 skb->protocol = fddi_type_trans(skb, smc->os.dev);
1920 netif_rx(skb); 1920 netif_rx(skb);
1921 1921
1922 return (0); 1922 return 0;
1923} // mac_drv_rx_init 1923} // mac_drv_rx_init
1924 1924
1925 1925
@@ -2034,17 +2034,17 @@ void smt_stat_counter(struct s_smc *smc, int stat)
2034{ 2034{
2035// BOOLEAN RingIsUp ; 2035// BOOLEAN RingIsUp ;
2036 2036
2037 pr_debug(KERN_INFO "smt_stat_counter\n"); 2037 pr_debug("smt_stat_counter\n");
2038 switch (stat) { 2038 switch (stat) {
2039 case 0: 2039 case 0:
2040 pr_debug(KERN_INFO "Ring operational change.\n"); 2040 pr_debug("Ring operational change.\n");
2041 break; 2041 break;
2042 case 1: 2042 case 1:
2043 pr_debug(KERN_INFO "Receive fifo overflow.\n"); 2043 pr_debug("Receive fifo overflow.\n");
2044 smc->os.MacStat.gen.rx_errors++; 2044 smc->os.MacStat.gen.rx_errors++;
2045 break; 2045 break;
2046 default: 2046 default:
2047 pr_debug(KERN_INFO "Unknown status (%d).\n", stat); 2047 pr_debug("Unknown status (%d).\n", stat);
2048 break; 2048 break;
2049 } 2049 }
2050} // smt_stat_counter 2050} // smt_stat_counter
@@ -2100,10 +2100,10 @@ void cfm_state_change(struct s_smc *smc, int c_state)
2100 s = "SC11_C_WRAP_S"; 2100 s = "SC11_C_WRAP_S";
2101 break; 2101 break;
2102 default: 2102 default:
2103 pr_debug(KERN_INFO "cfm_state_change: unknown %d\n", c_state); 2103 pr_debug("cfm_state_change: unknown %d\n", c_state);
2104 return; 2104 return;
2105 } 2105 }
2106 pr_debug(KERN_INFO "cfm_state_change: %s\n", s); 2106 pr_debug("cfm_state_change: %s\n", s);
2107#endif // DRIVERDEBUG 2107#endif // DRIVERDEBUG
2108} // cfm_state_change 2108} // cfm_state_change
2109 2109
@@ -2158,7 +2158,7 @@ void ecm_state_change(struct s_smc *smc, int e_state)
2158 s = "unknown"; 2158 s = "unknown";
2159 break; 2159 break;
2160 } 2160 }
2161 pr_debug(KERN_INFO "ecm_state_change: %s\n", s); 2161 pr_debug("ecm_state_change: %s\n", s);
2162#endif //DRIVERDEBUG 2162#endif //DRIVERDEBUG
2163} // ecm_state_change 2163} // ecm_state_change
2164 2164
@@ -2213,7 +2213,7 @@ void rmt_state_change(struct s_smc *smc, int r_state)
2213 s = "unknown"; 2213 s = "unknown";
2214 break; 2214 break;
2215 } 2215 }
2216 pr_debug(KERN_INFO "[rmt_state_change: %s]\n", s); 2216 pr_debug("[rmt_state_change: %s]\n", s);
2217#endif // DRIVERDEBUG 2217#endif // DRIVERDEBUG
2218} // rmt_state_change 2218} // rmt_state_change
2219 2219
@@ -2233,7 +2233,7 @@ void rmt_state_change(struct s_smc *smc, int r_state)
2233 ************************/ 2233 ************************/
2234void drv_reset_indication(struct s_smc *smc) 2234void drv_reset_indication(struct s_smc *smc)
2235{ 2235{
2236 pr_debug(KERN_INFO "entering drv_reset_indication\n"); 2236 pr_debug("entering drv_reset_indication\n");
2237 2237
2238 smc->os.ResetRequested = TRUE; // Set flag. 2238 smc->os.ResetRequested = TRUE; // Set flag.
2239 2239
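
Note: the skfddi.c hunks also strip KERN_INFO out of every pr_debug() call. pr_debug() supplies its own log level, so a KERN_* marker embedded in the format string is not parsed as a level; it is printed as literal bytes. A sketch of why, assuming pr_debug()'s usual non-dynamic-debug definition:

	#define pr_debug(fmt, ...) \
		printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)

	/*
	 * pr_debug(KERN_INFO "entering skfp_open\n") therefore embeds the
	 * KERN_INFO prefix inside an already KERN_DEBUG message; dropping
	 * the marker, as this patch does, is the fix.
	 */
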
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 6f35bb77595f..2d9941c045bc 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -127,22 +127,22 @@ static inline int is_my_addr(const struct s_smc *smc,
127 127
128static inline int is_broadcast(const struct fddi_addr *addr) 128static inline int is_broadcast(const struct fddi_addr *addr)
129{ 129{
130 return(*(u_short *)(&addr->a[0]) == 0xffff && 130 return *(u_short *)(&addr->a[0]) == 0xffff &&
131 *(u_short *)(&addr->a[2]) == 0xffff && 131 *(u_short *)(&addr->a[2]) == 0xffff &&
132 *(u_short *)(&addr->a[4]) == 0xffff ) ; 132 *(u_short *)(&addr->a[4]) == 0xffff;
133} 133}
134 134
135static inline int is_individual(const struct fddi_addr *addr) 135static inline int is_individual(const struct fddi_addr *addr)
136{ 136{
137 return(!(addr->a[0] & GROUP_ADDR)) ; 137 return !(addr->a[0] & GROUP_ADDR);
138} 138}
139 139
140static inline int is_equal(const struct fddi_addr *addr1, 140static inline int is_equal(const struct fddi_addr *addr1,
141 const struct fddi_addr *addr2) 141 const struct fddi_addr *addr2)
142{ 142{
143 return(*(u_short *)(&addr1->a[0]) == *(u_short *)(&addr2->a[0]) && 143 return *(u_short *)(&addr1->a[0]) == *(u_short *)(&addr2->a[0]) &&
144 *(u_short *)(&addr1->a[2]) == *(u_short *)(&addr2->a[2]) && 144 *(u_short *)(&addr1->a[2]) == *(u_short *)(&addr2->a[2]) &&
145 *(u_short *)(&addr1->a[4]) == *(u_short *)(&addr2->a[4]) ) ; 145 *(u_short *)(&addr1->a[4]) == *(u_short *)(&addr2->a[4]);
146} 146}
147 147
148/* 148/*
@@ -457,8 +457,8 @@ static int div_ratio(u_long upper, u_long lower)
457 else 457 else
458 upper <<= 16L ; 458 upper <<= 16L ;
459 if (!lower) 459 if (!lower)
460 return(0) ; 460 return 0;
461 return((int)(upper/lower)) ; 461 return (int)(upper/lower) ;
462} 462}
463 463
464#ifndef SLIM_SMT 464#ifndef SLIM_SMT
@@ -1111,11 +1111,11 @@ SMbuf *smt_build_frame(struct s_smc *smc, int class, int type,
1111 1111
1112#if 0 1112#if 0
1113 if (!smc->r.sm_ma_avail) { 1113 if (!smc->r.sm_ma_avail) {
1114 return(0) ; 1114 return 0;
1115 } 1115 }
1116#endif 1116#endif
1117 if (!(mb = smt_get_mbuf(smc))) 1117 if (!(mb = smt_get_mbuf(smc)))
1118 return(mb) ; 1118 return mb;
1119 1119
1120 mb->sm_len = length ; 1120 mb->sm_len = length ;
1121 smt = smtod(mb, struct smt_header *) ; 1121 smt = smtod(mb, struct smt_header *) ;
@@ -1136,7 +1136,7 @@ SMbuf *smt_build_frame(struct s_smc *smc, int class, int type,
1136 smt->smt_tid = smt_get_tid(smc) ; /* set transaction ID */ 1136 smt->smt_tid = smt_get_tid(smc) ; /* set transaction ID */
1137 smt->smt_pad = 0 ; 1137 smt->smt_pad = 0 ;
1138 smt->smt_len = length - sizeof(struct smt_header) ; 1138 smt->smt_len = length - sizeof(struct smt_header) ;
1139 return(mb) ; 1139 return mb;
1140} 1140}
1141 1141
1142static void smt_add_frame_len(SMbuf *mb, int len) 1142static void smt_add_frame_len(SMbuf *mb, int len)
@@ -1375,7 +1375,7 @@ static int smt_fill_path(struct s_smc *smc, struct smt_p_path *path)
1375 pd_mac = (struct smt_mac_rec *) phy ; 1375 pd_mac = (struct smt_mac_rec *) phy ;
1376 pd_mac->mac_addr = smc->mib.m[MAC0].fddiMACSMTAddress ; 1376 pd_mac->mac_addr = smc->mib.m[MAC0].fddiMACSMTAddress ;
1377 pd_mac->mac_resource_idx = mac_con_resource_index(smc,1) ; 1377 pd_mac->mac_resource_idx = mac_con_resource_index(smc,1) ;
1378 return(len) ; 1378 return len;
1379} 1379}
1380 1380
1381/* 1381/*
@@ -1563,7 +1563,7 @@ u_long smt_get_tid(struct s_smc *smc)
1563 u_long tid ; 1563 u_long tid ;
1564 while ((tid = ++(smc->sm.smt_tid) ^ SMT_TID_MAGIC) == 0) 1564 while ((tid = ++(smc->sm.smt_tid) ^ SMT_TID_MAGIC) == 0)
1565 ; 1565 ;
1566 return(tid & 0x3fffffffL) ; 1566 return tid & 0x3fffffffL;
1567} 1567}
1568 1568
1569 1569
@@ -1654,11 +1654,11 @@ int smt_check_para(struct s_smc *smc, struct smt_header *sm,
1654 while (*p) { 1654 while (*p) {
1655 if (!sm_to_para(smc,sm,(int) *p)) { 1655 if (!sm_to_para(smc,sm,(int) *p)) {
1656 DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0); 1656 DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0);
1657 return(-1) ; 1657 return -1;
1658 } 1658 }
1659 p++ ; 1659 p++ ;
1660 } 1660 }
1661 return(0) ; 1661 return 0;
1662} 1662}
1663 1663
1664void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para) 1664void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para)
@@ -1687,7 +1687,7 @@ void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para)
1687 return NULL; 1687 return NULL;
1688 } 1688 }
1689 if (found) 1689 if (found)
1690 return(found) ; 1690 return found;
1691 } 1691 }
1692 return NULL; 1692 return NULL;
1693} 1693}
@@ -1732,7 +1732,7 @@ char *addr_to_string(struct fddi_addr *addr)
1732 string[i * 3 + 2] = ':'; 1732 string[i * 3 + 2] = ':';
1733 } 1733 }
1734 string[5 * 3 + 2] = 0; 1734 string[5 * 3 + 2] = 0;
1735 return(string); 1735 return string;
1736} 1736}
1737#endif 1737#endif
1738 1738
@@ -1742,9 +1742,9 @@ int smt_ifconfig(int argc, char *argv[])
1742 if (argc >= 2 && !strcmp(argv[0],"opt_bypass") && 1742 if (argc >= 2 && !strcmp(argv[0],"opt_bypass") &&
1743 !strcmp(argv[1],"yes")) { 1743 !strcmp(argv[1],"yes")) {
1744 smc->mib.fddiSMTBypassPresent = 1 ; 1744 smc->mib.fddiSMTBypassPresent = 1 ;
1745 return(0) ; 1745 return 0;
1746 } 1746 }
1747 return(amdfddi_config(0,argc,argv)) ; 1747 return amdfddi_config(0, argc, argv);
1748} 1748}
1749#endif 1749#endif
1750 1750
@@ -1756,9 +1756,9 @@ static int mac_index(struct s_smc *smc, int mac)
1756 SK_UNUSED(mac) ; 1756 SK_UNUSED(mac) ;
1757#ifdef CONCENTRATOR 1757#ifdef CONCENTRATOR
1758 SK_UNUSED(smc) ; 1758 SK_UNUSED(smc) ;
1759 return(NUMPHYS+1) ; 1759 return NUMPHYS + 1;
1760#else 1760#else
1761 return((smc->s.sas == SMT_SAS) ? 2 : 3) ; 1761 return (smc->s.sas == SMT_SAS) ? 2 : 3;
1762#endif 1762#endif
1763} 1763}
1764 1764
@@ -1768,7 +1768,7 @@ static int mac_index(struct s_smc *smc, int mac)
1768static int phy_index(struct s_smc *smc, int phy) 1768static int phy_index(struct s_smc *smc, int phy)
1769{ 1769{
1770 SK_UNUSED(smc) ; 1770 SK_UNUSED(smc) ;
1771 return(phy+1); 1771 return phy + 1;
1772} 1772}
1773 1773
1774/* 1774/*
@@ -1779,19 +1779,19 @@ static int mac_con_resource_index(struct s_smc *smc, int mac)
1779#ifdef CONCENTRATOR 1779#ifdef CONCENTRATOR
1780 SK_UNUSED(smc) ; 1780 SK_UNUSED(smc) ;
1781 SK_UNUSED(mac) ; 1781 SK_UNUSED(mac) ;
1782 return(entity_to_index(smc,cem_get_downstream(smc,ENTITY_MAC))) ; 1782 return entity_to_index(smc, cem_get_downstream(smc, ENTITY_MAC));
1783#else 1783#else
1784 SK_UNUSED(mac) ; 1784 SK_UNUSED(mac) ;
1785 switch (smc->mib.fddiSMTCF_State) { 1785 switch (smc->mib.fddiSMTCF_State) {
1786 case SC9_C_WRAP_A : 1786 case SC9_C_WRAP_A :
1787 case SC5_THRU_B : 1787 case SC5_THRU_B :
1788 case SC11_C_WRAP_S : 1788 case SC11_C_WRAP_S :
1789 return(1) ; 1789 return 1;
1790 case SC10_C_WRAP_B : 1790 case SC10_C_WRAP_B :
1791 case SC4_THRU_A : 1791 case SC4_THRU_A :
1792 return(2) ; 1792 return 2;
1793 } 1793 }
1794 return(smc->s.sas == SMT_SAS ? 2 : 3) ; 1794 return smc->s.sas == SMT_SAS ? 2 : 3;
1795#endif 1795#endif
1796} 1796}
1797 1797
@@ -1801,21 +1801,21 @@ static int mac_con_resource_index(struct s_smc *smc, int mac)
1801static int phy_con_resource_index(struct s_smc *smc, int phy) 1801static int phy_con_resource_index(struct s_smc *smc, int phy)
1802{ 1802{
1803#ifdef CONCENTRATOR 1803#ifdef CONCENTRATOR
1804 return(entity_to_index(smc,cem_get_downstream(smc,ENTITY_PHY(phy)))) ; 1804 return entity_to_index(smc, cem_get_downstream(smc, ENTITY_PHY(phy))) ;
1805#else 1805#else
1806 switch (smc->mib.fddiSMTCF_State) { 1806 switch (smc->mib.fddiSMTCF_State) {
1807 case SC9_C_WRAP_A : 1807 case SC9_C_WRAP_A :
1808 return(phy == PA ? 3 : 2) ; 1808 return phy == PA ? 3 : 2;
1809 case SC10_C_WRAP_B : 1809 case SC10_C_WRAP_B :
1810 return(phy == PA ? 1 : 3) ; 1810 return phy == PA ? 1 : 3;
1811 case SC4_THRU_A : 1811 case SC4_THRU_A :
1812 return(phy == PA ? 3 : 1) ; 1812 return phy == PA ? 3 : 1;
1813 case SC5_THRU_B : 1813 case SC5_THRU_B :
1814 return(phy == PA ? 2 : 3) ; 1814 return phy == PA ? 2 : 3;
1815 case SC11_C_WRAP_S : 1815 case SC11_C_WRAP_S :
1816 return(2) ; 1816 return 2;
1817 } 1817 }
1818 return(phy) ; 1818 return phy;
1819#endif 1819#endif
1820} 1820}
1821 1821
@@ -1823,16 +1823,16 @@ static int phy_con_resource_index(struct s_smc *smc, int phy)
1823static int entity_to_index(struct s_smc *smc, int e) 1823static int entity_to_index(struct s_smc *smc, int e)
1824{ 1824{
1825 if (e == ENTITY_MAC) 1825 if (e == ENTITY_MAC)
1826 return(mac_index(smc,1)) ; 1826 return mac_index(smc, 1);
1827 else 1827 else
1828 return(phy_index(smc,e - ENTITY_PHY(0))) ; 1828 return phy_index(smc, e - ENTITY_PHY(0));
1829} 1829}
1830#endif 1830#endif
1831 1831
1832#ifdef LITTLE_ENDIAN 1832#ifdef LITTLE_ENDIAN
1833static int smt_swap_short(u_short s) 1833static int smt_swap_short(u_short s)
1834{ 1834{
1835 return(((s>>8)&0xff)|((s&0xff)<<8)) ; 1835 return ((s>>8)&0xff) | ((s&0xff)<<8);
1836} 1836}
1837 1837
1838void smt_swap_para(struct smt_header *sm, int len, int direction) 1838void smt_swap_para(struct smt_header *sm, int len, int direction)
@@ -1996,7 +1996,7 @@ int smt_action(struct s_smc *smc, int class, int code, int index)
1996 } 1996 }
1997 break ; 1997 break ;
1998 default : 1998 default :
1999 return(1) ; 1999 return 1;
2000 } 2000 }
2001 break ; 2001 break ;
2002 case SMT_PORT_ACTION : 2002 case SMT_PORT_ACTION :
@@ -2017,14 +2017,14 @@ int smt_action(struct s_smc *smc, int class, int code, int index)
2017 event = PC_STOP ; 2017 event = PC_STOP ;
2018 break ; 2018 break ;
2019 default : 2019 default :
2020 return(1) ; 2020 return 1;
2021 } 2021 }
2022 queue_event(smc,EVENT_PCM+index,event) ; 2022 queue_event(smc,EVENT_PCM+index,event) ;
2023 break ; 2023 break ;
2024 default : 2024 default :
2025 return(1) ; 2025 return 1;
2026 } 2026 }
2027 return(0) ; 2027 return 0;
2028} 2028}
2029 2029
2030/* 2030/*
diff --git a/drivers/net/skfp/smtdef.c b/drivers/net/skfp/smtdef.c
index 4e07ff7073f1..1acab0b368e3 100644
--- a/drivers/net/skfp/smtdef.c
+++ b/drivers/net/skfp/smtdef.c
@@ -303,7 +303,7 @@ int smt_set_mac_opvalues(struct s_smc *smc)
303 FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_T_REQ, 303 FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_T_REQ,
304 smt_get_event_word(smc)); 304 smt_get_event_word(smc));
305 } 305 }
306 return(st) ; 306 return st;
307} 307}
308 308
309void smt_fixup_mib(struct s_smc *smc) 309void smt_fixup_mib(struct s_smc *smc)
@@ -350,6 +350,6 @@ static int set_min_max(int maxflag, u_long mib, u_long limit, u_long *oper)
350 *oper = limit ; 350 *oper = limit ;
351 else 351 else
352 *oper = mib ; 352 *oper = mib ;
353 return(old != *oper) ; 353 return old != *oper;
354} 354}
355 355
diff --git a/drivers/net/skfp/smtinit.c b/drivers/net/skfp/smtinit.c
index 3c8964ce1837..e3a0c0bc2233 100644
--- a/drivers/net/skfp/smtinit.c
+++ b/drivers/net/skfp/smtinit.c
@@ -120,6 +120,6 @@ int init_smt(struct s_smc *smc, u_char *mac_addr)
120 120
121 PNMI_INIT(smc) ; /* PNMI initialization */ 121 PNMI_INIT(smc) ; /* PNMI initialization */
122 122
123 return(0) ; 123 return 0;
124} 124}
125 125
diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c
index 40882b3faba6..f6f7baf9f27a 100644
--- a/drivers/net/skfp/srf.c
+++ b/drivers/net/skfp/srf.c
@@ -165,7 +165,7 @@ static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
165 165
166 for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) { 166 for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
167 if (evc->evc_code == code && evc->evc_index == index) 167 if (evc->evc_code == code && evc->evc_index == index)
168 return(evc) ; 168 return evc;
169 } 169 }
170 return NULL; 170 return NULL;
171} 171}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 40e5c46e7571..a8a63581d63d 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3178,8 +3178,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
3178 3178
3179 skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); 3179 skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
3180 if (likely(skb)) { 3180 if (likely(skb)) {
3181 netif_receive_skb(skb); 3181 napi_gro_receive(napi, skb);
3182
3183 ++work_done; 3182 ++work_done;
3184 } 3183 }
3185 } 3184 }
@@ -3192,6 +3191,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
3192 if (work_done < to_do) { 3191 if (work_done < to_do) {
3193 unsigned long flags; 3192 unsigned long flags;
3194 3193
3194 napi_gro_flush(napi);
3195 spin_lock_irqsave(&hw->hw_lock, flags); 3195 spin_lock_irqsave(&hw->hw_lock, flags);
3196 __napi_complete(napi); 3196 __napi_complete(napi);
3197 hw->intr_mask |= napimask[skge->port]; 3197 hw->intr_mask |= napimask[skge->port];
@@ -3849,6 +3849,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3849 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3849 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3850 skge->rx_csum = 1; 3850 skge->rx_csum = 1;
3851 } 3851 }
3852 dev->features |= NETIF_F_GRO;
3852 3853
3853 /* read the mac address */ 3854 /* read the mac address */
3854 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3855 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
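
Note: skge here, and sky2 in the next diff, move receive processing to GRO: completed skbs go through napi_gro_receive() instead of netif_receive_skb(), packets GRO is still holding are flushed before the NAPI poll completes, and NETIF_F_GRO is advertised so the feature can be toggled from ethtool. A reduced sketch of the pattern; the example_* names are hypothetical stand-ins for skge's real ring helpers, not its API:

	#include <linux/netdevice.h>

	struct example_priv {
		struct napi_struct napi;
		/* ... ring state ... */
	};

	/* hypothetical ring accessors, standing in for skge_rx_get() etc. */
	struct sk_buff *example_rx_get(struct example_priv *p);
	void example_enable_irq(struct example_priv *p);

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct example_priv *p =
			container_of(napi, struct example_priv, napi);
		int work_done = 0;

		while (work_done < budget) {
			struct sk_buff *skb = example_rx_get(p);

			if (!skb)
				break;
			napi_gro_receive(napi, skb); /* was netif_receive_skb() */
			work_done++;
		}

		if (work_done < budget) {
			napi_gro_flush(napi);	/* push held GRO packets up */
			napi_complete(napi);	/* skge itself takes its hw lock
						 * and uses __napi_complete() */
			example_enable_irq(p);
		}
		return work_done;
	}

	/* and at probe time: dev->features |= NETIF_F_GRO; */
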
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 194e5cf8c763..3ef9b67ac6e6 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4581,7 +4581,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4581 4581
4582 sky2->port = port; 4582 sky2->port = port;
4583 4583
4584 dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG; 4584 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
4585 | NETIF_F_TSO | NETIF_F_GRO;
4585 if (highmem) 4586 if (highmem)
4586 dev->features |= NETIF_F_HIGHDMA; 4587 dev->features |= NETIF_F_HIGHDMA;
4587 4588
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index fa434fb8fb7c..86cbb9ea2f26 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -271,7 +271,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
271 memcpy(sl->xbuff, sl->xhead, sl->xleft); 271 memcpy(sl->xbuff, sl->xhead, sl->xleft);
272 } else { 272 } else {
273 sl->xleft = 0; 273 sl->xleft = 0;
274 sl->tx_dropped++; 274 dev->stats.tx_dropped++;
275 } 275 }
276 } 276 }
277 sl->xhead = sl->xbuff; 277 sl->xhead = sl->xbuff;
@@ -281,7 +281,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
281 memcpy(sl->rbuff, rbuff, sl->rcount); 281 memcpy(sl->rbuff, rbuff, sl->rcount);
282 } else { 282 } else {
283 sl->rcount = 0; 283 sl->rcount = 0;
284 sl->rx_over_errors++; 284 dev->stats.rx_over_errors++;
285 set_bit(SLF_ERROR, &sl->flags); 285 set_bit(SLF_ERROR, &sl->flags);
286 } 286 }
287 } 287 }
@@ -319,6 +319,7 @@ static inline void sl_unlock(struct slip *sl)
319/* Send one completely decapsulated IP datagram to the IP layer. */ 319/* Send one completely decapsulated IP datagram to the IP layer. */
320static void sl_bump(struct slip *sl) 320static void sl_bump(struct slip *sl)
321{ 321{
322 struct net_device *dev = sl->dev;
322 struct sk_buff *skb; 323 struct sk_buff *skb;
323 int count; 324 int count;
324 325
@@ -329,13 +330,13 @@ static void sl_bump(struct slip *sl)
329 if (c & SL_TYPE_COMPRESSED_TCP) { 330 if (c & SL_TYPE_COMPRESSED_TCP) {
330 /* ignore compressed packets when CSLIP is off */ 331 /* ignore compressed packets when CSLIP is off */
331 if (!(sl->mode & SL_MODE_CSLIP)) { 332 if (!(sl->mode & SL_MODE_CSLIP)) {
332 printk(KERN_WARNING "%s: compressed packet ignored\n", sl->dev->name); 333 printk(KERN_WARNING "%s: compressed packet ignored\n", dev->name);
333 return; 334 return;
334 } 335 }
335 /* make sure we've reserved enough space for uncompress 336 /* make sure we've reserved enough space for uncompress
336 to use */ 337 to use */
337 if (count + 80 > sl->buffsize) { 338 if (count + 80 > sl->buffsize) {
338 sl->rx_over_errors++; 339 dev->stats.rx_over_errors++;
339 return; 340 return;
340 } 341 }
341 count = slhc_uncompress(sl->slcomp, sl->rbuff, count); 342 count = slhc_uncompress(sl->slcomp, sl->rbuff, count);
@@ -346,7 +347,7 @@ static void sl_bump(struct slip *sl)
346 /* turn on header compression */ 347 /* turn on header compression */
347 sl->mode |= SL_MODE_CSLIP; 348 sl->mode |= SL_MODE_CSLIP;
348 sl->mode &= ~SL_MODE_ADAPTIVE; 349 sl->mode &= ~SL_MODE_ADAPTIVE;
349 printk(KERN_INFO "%s: header compression turned on\n", sl->dev->name); 350 printk(KERN_INFO "%s: header compression turned on\n", dev->name);
350 } 351 }
351 sl->rbuff[0] &= 0x4f; 352 sl->rbuff[0] &= 0x4f;
352 if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0) 353 if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0)
@@ -355,20 +356,20 @@ static void sl_bump(struct slip *sl)
355 } 356 }
356#endif /* SL_INCLUDE_CSLIP */ 357#endif /* SL_INCLUDE_CSLIP */
357 358
358 sl->rx_bytes += count; 359 dev->stats.rx_bytes += count;
359 360
360 skb = dev_alloc_skb(count); 361 skb = dev_alloc_skb(count);
361 if (skb == NULL) { 362 if (skb == NULL) {
362 printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", sl->dev->name); 363 printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
363 sl->rx_dropped++; 364 dev->stats.rx_dropped++;
364 return; 365 return;
365 } 366 }
366 skb->dev = sl->dev; 367 skb->dev = dev;
367 memcpy(skb_put(skb, count), sl->rbuff, count); 368 memcpy(skb_put(skb, count), sl->rbuff, count);
368 skb_reset_mac_header(skb); 369 skb_reset_mac_header(skb);
369 skb->protocol = htons(ETH_P_IP); 370 skb->protocol = htons(ETH_P_IP);
370 netif_rx(skb); 371 netif_rx(skb);
371 sl->rx_packets++; 372 dev->stats.rx_packets++;
372} 373}
373 374
374/* Encapsulate one IP datagram and stuff into a TTY queue. */ 375/* Encapsulate one IP datagram and stuff into a TTY queue. */
@@ -379,7 +380,7 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
379 380
380 if (len > sl->mtu) { /* Sigh, shouldn't occur BUT ... */ 381 if (len > sl->mtu) { /* Sigh, shouldn't occur BUT ... */
381 printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name); 382 printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name);
382 sl->tx_dropped++; 383 sl->dev->stats.tx_dropped++;
383 sl_unlock(sl); 384 sl_unlock(sl);
384 return; 385 return;
385 } 386 }
@@ -433,7 +434,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
433 if (sl->xleft <= 0) { 434 if (sl->xleft <= 0) {
434 /* Now serial buffer is almost free & we can start 435 /* Now serial buffer is almost free & we can start
435 * transmission of another packet */ 436 * transmission of another packet */
436 sl->tx_packets++; 437 sl->dev->stats.tx_packets++;
437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 438 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
438 sl_unlock(sl); 439 sl_unlock(sl);
439 return; 440 return;
@@ -496,7 +497,7 @@ sl_xmit(struct sk_buff *skb, struct net_device *dev)
496 } 497 }
497 498
498 sl_lock(sl); 499 sl_lock(sl);
499 sl->tx_bytes += skb->len; 500 dev->stats.tx_bytes += skb->len;
500 sl_encaps(sl, skb->data, skb->len); 501 sl_encaps(sl, skb->data, skb->len);
501 spin_unlock(&sl->lock); 502 spin_unlock(&sl->lock);
502 503
@@ -558,39 +559,39 @@ static int sl_change_mtu(struct net_device *dev, int new_mtu)
558 559
559/* Netdevice get statistics request */ 560/* Netdevice get statistics request */
560 561
561static struct net_device_stats * 562static struct rtnl_link_stats64 *
562sl_get_stats(struct net_device *dev) 563sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
563{ 564{
564 static struct net_device_stats stats; 565 struct net_device_stats *devstats = &dev->stats;
565 struct slip *sl = netdev_priv(dev); 566 unsigned long c_rx_dropped = 0;
566#ifdef SL_INCLUDE_CSLIP 567#ifdef SL_INCLUDE_CSLIP
567 struct slcompress *comp; 568 unsigned long c_rx_fifo_errors = 0;
568#endif 569 unsigned long c_tx_fifo_errors = 0;
570 unsigned long c_collisions = 0;
571 struct slip *sl = netdev_priv(dev);
572 struct slcompress *comp = sl->slcomp;
569 573
570 memset(&stats, 0, sizeof(struct net_device_stats));
571
572 stats.rx_packets = sl->rx_packets;
573 stats.tx_packets = sl->tx_packets;
574 stats.rx_bytes = sl->rx_bytes;
575 stats.tx_bytes = sl->tx_bytes;
576 stats.rx_dropped = sl->rx_dropped;
577 stats.tx_dropped = sl->tx_dropped;
578 stats.tx_errors = sl->tx_errors;
579 stats.rx_errors = sl->rx_errors;
580 stats.rx_over_errors = sl->rx_over_errors;
581#ifdef SL_INCLUDE_CSLIP
582 stats.rx_fifo_errors = sl->rx_compressed;
583 stats.tx_fifo_errors = sl->tx_compressed;
584 stats.collisions = sl->tx_misses;
585 comp = sl->slcomp;
586 if (comp) { 574 if (comp) {
587 stats.rx_fifo_errors += comp->sls_i_compressed; 575 c_rx_fifo_errors = comp->sls_i_compressed;
588 stats.rx_dropped += comp->sls_i_tossed; 576 c_rx_dropped = comp->sls_i_tossed;
589 stats.tx_fifo_errors += comp->sls_o_compressed; 577 c_tx_fifo_errors = comp->sls_o_compressed;
590 stats.collisions += comp->sls_o_misses; 578 c_collisions = comp->sls_o_misses;
591 } 579 }
592#endif /* CONFIG_INET */ 580 stats->rx_fifo_errors = sl->rx_compressed + c_rx_fifo_errors;
593 return (&stats); 581 stats->tx_fifo_errors = sl->tx_compressed + c_tx_fifo_errors;
582 stats->collisions = sl->tx_misses + c_collisions;
583#endif
584 stats->rx_packets = devstats->rx_packets;
585 stats->tx_packets = devstats->tx_packets;
586 stats->rx_bytes = devstats->rx_bytes;
587 stats->tx_bytes = devstats->tx_bytes;
588 stats->rx_dropped = devstats->rx_dropped + c_rx_dropped;
589 stats->tx_dropped = devstats->tx_dropped;
590 stats->tx_errors = devstats->tx_errors;
591 stats->rx_errors = devstats->rx_errors;
592 stats->rx_over_errors = devstats->rx_over_errors;
593
594 return stats;
594} 595}
595 596
596/* Netdevice register callback */ 597/* Netdevice register callback */
@@ -633,7 +634,7 @@ static const struct net_device_ops sl_netdev_ops = {
633 .ndo_open = sl_open, 634 .ndo_open = sl_open,
634 .ndo_stop = sl_close, 635 .ndo_stop = sl_close,
635 .ndo_start_xmit = sl_xmit, 636 .ndo_start_xmit = sl_xmit,
636 .ndo_get_stats = sl_get_stats, 637 .ndo_get_stats64 = sl_get_stats64,
637 .ndo_change_mtu = sl_change_mtu, 638 .ndo_change_mtu = sl_change_mtu,
638 .ndo_tx_timeout = sl_tx_timeout, 639 .ndo_tx_timeout = sl_tx_timeout,
639#ifdef CONFIG_SLIP_SMART 640#ifdef CONFIG_SLIP_SMART
@@ -681,7 +682,7 @@ static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
681 while (count--) { 682 while (count--) {
682 if (fp && *fp++) { 683 if (fp && *fp++) {
683 if (!test_and_set_bit(SLF_ERROR, &sl->flags)) 684 if (!test_and_set_bit(SLF_ERROR, &sl->flags))
684 sl->rx_errors++; 685 sl->dev->stats.rx_errors++;
685 cp++; 686 cp++;
686 continue; 687 continue;
687 } 688 }
@@ -943,7 +944,7 @@ static int slip_esc(unsigned char *s, unsigned char *d, int len)
943 } 944 }
944 } 945 }
945 *ptr++ = END; 946 *ptr++ = END;
946 return (ptr - d); 947 return ptr - d;
947} 948}
948 949
949static void slip_unesc(struct slip *sl, unsigned char s) 950static void slip_unesc(struct slip *sl, unsigned char s)
@@ -981,7 +982,7 @@ static void slip_unesc(struct slip *sl, unsigned char s)
981 sl->rbuff[sl->rcount++] = s; 982 sl->rbuff[sl->rcount++] = s;
982 return; 983 return;
983 } 984 }
984 sl->rx_over_errors++; 985 sl->dev->stats.rx_over_errors++;
985 set_bit(SLF_ERROR, &sl->flags); 986 set_bit(SLF_ERROR, &sl->flags);
986 } 987 }
987} 988}
@@ -1057,7 +1058,7 @@ static void slip_unesc6(struct slip *sl, unsigned char s)
1057 sl->rbuff[sl->rcount++] = c; 1058 sl->rbuff[sl->rcount++] = c;
1058 return; 1059 return;
1059 } 1060 }
1060 sl->rx_over_errors++; 1061 sl->dev->stats.rx_over_errors++;
1061 set_bit(SLF_ERROR, &sl->flags); 1062 set_bit(SLF_ERROR, &sl->flags);
1062 } 1063 }
1063 } 1064 }
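The slip.c conversion above drops the driver-private counters in favour of dev->stats and moves statistics reporting to .ndo_get_stats64, which fills a caller-supplied 64-bit structure and returns it (the signature used by this patch's kernel). A minimal sketch of that callback shape, assuming only the dev->stats software counters and none of the CSLIP extras:

    #include <linux/netdevice.h>

    /* Hypothetical minimal .ndo_get_stats64: copy the software counters
     * kept in dev->stats into the structure supplied by the core. */
    static struct rtnl_link_stats64 *
    example_get_stats64(struct net_device *dev,
                        struct rtnl_link_stats64 *stats)
    {
            const struct net_device_stats *devstats = &dev->stats;

            stats->rx_packets = devstats->rx_packets;
            stats->tx_packets = devstats->tx_packets;
            stats->rx_bytes   = devstats->rx_bytes;
            stats->tx_bytes   = devstats->tx_bytes;
            stats->rx_dropped = devstats->rx_dropped;
            stats->tx_dropped = devstats->tx_dropped;
            return stats;   /* the core uses the returned pointer */
    }

    static const struct net_device_ops example_netdev_ops = {
            .ndo_get_stats64 = example_get_stats64,
    };

Keeping the counters in dev->stats also lets the hot paths increment them directly, which is why the slip struct loses its duplicate fields in the slip.h hunk below.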
diff --git a/drivers/net/slip.h b/drivers/net/slip.h
index 9ea5c11287d2..914e958abbfc 100644
--- a/drivers/net/slip.h
+++ b/drivers/net/slip.h
@@ -67,15 +67,6 @@ struct slip {
67 int xleft; /* bytes left in XMIT queue */ 67 int xleft; /* bytes left in XMIT queue */
68 68
69 /* SLIP interface statistics. */ 69 /* SLIP interface statistics. */
70 unsigned long rx_packets; /* inbound frames counter */
71 unsigned long tx_packets; /* outbound frames counter */
72 unsigned long rx_bytes; /* inbound byte counter */
73 unsigned long tx_bytes; /* outbound byte counter */
74 unsigned long rx_errors; /* Parity, etc. errors */
75 unsigned long tx_errors; /* Planned stuff */
76 unsigned long rx_dropped; /* No memory for skb */
77 unsigned long tx_dropped; /* When MTU change */
78 unsigned long rx_over_errors; /* Frame bigger than SLIP buf. */
79#ifdef SL_INCLUDE_CSLIP 70#ifdef SL_INCLUDE_CSLIP
80 unsigned long tx_compressed; 71 unsigned long tx_compressed;
81 unsigned long rx_compressed; 72 unsigned long rx_compressed;
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 8150ba154116..a8e5856ce882 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1049,7 +1049,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
1049 smsc911x_rx_readfifo(pdata, (unsigned int *)skb->head, 1049 smsc911x_rx_readfifo(pdata, (unsigned int *)skb->head,
1050 pktwords); 1050 pktwords);
1051 skb->protocol = eth_type_trans(skb, dev); 1051 skb->protocol = eth_type_trans(skb, dev);
1052 skb->ip_summed = CHECKSUM_NONE; 1052 skb_checksum_none_assert(skb);
1053 netif_receive_skb(skb); 1053 netif_receive_skb(skb);
1054 1054
1055 /* Update counters */ 1055 /* Update counters */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 1636a34d95dd..cb6bcca9d541 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1000,9 +1000,9 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1000 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK)) 1000 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
1001 skb->ip_summed = CHECKSUM_UNNECESSARY; 1001 skb->ip_summed = CHECKSUM_UNNECESSARY;
1002 else 1002 else
1003 skb->ip_summed = CHECKSUM_NONE; 1003 skb_checksum_none_assert(skb);
1004 } else 1004 } else
1005 skb->ip_summed = CHECKSUM_NONE; 1005 skb_checksum_none_assert(skb);
1006 1006
1007 if (data_status & SPIDER_NET_VLAN_PACKET) { 1007 if (data_status & SPIDER_NET_VLAN_PACKET) {
1008 /* further enhancements: HW-accel VLAN 1008 /* further enhancements: HW-accel VLAN
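Both the smsc911x and spider_net hunks replace the open-coded skb->ip_summed = CHECKSUM_NONE with skb_checksum_none_assert(). The helper relies on freshly allocated skbs already starting out as CHECKSUM_NONE; roughly, and only as a sketch of the <linux/skbuff.h> definition of this era:

    static inline void skb_checksum_none_assert(struct sk_buff *skb)
    {
    #ifdef DEBUG
            /* Trip only in debug builds if some path changed the
             * checksum state of a supposedly untouched skb. */
            BUG_ON(skb->ip_summed != CHECKSUM_NONE);
    #endif
    }

In production builds the call compiles to nothing, so the conversion removes a redundant store rather than adding work.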
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index a42b6873370b..4adf12422787 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -148,7 +148,7 @@ static int full_duplex[MAX_UNITS] = {0, };
148 * This SUCKS. 148 * This SUCKS.
149 * We need a much better method to determine if dma_addr_t is 64-bit. 149 * We need a much better method to determine if dma_addr_t is 64-bit.
150 */ 150 */
151#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) 151#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) || (defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT))
152/* 64-bit dma_addr_t */ 152/* 64-bit dma_addr_t */
153#define ADDR_64BITS /* This chip uses 64 bit addresses. */ 153#define ADDR_64BITS /* This chip uses 64 bit addresses. */
154#define netdrv_addr_t __le64 154#define netdrv_addr_t __le64
@@ -302,7 +302,7 @@ enum chipset {
302}; 302};
303 303
304static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = { 304static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
305 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 }, 305 { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
306 { 0, } 306 { 0, }
307}; 307};
308MODULE_DEVICE_TABLE(pci, starfire_pci_tbl); 308MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
@@ -2078,11 +2078,7 @@ static int __init starfire_init (void)
2078 printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n"); 2078 printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2079#endif 2079#endif
2080 2080
2081 /* we can do this test only at run-time... sigh */ 2081 BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
2082 if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
2083 printk("This driver has dma_addr_t issues, please send email to maintainer\n");
2084 return -ENODEV;
2085 }
2086 2082
2087 return pci_register_driver(&starfire_driver); 2083 return pci_register_driver(&starfire_driver);
2088} 2084}
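The starfire hunk above turns the run-time sizeof comparison into BUILD_BUG_ON, so a dma_addr_t/descriptor width mismatch now fails at compile time instead of returning -ENODEV at module init. A standalone sketch of the idiom (example_hw_addr_t is hypothetical):

    #include <linux/kernel.h>       /* BUILD_BUG_ON() */
    #include <linux/types.h>        /* dma_addr_t, u64 */

    typedef u64 example_hw_addr_t;  /* hypothetical: width the HW expects */

    static int __init example_init(void)
    {
            /* Fails the build if dma_addr_t and the descriptor field
             * ever disagree in width, instead of failing at load time. */
            BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(example_hw_addr_t));
            return 0;
    }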
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index eb63d44748a7..3c2af7c6a39b 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -3,10 +3,10 @@ config STMMAC_ETH
3 select MII 3 select MII
4 select PHYLIB 4 select PHYLIB
5 select CRC32 5 select CRC32
6 depends on NETDEVICES && CPU_SUBTYPE_ST40 6 depends on NETDEVICES
7 help 7 help
8 This is the driver for the Ethernet IPs built around a 8 This is the driver for the Ethernet IPs built around a
9 Synopsys IP Core and fully tested on the STMicroelectronics 9 Synopsys IP Core and only tested on the STMicroelectronics
10 platforms. 10 platforms.
11 11
12if STMMAC_ETH 12if STMMAC_ETH
@@ -32,6 +32,7 @@ config STMMAC_DUAL_MAC
32config STMMAC_TIMER 32config STMMAC_TIMER
33 bool "STMMAC Timer optimisation" 33 bool "STMMAC Timer optimisation"
34 default n 34 default n
35 depends on RTC_HCTOSYS_DEVICE
35 help 36 help
36 Use an external timer for mitigating the number of network 37 Use an external timer for mitigating the number of network
37 interrupts. Currently, for SH architectures, it is possible 38 interrupts. Currently, for SH architectures, it is possible
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 66b9da0260fe..dec7ce40c27a 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -102,8 +102,6 @@ struct stmmac_extra_stats {
102 102
103#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ 103#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
104 104
105#define HW_CSUM 1
106#define NO_HW_CSUM 0
107enum rx_frame_status { /* IPC status */ 105enum rx_frame_status { /* IPC status */
108 good_frame = 0, 106 good_frame = 0,
109 discard_frame = 1, 107 discard_frame = 1,
@@ -167,7 +165,7 @@ struct stmmac_desc_ops {
167 int (*get_tx_ls) (struct dma_desc *p); 165 int (*get_tx_ls) (struct dma_desc *p);
168 /* Return the transmit status looking at the TDES1 */ 166 /* Return the transmit status looking at the TDES1 */
169 int (*tx_status) (void *data, struct stmmac_extra_stats *x, 167 int (*tx_status) (void *data, struct stmmac_extra_stats *x,
170 struct dma_desc *p, unsigned long ioaddr); 168 struct dma_desc *p, void __iomem *ioaddr);
171 /* Get the buffer size from the descriptor */ 169 /* Get the buffer size from the descriptor */
172 int (*get_tx_len) (struct dma_desc *p); 170 int (*get_tx_len) (struct dma_desc *p);
173 /* Handle extra events on specific interrupts hw dependent */ 171 /* Handle extra events on specific interrupts hw dependent */
@@ -182,44 +180,46 @@ struct stmmac_desc_ops {
182 180
183struct stmmac_dma_ops { 181struct stmmac_dma_ops {
184 /* DMA core initialization */ 182 /* DMA core initialization */
185 int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx); 183 int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
186 /* Dump DMA registers */ 184 /* Dump DMA registers */
187 void (*dump_regs) (unsigned long ioaddr); 185 void (*dump_regs) (void __iomem *ioaddr);
188 /* Set tx/rx threshold in the csr6 register 186 /* Set tx/rx threshold in the csr6 register
189 * An invalid value enables the store-and-forward mode */ 187 * An invalid value enables the store-and-forward mode */
190 void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode); 188 void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
191 /* To track extra statistic (if supported) */ 189 /* To track extra statistic (if supported) */
192 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, 190 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
193 unsigned long ioaddr); 191 void __iomem *ioaddr);
194 void (*enable_dma_transmission) (unsigned long ioaddr); 192 void (*enable_dma_transmission) (void __iomem *ioaddr);
195 void (*enable_dma_irq) (unsigned long ioaddr); 193 void (*enable_dma_irq) (void __iomem *ioaddr);
196 void (*disable_dma_irq) (unsigned long ioaddr); 194 void (*disable_dma_irq) (void __iomem *ioaddr);
197 void (*start_tx) (unsigned long ioaddr); 195 void (*start_tx) (void __iomem *ioaddr);
198 void (*stop_tx) (unsigned long ioaddr); 196 void (*stop_tx) (void __iomem *ioaddr);
199 void (*start_rx) (unsigned long ioaddr); 197 void (*start_rx) (void __iomem *ioaddr);
200 void (*stop_rx) (unsigned long ioaddr); 198 void (*stop_rx) (void __iomem *ioaddr);
201 int (*dma_interrupt) (unsigned long ioaddr, 199 int (*dma_interrupt) (void __iomem *ioaddr,
202 struct stmmac_extra_stats *x); 200 struct stmmac_extra_stats *x);
203}; 201};
204 202
205struct stmmac_ops { 203struct stmmac_ops {
206 /* MAC core initialization */ 204 /* MAC core initialization */
207 void (*core_init) (unsigned long ioaddr) ____cacheline_aligned; 205 void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
206 /* Support checksum offload engine */
207 int (*rx_coe) (void __iomem *ioaddr);
208 /* Dump MAC registers */ 208 /* Dump MAC registers */
209 void (*dump_regs) (unsigned long ioaddr); 209 void (*dump_regs) (void __iomem *ioaddr);
210 /* Handle extra events on specific interrupts hw dependent */ 210 /* Handle extra events on specific interrupts hw dependent */
211 void (*host_irq_status) (unsigned long ioaddr); 211 void (*host_irq_status) (void __iomem *ioaddr);
212 /* Multicast filter setting */ 212 /* Multicast filter setting */
213 void (*set_filter) (struct net_device *dev); 213 void (*set_filter) (struct net_device *dev);
214 /* Flow control setting */ 214 /* Flow control setting */
215 void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex, 215 void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
216 unsigned int fc, unsigned int pause_time); 216 unsigned int fc, unsigned int pause_time);
217 /* Set power management mode (e.g. magic frame) */ 217 /* Set power management mode (e.g. magic frame) */
218 void (*pmt) (unsigned long ioaddr, unsigned long mode); 218 void (*pmt) (void __iomem *ioaddr, unsigned long mode);
219 /* Set/Get Unicast MAC addresses */ 219 /* Set/Get Unicast MAC addresses */
220 void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr, 220 void (*set_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
221 unsigned int reg_n); 221 unsigned int reg_n);
222 void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr, 222 void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
223 unsigned int reg_n); 223 unsigned int reg_n);
224}; 224};
225 225
@@ -238,16 +238,15 @@ struct mac_device_info {
238 struct stmmac_ops *mac; 238 struct stmmac_ops *mac;
239 struct stmmac_desc_ops *desc; 239 struct stmmac_desc_ops *desc;
240 struct stmmac_dma_ops *dma; 240 struct stmmac_dma_ops *dma;
241 unsigned int pmt; /* support Power-Down */
242 struct mii_regs mii; /* MII register Addresses */ 241 struct mii_regs mii; /* MII register Addresses */
243 struct mac_link link; 242 struct mac_link link;
244}; 243};
245 244
246struct mac_device_info *dwmac1000_setup(unsigned long addr); 245struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
247struct mac_device_info *dwmac100_setup(unsigned long addr); 246struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
248 247
249extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], 248extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
250 unsigned int high, unsigned int low); 249 unsigned int high, unsigned int low);
251extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr, 250extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
252 unsigned int high, unsigned int low); 251 unsigned int high, unsigned int low);
253extern void dwmac_dma_flush_tx_fifo(unsigned long ioaddr); 252extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
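The common.h hunks convert every ioaddr from unsigned long to void __iomem *, the annotated pointer type that readl()/writel() take and that sparse can check for address-space misuse. A minimal sketch of the pattern (the register offset is a placeholder, not from this driver):

    #include <linux/io.h>
    #include <linux/types.h>

    #define EXAMPLE_CTRL_REG 0x0    /* hypothetical register offset */

    /* With void __iomem *, offset arithmetic on the mapped region is
     * byte-based and sparse flags any mix-up with normal pointers. */
    static u32 example_read_ctrl(void __iomem *ioaddr)
    {
            return readl(ioaddr + EXAMPLE_CTRL_REG);
    }

    static void example_write_ctrl(void __iomem *ioaddr, u32 value)
    {
            writel(value, ioaddr + EXAMPLE_CTRL_REG);
    }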
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index 8b20b19971cb..81ee4fd04386 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -99,7 +99,7 @@ enum inter_frame_gap {
99#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ 99#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
100 100
101#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ 101#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
102 GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE) 102 GMAC_CONTROL_JE | GMAC_CONTROL_BE)
103 103
104/* GMAC Frame Filter defines */ 104/* GMAC Frame Filter defines */
105#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ 105#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 2b2f5c8caf1c..65667b692024 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -30,7 +30,7 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include "dwmac1000.h" 31#include "dwmac1000.h"
32 32
33static void dwmac1000_core_init(unsigned long ioaddr) 33static void dwmac1000_core_init(void __iomem *ioaddr)
34{ 34{
35 u32 value = readl(ioaddr + GMAC_CONTROL); 35 u32 value = readl(ioaddr + GMAC_CONTROL);
36 value |= GMAC_CORE_INIT; 36 value |= GMAC_CORE_INIT;
@@ -50,10 +50,22 @@ static void dwmac1000_core_init(unsigned long ioaddr)
50#endif 50#endif
51} 51}
52 52
53static void dwmac1000_dump_regs(unsigned long ioaddr) 53static int dwmac1000_rx_coe_supported(void __iomem *ioaddr)
54{
55 u32 value = readl(ioaddr + GMAC_CONTROL);
56
57 value |= GMAC_CONTROL_IPC;
58 writel(value, ioaddr + GMAC_CONTROL);
59
60 value = readl(ioaddr + GMAC_CONTROL);
61
62 return !!(value & GMAC_CONTROL_IPC);
63}
64
65static void dwmac1000_dump_regs(void __iomem *ioaddr)
54{ 66{
55 int i; 67 int i;
56 pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr); 68 pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr);
57 69
58 for (i = 0; i < 55; i++) { 70 for (i = 0; i < 55; i++) {
59 int offset = i * 4; 71 int offset = i * 4;
@@ -62,14 +74,14 @@ static void dwmac1000_dump_regs(unsigned long ioaddr)
62 } 74 }
63} 75}
64 76
65static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr, 77static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
66 unsigned int reg_n) 78 unsigned int reg_n)
67{ 79{
68 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), 80 stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
69 GMAC_ADDR_LOW(reg_n)); 81 GMAC_ADDR_LOW(reg_n));
70} 82}
71 83
72static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr, 84static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
73 unsigned int reg_n) 85 unsigned int reg_n)
74{ 86{
75 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), 87 stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
@@ -78,7 +90,7 @@ static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
78 90
79static void dwmac1000_set_filter(struct net_device *dev) 91static void dwmac1000_set_filter(struct net_device *dev)
80{ 92{
81 unsigned long ioaddr = dev->base_addr; 93 void __iomem *ioaddr = (void __iomem *) dev->base_addr;
82 unsigned int value = 0; 94 unsigned int value = 0;
83 95
84 CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", 96 CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
@@ -139,7 +151,7 @@ static void dwmac1000_set_filter(struct net_device *dev)
139 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); 151 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
140} 152}
141 153
142static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex, 154static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
143 unsigned int fc, unsigned int pause_time) 155 unsigned int fc, unsigned int pause_time)
144{ 156{
145 unsigned int flow = 0; 157 unsigned int flow = 0;
@@ -162,7 +174,7 @@ static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
162 writel(flow, ioaddr + GMAC_FLOW_CTRL); 174 writel(flow, ioaddr + GMAC_FLOW_CTRL);
163} 175}
164 176
165static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode) 177static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
166{ 178{
167 unsigned int pmt = 0; 179 unsigned int pmt = 0;
168 180
@@ -178,7 +190,7 @@ static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
178} 190}
179 191
180 192
181static void dwmac1000_irq_status(unsigned long ioaddr) 193static void dwmac1000_irq_status(void __iomem *ioaddr)
182{ 194{
183 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); 195 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
184 196
@@ -202,6 +214,7 @@ static void dwmac1000_irq_status(unsigned long ioaddr)
202 214
203struct stmmac_ops dwmac1000_ops = { 215struct stmmac_ops dwmac1000_ops = {
204 .core_init = dwmac1000_core_init, 216 .core_init = dwmac1000_core_init,
217 .rx_coe = dwmac1000_rx_coe_supported,
205 .dump_regs = dwmac1000_dump_regs, 218 .dump_regs = dwmac1000_dump_regs,
206 .host_irq_status = dwmac1000_irq_status, 219 .host_irq_status = dwmac1000_irq_status,
207 .set_filter = dwmac1000_set_filter, 220 .set_filter = dwmac1000_set_filter,
@@ -211,7 +224,7 @@ struct stmmac_ops dwmac1000_ops = {
211 .get_umac_addr = dwmac1000_get_umac_addr, 224 .get_umac_addr = dwmac1000_get_umac_addr,
212}; 225};
213 226
214struct mac_device_info *dwmac1000_setup(unsigned long ioaddr) 227struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
215{ 228{
216 struct mac_device_info *mac; 229 struct mac_device_info *mac;
217 u32 uid = readl(ioaddr + GMAC_VERSION); 230 u32 uid = readl(ioaddr + GMAC_VERSION);
@@ -226,7 +239,6 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
226 mac->mac = &dwmac1000_ops; 239 mac->mac = &dwmac1000_ops;
227 mac->dma = &dwmac1000_dma_ops; 240 mac->dma = &dwmac1000_dma_ops;
228 241
229 mac->pmt = PMT_SUPPORTED;
230 mac->link.port = GMAC_CONTROL_PS; 242 mac->link.port = GMAC_CONTROL_PS;
231 mac->link.duplex = GMAC_CONTROL_DM; 243 mac->link.duplex = GMAC_CONTROL_DM;
232 mac->link.speed = GMAC_CONTROL_FES; 244 mac->link.speed = GMAC_CONTROL_FES;
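dwmac1000_rx_coe_supported() above detects the RX checksum-offload engine by writing the IPC enable bit and reading it back; on GMAC cores synthesized without COE the bit does not stick. The write/read-back probe in isolation, assuming the GMAC_CONTROL register names from dwmac1000.h:

    #include <linux/io.h>

    static int example_rx_coe_supported(void __iomem *ioaddr)
    {
            u32 value = readl(ioaddr + GMAC_CONTROL);

            value |= GMAC_CONTROL_IPC;      /* try to enable RX COE */
            writel(value, ioaddr + GMAC_CONTROL);

            /* If the core lacks the engine, the bit reads back as 0. */
            value = readl(ioaddr + GMAC_CONTROL);
            return !!(value & GMAC_CONTROL_IPC);
    }

This replaces the unconditional GMAC_CONTROL_IPC in GMAC_CORE_INIT removed in the dwmac1000.h hunk above, so offload is enabled only where the probe succeeds.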
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 415805057cb0..ce6163e39cd5 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -29,14 +29,22 @@
29#include "dwmac1000.h" 29#include "dwmac1000.h"
30#include "dwmac_dma.h" 30#include "dwmac_dma.h"
31 31
32static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, 32static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
33 u32 dma_rx) 33 u32 dma_rx)
34{ 34{
35 u32 value = readl(ioaddr + DMA_BUS_MODE); 35 u32 value = readl(ioaddr + DMA_BUS_MODE);
36 int limit;
37
36 /* DMA SW reset */ 38 /* DMA SW reset */
37 value |= DMA_BUS_MODE_SFT_RESET; 39 value |= DMA_BUS_MODE_SFT_RESET;
38 writel(value, ioaddr + DMA_BUS_MODE); 40 writel(value, ioaddr + DMA_BUS_MODE);
39 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)); 41 limit = 15000;
42 while (limit--) {
43 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
44 break;
45 }
46 if (limit < 0)
47 return -EBUSY;
40 48
41 value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL | 49 value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
42 ((pbl << DMA_BUS_MODE_PBL_SHIFT) | 50 ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
@@ -58,7 +66,7 @@ static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
58 return 0; 66 return 0;
59} 67}
60 68
61static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode, 69static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
62 int rxmode) 70 int rxmode)
63{ 71{
64 u32 csr6 = readl(ioaddr + DMA_CONTROL); 72 u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -111,12 +119,12 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
111 119
112/* Not yet implemented --- no RMON module */ 120/* Not yet implemented --- no RMON module */
113static void dwmac1000_dma_diagnostic_fr(void *data, 121static void dwmac1000_dma_diagnostic_fr(void *data,
114 struct stmmac_extra_stats *x, unsigned long ioaddr) 122 struct stmmac_extra_stats *x, void __iomem *ioaddr)
115{ 123{
116 return; 124 return;
117} 125}
118 126
119static void dwmac1000_dump_dma_regs(unsigned long ioaddr) 127static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
120{ 128{
121 int i; 129 int i;
122 pr_info(" DMA registers\n"); 130 pr_info(" DMA registers\n");
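This DMA-init hunk (and the matching one in dwmac100_dma.c below) replaces the unbounded do {} while spin on the soft-reset bit with a bounded loop that gives up with -EBUSY. The bounded-poll idiom, assuming the DMA_BUS_MODE names from dwmac_dma.h:

    #include <linux/errno.h>
    #include <linux/io.h>

    /* Poll a self-clearing reset bit a bounded number of times instead
     * of spinning forever on broken or absent hardware. */
    static int example_dma_reset(void __iomem *ioaddr)
    {
            int limit = 15000;
            u32 value = readl(ioaddr + DMA_BUS_MODE);

            value |= DMA_BUS_MODE_SFT_RESET;
            writel(value, ioaddr + DMA_BUS_MODE);

            while (limit--) {
                    if (!(readl(ioaddr + DMA_BUS_MODE) &
                          DMA_BUS_MODE_SFT_RESET))
                            break;
            }
            /* limit ends at -1 only when the loop exhausted without
             * the bit clearing; any break leaves it >= 0. */
            return (limit < 0) ? -EBUSY : 0;
    }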
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
index 2fb165fa2ba0..94eeccf3a8a0 100644
--- a/drivers/net/stmmac/dwmac100_core.c
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -31,7 +31,7 @@
31#include <linux/crc32.h> 31#include <linux/crc32.h>
32#include "dwmac100.h" 32#include "dwmac100.h"
33 33
34static void dwmac100_core_init(unsigned long ioaddr) 34static void dwmac100_core_init(void __iomem *ioaddr)
35{ 35{
36 u32 value = readl(ioaddr + MAC_CONTROL); 36 u32 value = readl(ioaddr + MAC_CONTROL);
37 37
@@ -42,12 +42,17 @@ static void dwmac100_core_init(unsigned long ioaddr)
42#endif 42#endif
43} 43}
44 44
45static void dwmac100_dump_mac_regs(unsigned long ioaddr) 45static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
46{
47 return 0;
48}
49
50static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
46{ 51{
47 pr_info("\t----------------------------------------------\n" 52 pr_info("\t----------------------------------------------\n"
48 "\t DWMAC 100 CSR (base addr = 0x%8x)\n" 53 "\t DWMAC 100 CSR (base addr = 0x%p)\n"
49 "\t----------------------------------------------\n", 54 "\t----------------------------------------------\n",
50 (unsigned int)ioaddr); 55 ioaddr);
51 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, 56 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
52 readl(ioaddr + MAC_CONTROL)); 57 readl(ioaddr + MAC_CONTROL));
53 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, 58 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
@@ -77,18 +82,18 @@ static void dwmac100_dump_mac_regs(unsigned long ioaddr)
77 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK)); 82 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
78} 83}
79 84
80static void dwmac100_irq_status(unsigned long ioaddr) 85static void dwmac100_irq_status(void __iomem *ioaddr)
81{ 86{
82 return; 87 return;
83} 88}
84 89
85static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr, 90static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
86 unsigned int reg_n) 91 unsigned int reg_n)
87{ 92{
88 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); 93 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
89} 94}
90 95
91static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr, 96static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
92 unsigned int reg_n) 97 unsigned int reg_n)
93{ 98{
94 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); 99 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
@@ -96,7 +101,7 @@ static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
96 101
97static void dwmac100_set_filter(struct net_device *dev) 102static void dwmac100_set_filter(struct net_device *dev)
98{ 103{
99 unsigned long ioaddr = dev->base_addr; 104 void __iomem *ioaddr = (void __iomem *) dev->base_addr;
100 u32 value = readl(ioaddr + MAC_CONTROL); 105 u32 value = readl(ioaddr + MAC_CONTROL);
101 106
102 if (dev->flags & IFF_PROMISC) { 107 if (dev->flags & IFF_PROMISC) {
@@ -145,7 +150,7 @@ static void dwmac100_set_filter(struct net_device *dev)
145 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW)); 150 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
146} 151}
147 152
148static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex, 153static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
149 unsigned int fc, unsigned int pause_time) 154 unsigned int fc, unsigned int pause_time)
150{ 155{
151 unsigned int flow = MAC_FLOW_CTRL_ENABLE; 156 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -158,13 +163,14 @@ static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
158/* No PMT module supported for this Ethernet Controller. 163/* No PMT module supported for this Ethernet Controller.
159 * Tested on ST platforms only. 164 * Tested on ST platforms only.
160 */ 165 */
161static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode) 166static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
162{ 167{
163 return; 168 return;
164} 169}
165 170
166struct stmmac_ops dwmac100_ops = { 171struct stmmac_ops dwmac100_ops = {
167 .core_init = dwmac100_core_init, 172 .core_init = dwmac100_core_init,
173 .rx_coe = dwmac100_rx_coe_supported,
168 .dump_regs = dwmac100_dump_mac_regs, 174 .dump_regs = dwmac100_dump_mac_regs,
169 .host_irq_status = dwmac100_irq_status, 175 .host_irq_status = dwmac100_irq_status,
170 .set_filter = dwmac100_set_filter, 176 .set_filter = dwmac100_set_filter,
@@ -174,7 +180,7 @@ struct stmmac_ops dwmac100_ops = {
174 .get_umac_addr = dwmac100_get_umac_addr, 180 .get_umac_addr = dwmac100_get_umac_addr,
175}; 181};
176 182
177struct mac_device_info *dwmac100_setup(unsigned long ioaddr) 183struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
178{ 184{
179 struct mac_device_info *mac; 185 struct mac_device_info *mac;
180 186
@@ -187,7 +193,6 @@ struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
187 mac->mac = &dwmac100_ops; 193 mac->mac = &dwmac100_ops;
188 mac->dma = &dwmac100_dma_ops; 194 mac->dma = &dwmac100_dma_ops;
189 195
190 mac->pmt = PMT_NOT_SUPPORTED;
191 mac->link.port = MAC_CONTROL_PS; 196 mac->link.port = MAC_CONTROL_PS;
192 mac->link.duplex = MAC_CONTROL_F; 197 mac->link.duplex = MAC_CONTROL_F;
193 mac->link.speed = 0; 198 mac->link.speed = 0;
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
index 2fece7b72727..96aac93b789b 100644
--- a/drivers/net/stmmac/dwmac100_dma.c
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -31,14 +31,22 @@
31#include "dwmac100.h" 31#include "dwmac100.h"
32#include "dwmac_dma.h" 32#include "dwmac_dma.h"
33 33
34static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, 34static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
35 u32 dma_rx) 35 u32 dma_rx)
36{ 36{
37 u32 value = readl(ioaddr + DMA_BUS_MODE); 37 u32 value = readl(ioaddr + DMA_BUS_MODE);
38 int limit;
39
38 /* DMA SW reset */ 40 /* DMA SW reset */
39 value |= DMA_BUS_MODE_SFT_RESET; 41 value |= DMA_BUS_MODE_SFT_RESET;
40 writel(value, ioaddr + DMA_BUS_MODE); 42 writel(value, ioaddr + DMA_BUS_MODE);
41 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)); 43 limit = 15000;
44 while (limit--) {
45 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
46 break;
47 }
48 if (limit < 0)
49 return -EBUSY;
42 50
43 /* Enable Application Access by writing to DMA CSR0 */ 51 /* Enable Application Access by writing to DMA CSR0 */
44 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), 52 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
@@ -58,7 +66,7 @@ static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
58/* Store and Forward capability is not used at all.. 66/* Store and Forward capability is not used at all..
59 * The transmit threshold can be programmed by 67 * The transmit threshold can be programmed by
60 * setting the TTC bits in the DMA control register.*/ 68 * setting the TTC bits in the DMA control register.*/
61static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode, 69static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
62 int rxmode) 70 int rxmode)
63{ 71{
64 u32 csr6 = readl(ioaddr + DMA_CONTROL); 72 u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -73,7 +81,7 @@ static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
73 writel(csr6, ioaddr + DMA_CONTROL); 81 writel(csr6, ioaddr + DMA_CONTROL);
74} 82}
75 83
76static void dwmac100_dump_dma_regs(unsigned long ioaddr) 84static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
77{ 85{
78 int i; 86 int i;
79 87
@@ -91,7 +99,7 @@ static void dwmac100_dump_dma_regs(unsigned long ioaddr)
91/* DMA controller has two counters to track the number of 99/* DMA controller has two counters to track the number of
92 * the receive missed frames. */ 100 * the receive missed frames. */
93static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, 101static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
94 unsigned long ioaddr) 102 void __iomem *ioaddr)
95{ 103{
96 struct net_device_stats *stats = (struct net_device_stats *)data; 104 struct net_device_stats *stats = (struct net_device_stats *)data;
97 u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); 105 u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
index 7b815a1b7b8c..da3f5ccf83d3 100644
--- a/drivers/net/stmmac/dwmac_dma.h
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -97,12 +97,12 @@
97#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ 97#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
98#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ 98#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
99 99
100extern void dwmac_enable_dma_transmission(unsigned long ioaddr); 100extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
101extern void dwmac_enable_dma_irq(unsigned long ioaddr); 101extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
102extern void dwmac_disable_dma_irq(unsigned long ioaddr); 102extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
103extern void dwmac_dma_start_tx(unsigned long ioaddr); 103extern void dwmac_dma_start_tx(void __iomem *ioaddr);
104extern void dwmac_dma_stop_tx(unsigned long ioaddr); 104extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
105extern void dwmac_dma_start_rx(unsigned long ioaddr); 105extern void dwmac_dma_start_rx(void __iomem *ioaddr);
106extern void dwmac_dma_stop_rx(unsigned long ioaddr); 106extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
107extern int dwmac_dma_interrupt(unsigned long ioaddr, 107extern int dwmac_dma_interrupt(void __iomem *ioaddr,
108 struct stmmac_extra_stats *x); 108 struct stmmac_extra_stats *x);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index a85415216ef4..d65fab1ba790 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -32,43 +32,43 @@
32#endif 32#endif
33 33
34/* CSR1 enables the transmit DMA to check for new descriptor */ 34/* CSR1 enables the transmit DMA to check for new descriptor */
35void dwmac_enable_dma_transmission(unsigned long ioaddr) 35void dwmac_enable_dma_transmission(void __iomem *ioaddr)
36{ 36{
37 writel(1, ioaddr + DMA_XMT_POLL_DEMAND); 37 writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
38} 38}
39 39
40void dwmac_enable_dma_irq(unsigned long ioaddr) 40void dwmac_enable_dma_irq(void __iomem *ioaddr)
41{ 41{
42 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); 42 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
43} 43}
44 44
45void dwmac_disable_dma_irq(unsigned long ioaddr) 45void dwmac_disable_dma_irq(void __iomem *ioaddr)
46{ 46{
47 writel(0, ioaddr + DMA_INTR_ENA); 47 writel(0, ioaddr + DMA_INTR_ENA);
48} 48}
49 49
50void dwmac_dma_start_tx(unsigned long ioaddr) 50void dwmac_dma_start_tx(void __iomem *ioaddr)
51{ 51{
52 u32 value = readl(ioaddr + DMA_CONTROL); 52 u32 value = readl(ioaddr + DMA_CONTROL);
53 value |= DMA_CONTROL_ST; 53 value |= DMA_CONTROL_ST;
54 writel(value, ioaddr + DMA_CONTROL); 54 writel(value, ioaddr + DMA_CONTROL);
55} 55}
56 56
57void dwmac_dma_stop_tx(unsigned long ioaddr) 57void dwmac_dma_stop_tx(void __iomem *ioaddr)
58{ 58{
59 u32 value = readl(ioaddr + DMA_CONTROL); 59 u32 value = readl(ioaddr + DMA_CONTROL);
60 value &= ~DMA_CONTROL_ST; 60 value &= ~DMA_CONTROL_ST;
61 writel(value, ioaddr + DMA_CONTROL); 61 writel(value, ioaddr + DMA_CONTROL);
62} 62}
63 63
64void dwmac_dma_start_rx(unsigned long ioaddr) 64void dwmac_dma_start_rx(void __iomem *ioaddr)
65{ 65{
66 u32 value = readl(ioaddr + DMA_CONTROL); 66 u32 value = readl(ioaddr + DMA_CONTROL);
67 value |= DMA_CONTROL_SR; 67 value |= DMA_CONTROL_SR;
68 writel(value, ioaddr + DMA_CONTROL); 68 writel(value, ioaddr + DMA_CONTROL);
69} 69}
70 70
71void dwmac_dma_stop_rx(unsigned long ioaddr) 71void dwmac_dma_stop_rx(void __iomem *ioaddr)
72{ 72{
73 u32 value = readl(ioaddr + DMA_CONTROL); 73 u32 value = readl(ioaddr + DMA_CONTROL);
74 value &= ~DMA_CONTROL_SR; 74 value &= ~DMA_CONTROL_SR;
@@ -145,7 +145,7 @@ static void show_rx_process_state(unsigned int status)
145} 145}
146#endif 146#endif
147 147
148int dwmac_dma_interrupt(unsigned long ioaddr, 148int dwmac_dma_interrupt(void __iomem *ioaddr,
149 struct stmmac_extra_stats *x) 149 struct stmmac_extra_stats *x)
150{ 150{
151 int ret = 0; 151 int ret = 0;
@@ -219,7 +219,7 @@ int dwmac_dma_interrupt(unsigned long ioaddr,
219 return ret; 219 return ret;
220} 220}
221 221
222void dwmac_dma_flush_tx_fifo(unsigned long ioaddr) 222void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
223{ 223{
224 u32 csr6 = readl(ioaddr + DMA_CONTROL); 224 u32 csr6 = readl(ioaddr + DMA_CONTROL);
225 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); 225 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -227,7 +227,7 @@ void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
227 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); 227 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
228} 228}
229 229
230void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], 230void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
231 unsigned int high, unsigned int low) 231 unsigned int high, unsigned int low)
232{ 232{
233 unsigned long data; 233 unsigned long data;
@@ -238,7 +238,7 @@ void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
238 writel(data, ioaddr + low); 238 writel(data, ioaddr + low);
239} 239}
240 240
241void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr, 241void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
242 unsigned int high, unsigned int low) 242 unsigned int high, unsigned int low)
243{ 243{
244 unsigned int hi_addr, lo_addr; 244 unsigned int hi_addr, lo_addr;
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
index f612f986a7e1..5d1471d8f8f6 100644
--- a/drivers/net/stmmac/enh_desc.c
+++ b/drivers/net/stmmac/enh_desc.c
@@ -25,7 +25,7 @@
25#include "common.h" 25#include "common.h"
26 26
27static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, 27static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
28 struct dma_desc *p, unsigned long ioaddr) 28 struct dma_desc *p, void __iomem *ioaddr)
29{ 29{
30 int ret = 0; 30 int ret = 0;
31 struct net_device_stats *stats = (struct net_device_stats *)data; 31 struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -284,7 +284,7 @@ static void enh_desc_release_tx_desc(struct dma_desc *p)
284{ 284{
285 int ter = p->des01.etx.end_ring; 285 int ter = p->des01.etx.end_ring;
286 286
287 memset(p, 0, sizeof(struct dma_desc)); 287 memset(p, 0, offsetof(struct dma_desc, des2));
288 p->des01.etx.end_ring = ter; 288 p->des01.etx.end_ring = ter;
289} 289}
290 290
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
index 31ad53643792..0dce90cb8124 100644
--- a/drivers/net/stmmac/norm_desc.c
+++ b/drivers/net/stmmac/norm_desc.c
@@ -25,7 +25,7 @@
25#include "common.h" 25#include "common.h"
26 26
27static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, 27static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
28 struct dma_desc *p, unsigned long ioaddr) 28 struct dma_desc *p, void __iomem *ioaddr)
29{ 29{
30 int ret = 0; 30 int ret = 0;
31 struct net_device_stats *stats = (struct net_device_stats *)data; 31 struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -174,22 +174,7 @@ static void ndesc_release_tx_desc(struct dma_desc *p)
174{ 174{
175 int ter = p->des01.tx.end_ring; 175 int ter = p->des01.tx.end_ring;
176 176
177 /* clean field used within the xmit */ 177 memset(p, 0, offsetof(struct dma_desc, des2));
178 p->des01.tx.first_segment = 0;
179 p->des01.tx.last_segment = 0;
180 p->des01.tx.buffer1_size = 0;
181
182 /* clean status reported */
183 p->des01.tx.error_summary = 0;
184 p->des01.tx.underflow_error = 0;
185 p->des01.tx.no_carrier = 0;
186 p->des01.tx.loss_carrier = 0;
187 p->des01.tx.excessive_deferral = 0;
188 p->des01.tx.excessive_collisions = 0;
189 p->des01.tx.late_collision = 0;
190 p->des01.tx.heartbeat_fail = 0;
191 p->des01.tx.deferred = 0;
192
193 /* set termination field */ 178 /* set termination field */
194 p->des01.tx.end_ring = ter; 179 p->des01.tx.end_ring = ter;
195} 180}
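Both ndesc_release_tx_desc() and enh_desc_release_tx_desc() now clear the transmit status/control bit-fields with a single memset bounded by offsetof(struct dma_desc, des2), leaving the buffer-address words untouched. A sketch under a simplified, illustrative descriptor layout (not the real dma_desc):

    #include <linux/stddef.h>       /* offsetof() */
    #include <linux/string.h>       /* memset() */

    /* Hypothetical descriptor: des01 carries status/control bits,
     * des2/des3 carry buffer addresses that must survive the clear. */
    struct example_desc {
            unsigned int des01;
            unsigned int des2;
            unsigned int des3;
    };

    static void example_release_desc(struct example_desc *p, int end_ring)
    {
            /* Zero everything before the des2 member, i.e. des01 only. */
            memset(p, 0, offsetof(struct example_desc, des2));
            /* Re-set the ring-termination flag, as the patch does with
             * p->des01.tx.end_ring (modelled here as bit 0). */
            p->des01 |= end_ring & 1;
    }

One memset over the leading words replaces a dozen individual bit-field stores while guaranteeing the buffer pointers are never scribbled on.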
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index ebebc644b1b8..92154ff7d702 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -21,6 +21,7 @@
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define DRV_MODULE_VERSION "Apr_2010" 23#define DRV_MODULE_VERSION "Apr_2010"
24#include <linux/platform_device.h>
24#include <linux/stmmac.h> 25#include <linux/stmmac.h>
25 26
26#include "common.h" 27#include "common.h"
@@ -50,10 +51,10 @@ struct stmmac_priv {
50 int is_gmac; 51 int is_gmac;
51 dma_addr_t dma_rx_phy; 52 dma_addr_t dma_rx_phy;
52 unsigned int dma_rx_size; 53 unsigned int dma_rx_size;
53 int rx_csum;
54 unsigned int dma_buf_sz; 54 unsigned int dma_buf_sz;
55 struct device *device; 55 struct device *device;
56 struct mac_device_info *hw; 56 struct mac_device_info *hw;
57 void __iomem *ioaddr;
57 58
58 struct stmmac_extra_stats xstats; 59 struct stmmac_extra_stats xstats;
59 struct napi_struct napi; 60 struct napi_struct napi;
@@ -65,7 +66,7 @@ struct stmmac_priv {
65 int phy_mask; 66 int phy_mask;
66 int (*phy_reset) (void *priv); 67 int (*phy_reset) (void *priv);
67 void (*fix_mac_speed) (void *priv, unsigned int speed); 68 void (*fix_mac_speed) (void *priv, unsigned int speed);
68 void (*bus_setup)(unsigned long ioaddr); 69 void (*bus_setup)(void __iomem *ioaddr);
69 void *bsp_priv; 70 void *bsp_priv;
70 71
71 int phy_irq; 72 int phy_irq;
@@ -76,6 +77,7 @@ struct stmmac_priv {
76 unsigned int flow_ctrl; 77 unsigned int flow_ctrl;
77 unsigned int pause; 78 unsigned int pause;
78 struct mii_bus *mii; 79 struct mii_bus *mii;
80 int mii_clk_csr;
79 81
80 u32 msg_enable; 82 u32 msg_enable;
81 spinlock_t lock; 83 spinlock_t lock;
@@ -89,6 +91,9 @@ struct stmmac_priv {
89 struct vlan_group *vlgrp; 91 struct vlan_group *vlgrp;
90#endif 92#endif
91 int enh_desc; 93 int enh_desc;
94 int rx_coe;
95 int bugged_jumbo;
96 int no_csum_insertion;
92}; 97};
93 98
94#ifdef CONFIG_STM_DRIVERS 99#ifdef CONFIG_STM_DRIVERS
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index f080509923f0..25a7e385f8ec 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -177,21 +177,21 @@ void stmmac_ethtool_gregs(struct net_device *dev,
177 if (!priv->is_gmac) { 177 if (!priv->is_gmac) {
178 /* MAC registers */ 178 /* MAC registers */
179 for (i = 0; i < 12; i++) 179 for (i = 0; i < 12; i++)
180 reg_space[i] = readl(dev->base_addr + (i * 4)); 180 reg_space[i] = readl(priv->ioaddr + (i * 4));
181 /* DMA registers */ 181 /* DMA registers */
182 for (i = 0; i < 9; i++) 182 for (i = 0; i < 9; i++)
183 reg_space[i + 12] = 183 reg_space[i + 12] =
184 readl(dev->base_addr + (DMA_BUS_MODE + (i * 4))); 184 readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
185 reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR); 185 reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
186 reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR); 186 reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
187 } else { 187 } else {
188 /* MAC registers */ 188 /* MAC registers */
189 for (i = 0; i < 55; i++) 189 for (i = 0; i < 55; i++)
190 reg_space[i] = readl(dev->base_addr + (i * 4)); 190 reg_space[i] = readl(priv->ioaddr + (i * 4));
191 /* DMA registers */ 191 /* DMA registers */
192 for (i = 0; i < 22; i++) 192 for (i = 0; i < 22; i++)
193 reg_space[i + 55] = 193 reg_space[i + 55] =
194 readl(dev->base_addr + (DMA_BUS_MODE + (i * 4))); 194 readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
195 } 195 }
196} 196}
197 197
@@ -209,7 +209,7 @@ u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
209{ 209{
210 struct stmmac_priv *priv = netdev_priv(dev); 210 struct stmmac_priv *priv = netdev_priv(dev);
211 211
212 return priv->rx_csum; 212 return priv->rx_coe;
213} 213}
214 214
215static void 215static void
@@ -263,11 +263,9 @@ stmmac_set_pauseparam(struct net_device *netdev,
263 cmd.phy_address = phy->addr; 263 cmd.phy_address = phy->addr;
264 ret = phy_ethtool_sset(phy, &cmd); 264 ret = phy_ethtool_sset(phy, &cmd);
265 } 265 }
266 } else { 266 } else
267 unsigned long ioaddr = netdev->base_addr; 267 priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
268 priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
269 priv->flow_ctrl, priv->pause); 268 priv->flow_ctrl, priv->pause);
270 }
271 spin_unlock(&priv->lock); 269 spin_unlock(&priv->lock);
272 return ret; 270 return ret;
273} 271}
@@ -276,12 +274,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
276 struct ethtool_stats *dummy, u64 *data) 274 struct ethtool_stats *dummy, u64 *data)
277{ 275{
278 struct stmmac_priv *priv = netdev_priv(dev); 276 struct stmmac_priv *priv = netdev_priv(dev);
279 unsigned long ioaddr = dev->base_addr;
280 int i; 277 int i;
281 278
282 /* Update HW stats if supported */ 279 /* Update HW stats if supported */
283 priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats, 280 priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
284 ioaddr); 281 priv->ioaddr);
285 282
286 for (i = 0; i < STMMAC_STATS_LEN; i++) { 283 for (i = 0; i < STMMAC_STATS_LEN; i++) {
287 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; 284 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -325,7 +322,7 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
325 struct stmmac_priv *priv = netdev_priv(dev); 322 struct stmmac_priv *priv = netdev_priv(dev);
326 323
327 spin_lock_irq(&priv->lock); 324 spin_lock_irq(&priv->lock);
328 if (priv->wolenabled == PMT_SUPPORTED) { 325 if (device_can_wakeup(priv->device)) {
329 wol->supported = WAKE_MAGIC; 326 wol->supported = WAKE_MAGIC;
330 wol->wolopts = priv->wolopts; 327 wol->wolopts = priv->wolopts;
331 } 328 }
@@ -337,16 +334,20 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
337 struct stmmac_priv *priv = netdev_priv(dev); 334 struct stmmac_priv *priv = netdev_priv(dev);
338 u32 support = WAKE_MAGIC; 335 u32 support = WAKE_MAGIC;
339 336
340 if (priv->wolenabled == PMT_NOT_SUPPORTED) 337 if (!device_can_wakeup(priv->device))
341 return -EINVAL; 338 return -EINVAL;
342 339
343 if (wol->wolopts & ~support) 340 if (wol->wolopts & ~support)
344 return -EINVAL; 341 return -EINVAL;
345 342
346 if (wol->wolopts == 0) 343 if (wol->wolopts) {
347 device_set_wakeup_enable(priv->device, 0); 344 pr_info("stmmac: wakeup enable\n");
348 else
349 device_set_wakeup_enable(priv->device, 1); 345 device_set_wakeup_enable(priv->device, 1);
346 enable_irq_wake(dev->irq);
347 } else {
348 device_set_wakeup_enable(priv->device, 0);
349 disable_irq_wake(dev->irq);
350 }
350 351
351 spin_lock_irq(&priv->lock); 352 spin_lock_irq(&priv->lock);
352 priv->wolopts = wol->wolopts; 353 priv->wolopts = wol->wolopts;
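The stmmac_set_wol() hunk pairs device_set_wakeup_enable() with enable_irq_wake()/disable_irq_wake() so the MAC interrupt stays armed across suspend when wake-on-LAN is requested. A condensed sketch of the pairing (assumes dev->irq is the wake-capable line and the parent device was declared wakeup-capable at probe; error handling elided):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>
    #include <linux/pm_wakeup.h>

    static void example_set_wol(struct net_device *dev,
                                struct device *parent, bool enable)
    {
            if (enable) {
                    device_set_wakeup_enable(parent, 1);
                    enable_irq_wake(dev->irq);  /* keep IRQ armed in suspend */
            } else {
                    device_set_wakeup_enable(parent, 0);
                    disable_irq_wake(dev->irq);
            }
    }

Checking device_can_wakeup() first, as the get_wol/set_wol hunks above do, keeps the ethtool interface honest about what the platform actually supports.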
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index ea0461eb2dbe..823b9e6431d5 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -134,13 +134,6 @@ static int buf_sz = DMA_BUFFER_SIZE;
134module_param(buf_sz, int, S_IRUGO | S_IWUSR); 134module_param(buf_sz, int, S_IRUGO | S_IWUSR);
135MODULE_PARM_DESC(buf_sz, "DMA buffer size"); 135MODULE_PARM_DESC(buf_sz, "DMA buffer size");
136 136
137/* In case of Giga ETH, we can enable/disable the COE for the
138 * transmit HW checksum computation.
139 * Note that, if tx csum is off in HW, SG will be still supported. */
140static int tx_coe = HW_CSUM;
141module_param(tx_coe, int, S_IRUGO | S_IWUSR);
142MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]");
143
144static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 137static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
145 NETIF_MSG_LINK | NETIF_MSG_IFUP | 138 NETIF_MSG_LINK | NETIF_MSG_IFUP |
146 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 139 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
@@ -202,7 +195,6 @@ static void stmmac_adjust_link(struct net_device *dev)
202{ 195{
203 struct stmmac_priv *priv = netdev_priv(dev); 196 struct stmmac_priv *priv = netdev_priv(dev);
204 struct phy_device *phydev = priv->phydev; 197 struct phy_device *phydev = priv->phydev;
205 unsigned long ioaddr = dev->base_addr;
206 unsigned long flags; 198 unsigned long flags;
207 int new_state = 0; 199 int new_state = 0;
208 unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; 200 unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
@@ -215,7 +207,7 @@ static void stmmac_adjust_link(struct net_device *dev)
215 207
216 spin_lock_irqsave(&priv->lock, flags); 208 spin_lock_irqsave(&priv->lock, flags);
217 if (phydev->link) { 209 if (phydev->link) {
218 u32 ctrl = readl(ioaddr + MAC_CTRL_REG); 210 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
219 211
220 /* Now we make sure that we can be in full duplex mode. 212 /* Now we make sure that we can be in full duplex mode.
221 * If not, we operate in half-duplex mode. */ 213 * If not, we operate in half-duplex mode. */
@@ -229,7 +221,7 @@ static void stmmac_adjust_link(struct net_device *dev)
229 } 221 }
230 /* Flow Control operation */ 222 /* Flow Control operation */
231 if (phydev->pause) 223 if (phydev->pause)
232 priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex, 224 priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
233 fc, pause_time); 225 fc, pause_time);
234 226
235 if (phydev->speed != priv->speed) { 227 if (phydev->speed != priv->speed) {
@@ -238,6 +230,9 @@ static void stmmac_adjust_link(struct net_device *dev)
238 case 1000: 230 case 1000:
239 if (likely(priv->is_gmac)) 231 if (likely(priv->is_gmac))
240 ctrl &= ~priv->hw->link.port; 232 ctrl &= ~priv->hw->link.port;
233 if (likely(priv->fix_mac_speed))
234 priv->fix_mac_speed(priv->bsp_priv,
235 phydev->speed);
241 break; 236 break;
242 case 100: 237 case 100:
243 case 10: 238 case 10:
@@ -265,7 +260,7 @@ static void stmmac_adjust_link(struct net_device *dev)
265 priv->speed = phydev->speed; 260 priv->speed = phydev->speed;
266 } 261 }
267 262
268 writel(ctrl, ioaddr + MAC_CTRL_REG); 263 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
269 264
270 if (!priv->oldlink) { 265 if (!priv->oldlink) {
271 new_state = 1; 266 new_state = 1;
@@ -342,7 +337,7 @@ static int stmmac_init_phy(struct net_device *dev)
342 return 0; 337 return 0;
343} 338}
344 339
345static inline void stmmac_mac_enable_rx(unsigned long ioaddr) 340static inline void stmmac_mac_enable_rx(void __iomem *ioaddr)
346{ 341{
347 u32 value = readl(ioaddr + MAC_CTRL_REG); 342 u32 value = readl(ioaddr + MAC_CTRL_REG);
348 value |= MAC_RNABLE_RX; 343 value |= MAC_RNABLE_RX;
@@ -350,7 +345,7 @@ static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
350 writel(value, ioaddr + MAC_CTRL_REG); 345 writel(value, ioaddr + MAC_CTRL_REG);
351} 346}
352 347
353static inline void stmmac_mac_enable_tx(unsigned long ioaddr) 348static inline void stmmac_mac_enable_tx(void __iomem *ioaddr)
354{ 349{
355 u32 value = readl(ioaddr + MAC_CTRL_REG); 350 u32 value = readl(ioaddr + MAC_CTRL_REG);
356 value |= MAC_ENABLE_TX; 351 value |= MAC_ENABLE_TX;
@@ -358,14 +353,14 @@ static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
358 writel(value, ioaddr + MAC_CTRL_REG); 353 writel(value, ioaddr + MAC_CTRL_REG);
359} 354}
360 355
361static inline void stmmac_mac_disable_rx(unsigned long ioaddr) 356static inline void stmmac_mac_disable_rx(void __iomem *ioaddr)
362{ 357{
363 u32 value = readl(ioaddr + MAC_CTRL_REG); 358 u32 value = readl(ioaddr + MAC_CTRL_REG);
364 value &= ~MAC_RNABLE_RX; 359 value &= ~MAC_RNABLE_RX;
365 writel(value, ioaddr + MAC_CTRL_REG); 360 writel(value, ioaddr + MAC_CTRL_REG);
366} 361}
367 362
368static inline void stmmac_mac_disable_tx(unsigned long ioaddr) 363static inline void stmmac_mac_disable_tx(void __iomem *ioaddr)
369{ 364{
370 u32 value = readl(ioaddr + MAC_CTRL_REG); 365 u32 value = readl(ioaddr + MAC_CTRL_REG);
371 value &= ~MAC_ENABLE_TX; 366 value &= ~MAC_ENABLE_TX;
@@ -567,29 +562,22 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
567 * stmmac_dma_operation_mode - HW DMA operation mode 562 * stmmac_dma_operation_mode - HW DMA operation mode
568 * @priv : pointer to the private device structure. 563 * @priv : pointer to the private device structure.
569 * Description: it sets the DMA operation mode: tx/rx DMA thresholds 564 * Description: it sets the DMA operation mode: tx/rx DMA thresholds
570 * or Store-And-Forward capability. It also verifies the COE for the 565 * or Store-And-Forward capability.
571 * transmission in case of Giga ETH.
572 */ 566 */
573static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 567static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
574{ 568{
575 if (!priv->is_gmac) { 569 if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) {
576 /* MAC 10/100 */ 570 /* In case of GMAC, SF mode has to be enabled
577 priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0); 571 * to perform the TX COE. This depends on:
578 priv->tx_coe = NO_HW_CSUM; 572 * 1) TX COE being actually supported
579 } else { 573 * 2) there being no bugged Jumbo frame support
580 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) { 574 * that requires not inserting the csum in the TDES.
581 priv->hw->dma->dma_mode(priv->dev->base_addr, 575 */
582 SF_DMA_MODE, SF_DMA_MODE); 576 priv->hw->dma->dma_mode(priv->ioaddr,
583 tc = SF_DMA_MODE; 577 SF_DMA_MODE, SF_DMA_MODE);
584 priv->tx_coe = HW_CSUM; 578 tc = SF_DMA_MODE;
585 } else { 579 } else
586 /* Checksum computation is performed in software. */ 580 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
587 priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
588 SF_DMA_MODE);
589 priv->tx_coe = NO_HW_CSUM;
590 }
591 }
592 tx_coe = priv->tx_coe;
593} 581}
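Condensed, the new selection logic is: use Store-and-Forward whenever TX checksum offload is usable, otherwise fall back to the threshold value tc. A standalone sketch of that decision (the SF_DMA_MODE value and flag names here are illustrative; the driver defines the real ones):

	#include <linux/types.h>

	#define SF_DMA_MODE	256	/* illustrative; taken from the driver's headers */

	/* Decide the TX mode to program: Store-and-Forward when checksum
	 * offload is usable, otherwise the threshold tc chosen at load time. */
	static int pick_tx_mode(bool tx_coe, bool no_csum_insertion, int tc)
	{
		if (tx_coe && !no_csum_insertion)
			return SF_DMA_MODE;
		return tc;
	}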
594 582
595/** 583/**
@@ -600,7 +588,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
600static void stmmac_tx(struct stmmac_priv *priv) 588static void stmmac_tx(struct stmmac_priv *priv)
601{ 589{
602 unsigned int txsize = priv->dma_tx_size; 590 unsigned int txsize = priv->dma_tx_size;
603 unsigned long ioaddr = priv->dev->base_addr;
604 591
605 while (priv->dirty_tx != priv->cur_tx) { 592 while (priv->dirty_tx != priv->cur_tx) {
606 int last; 593 int last;
@@ -618,7 +605,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
618 int tx_error = 605 int tx_error =
619 priv->hw->desc->tx_status(&priv->dev->stats, 606 priv->hw->desc->tx_status(&priv->dev->stats,
620 &priv->xstats, p, 607 &priv->xstats, p,
621 ioaddr); 608 priv->ioaddr);
622 if (likely(tx_error == 0)) { 609 if (likely(tx_error == 0)) {
623 priv->dev->stats.tx_packets++; 610 priv->dev->stats.tx_packets++;
624 priv->xstats.tx_pkt_n++; 611 priv->xstats.tx_pkt_n++;
@@ -674,7 +661,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
674 priv->tm->timer_start(tmrate); 661 priv->tm->timer_start(tmrate);
675 else 662 else
676#endif 663#endif
677 priv->hw->dma->enable_dma_irq(priv->dev->base_addr); 664 priv->hw->dma->enable_dma_irq(priv->ioaddr);
678} 665}
679 666
680static inline void stmmac_disable_irq(struct stmmac_priv *priv) 667static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -684,7 +671,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
684 priv->tm->timer_stop(); 671 priv->tm->timer_stop();
685 else 672 else
686#endif 673#endif
687 priv->hw->dma->disable_dma_irq(priv->dev->base_addr); 674 priv->hw->dma->disable_dma_irq(priv->ioaddr);
688} 675}
689 676
690static int stmmac_has_work(struct stmmac_priv *priv) 677static int stmmac_has_work(struct stmmac_priv *priv)
@@ -739,14 +726,15 @@ static void stmmac_no_timer_stopped(void)
739 */ 726 */
740static void stmmac_tx_err(struct stmmac_priv *priv) 727static void stmmac_tx_err(struct stmmac_priv *priv)
741{ 728{
729
742 netif_stop_queue(priv->dev); 730 netif_stop_queue(priv->dev);
743 731
744 priv->hw->dma->stop_tx(priv->dev->base_addr); 732 priv->hw->dma->stop_tx(priv->ioaddr);
745 dma_free_tx_skbufs(priv); 733 dma_free_tx_skbufs(priv);
746 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 734 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
747 priv->dirty_tx = 0; 735 priv->dirty_tx = 0;
748 priv->cur_tx = 0; 736 priv->cur_tx = 0;
749 priv->hw->dma->start_tx(priv->dev->base_addr); 737 priv->hw->dma->start_tx(priv->ioaddr);
750 738
751 priv->dev->stats.tx_errors++; 739 priv->dev->stats.tx_errors++;
752 netif_wake_queue(priv->dev); 740 netif_wake_queue(priv->dev);
@@ -755,11 +743,9 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
755 743
756static void stmmac_dma_interrupt(struct stmmac_priv *priv) 744static void stmmac_dma_interrupt(struct stmmac_priv *priv)
757{ 745{
758 unsigned long ioaddr = priv->dev->base_addr;
759 int status; 746 int status;
760 747
761 status = priv->hw->dma->dma_interrupt(priv->dev->base_addr, 748 status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
762 &priv->xstats);
763 if (likely(status == handle_tx_rx)) 749 if (likely(status == handle_tx_rx))
764 _stmmac_schedule(priv); 750 _stmmac_schedule(priv);
765 751
@@ -767,7 +753,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
767 /* Try to bump up the dma threshold on this failure */ 753 /* Try to bump up the dma threshold on this failure */
768 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) { 754 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
769 tc += 64; 755 tc += 64;
770 priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE); 756 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
771 priv->xstats.threshold = tc; 757 priv->xstats.threshold = tc;
772 } 758 }
773 stmmac_tx_err(priv); 759 stmmac_tx_err(priv);
@@ -787,7 +773,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
787static int stmmac_open(struct net_device *dev) 773static int stmmac_open(struct net_device *dev)
788{ 774{
789 struct stmmac_priv *priv = netdev_priv(dev); 775 struct stmmac_priv *priv = netdev_priv(dev);
790 unsigned long ioaddr = dev->base_addr;
791 int ret; 776 int ret;
792 777
793 /* Check that the MAC address is valid. If it's not, refuse 778 /* Check that the MAC address is valid. If it's not, refuse
@@ -843,7 +828,8 @@ static int stmmac_open(struct net_device *dev)
843 init_dma_desc_rings(dev); 828 init_dma_desc_rings(dev);
844 829
845 /* DMA initialization and SW reset */ 830 /* DMA initialization and SW reset */
846 if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy, 831 if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl,
832 priv->dma_tx_phy,
847 priv->dma_rx_phy) < 0)) { 833 priv->dma_rx_phy) < 0)) {
848 834
849 pr_err("%s: DMA initialization failed\n", __func__); 835 pr_err("%s: DMA initialization failed\n", __func__);
@@ -851,22 +837,28 @@ static int stmmac_open(struct net_device *dev)
851 } 837 }
852 838
853 /* Copy the MAC addr into the HW */ 839 /* Copy the MAC addr into the HW */
854 priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0); 840 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
855 /* If required, perform hw setup of the bus. */ 841 /* If required, perform hw setup of the bus. */
856 if (priv->bus_setup) 842 if (priv->bus_setup)
857 priv->bus_setup(ioaddr); 843 priv->bus_setup(priv->ioaddr);
858 /* Initialize the MAC Core */ 844 /* Initialize the MAC Core */
859 priv->hw->mac->core_init(ioaddr); 845 priv->hw->mac->core_init(priv->ioaddr);
846
847 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
848 if (priv->rx_coe)
849 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
850 if (priv->tx_coe)
851 pr_info("\tTX Checksum insertion supported\n");
860 852
861 priv->shutdown = 0; 853 priv->shutdown = 0;
862 854
863 /* Initialise the MMC (if present) to disable all interrupts. */ 855 /* Initialise the MMC (if present) to disable all interrupts. */
864 writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK); 856 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
865 writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK); 857 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
866 858
867 /* Enable the MAC Rx/Tx */ 859 /* Enable the MAC Rx/Tx */
868 stmmac_mac_enable_rx(ioaddr); 860 stmmac_mac_enable_rx(priv->ioaddr);
869 stmmac_mac_enable_tx(ioaddr); 861 stmmac_mac_enable_tx(priv->ioaddr);
870 862
871 /* Set the HW DMA mode and the COE */ 863 /* Set the HW DMA mode and the COE */
872 stmmac_dma_operation_mode(priv); 864 stmmac_dma_operation_mode(priv);
@@ -877,16 +869,16 @@ static int stmmac_open(struct net_device *dev)
877 869
878 /* Start the ball rolling... */ 870 /* Start the ball rolling... */
879 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); 871 DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
880 priv->hw->dma->start_tx(ioaddr); 872 priv->hw->dma->start_tx(priv->ioaddr);
881 priv->hw->dma->start_rx(ioaddr); 873 priv->hw->dma->start_rx(priv->ioaddr);
882 874
883#ifdef CONFIG_STMMAC_TIMER 875#ifdef CONFIG_STMMAC_TIMER
884 priv->tm->timer_start(tmrate); 876 priv->tm->timer_start(tmrate);
885#endif 877#endif
886 /* Dump DMA/MAC registers */ 878 /* Dump DMA/MAC registers */
887 if (netif_msg_hw(priv)) { 879 if (netif_msg_hw(priv)) {
888 priv->hw->mac->dump_regs(ioaddr); 880 priv->hw->mac->dump_regs(priv->ioaddr);
889 priv->hw->dma->dump_regs(ioaddr); 881 priv->hw->dma->dump_regs(priv->ioaddr);
890 } 882 }
891 883
892 if (priv->phydev) 884 if (priv->phydev)
@@ -930,15 +922,15 @@ static int stmmac_release(struct net_device *dev)
930 free_irq(dev->irq, dev); 922 free_irq(dev->irq, dev);
931 923
932 /* Stop TX/RX DMA and clear the descriptors */ 924 /* Stop TX/RX DMA and clear the descriptors */
933 priv->hw->dma->stop_tx(dev->base_addr); 925 priv->hw->dma->stop_tx(priv->ioaddr);
934 priv->hw->dma->stop_rx(dev->base_addr); 926 priv->hw->dma->stop_rx(priv->ioaddr);
935 927
936 /* Release and free the Rx/Tx resources */ 928 /* Release and free the Rx/Tx resources */
937 free_dma_desc_resources(priv); 929 free_dma_desc_resources(priv);
938 930
939 /* Disable the MAC core */ 931 /* Disable the MAC core */
940 stmmac_mac_disable_tx(dev->base_addr); 932 stmmac_mac_disable_tx(priv->ioaddr);
941 stmmac_mac_disable_rx(dev->base_addr); 933 stmmac_mac_disable_rx(priv->ioaddr);
942 934
943 netif_carrier_off(dev); 935 netif_carrier_off(dev);
944 936
@@ -1066,7 +1058,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1066 return stmmac_sw_tso(priv, skb); 1058 return stmmac_sw_tso(priv, skb);
1067 1059
1068 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) { 1060 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
1069 if (likely(priv->tx_coe == NO_HW_CSUM)) 1061 if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion)))
1070 skb_checksum_help(skb); 1062 skb_checksum_help(skb);
1071 else 1063 else
1072 csum_insertion = 1; 1064 csum_insertion = 1;
@@ -1140,7 +1132,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1140 1132
1141 dev->stats.tx_bytes += skb->len; 1133 dev->stats.tx_bytes += skb->len;
1142 1134
1143 priv->hw->dma->enable_dma_transmission(dev->base_addr); 1135 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
1144 1136
1145 return NETDEV_TX_OK; 1137 return NETDEV_TX_OK;
1146} 1138}
@@ -1256,7 +1248,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1256 1248
1257 if (unlikely(status == csum_none)) { 1249 if (unlikely(status == csum_none)) {
1258 /* always for the old mac 10/100 */ 1250 /* always for the old mac 10/100 */
1259 skb->ip_summed = CHECKSUM_NONE; 1251 skb_checksum_none_assert(skb);
1260 netif_receive_skb(skb); 1252 netif_receive_skb(skb);
1261 } else { 1253 } else {
1262 skb->ip_summed = CHECKSUM_UNNECESSARY; 1254 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1390,6 +1382,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1390 return -EINVAL; 1382 return -EINVAL;
1391 } 1383 }
1392 1384
1385 /* Some GMAC devices have bugged Jumbo frame support that
1386 * needs to have the Tx COE disabled for oversized frames
1387 * (due to limited buffer sizes). In this case we disable
1388 * the TX csum insertion in the TDES and do not use SF. */
1389 if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
1390 priv->no_csum_insertion = 1;
1391 else
1392 priv->no_csum_insertion = 0;
1393
1393 dev->mtu = new_mtu; 1394 dev->mtu = new_mtu;
1394 1395
1395 return 0; 1396 return 0;
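The MTU hook and the xmit path now cooperate through two flags: tx_coe says the hardware can insert checksums at all, while no_csum_insertion vetoes it for oversized frames on the bugged parts. A hedged sketch of the per-packet decision, using the stack's software fallback skb_checksum_help() as the xmit hunk above does:

	#include <linux/skbuff.h>

	/* Request HW csum insertion only when the hardware supports it and
	 * the jumbo quirk has not vetoed it; otherwise checksum in software. */
	static bool want_hw_csum(struct sk_buff *skb, bool tx_coe,
				 bool no_csum_insertion)
	{
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return false;	/* stack did not ask for offload */
		if (!tx_coe || no_csum_insertion) {
			skb_checksum_help(skb);	/* compute the csum in software */
			return false;
		}
		return true;
	}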
@@ -1405,11 +1406,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1405 return IRQ_NONE; 1406 return IRQ_NONE;
1406 } 1407 }
1407 1408
1408 if (priv->is_gmac) { 1409 if (priv->is_gmac)
1409 unsigned long ioaddr = dev->base_addr;
1410 /* To handle GMAC own interrupts */ 1410 /* To handle GMAC own interrupts */
1411 priv->hw->mac->host_irq_status(ioaddr); 1411 priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
1412 }
1413 1412
1414 stmmac_dma_interrupt(priv); 1413 stmmac_dma_interrupt(priv);
1415 1414
@@ -1512,9 +1511,6 @@ static int stmmac_probe(struct net_device *dev)
1512#endif 1511#endif
1513 priv->msg_enable = netif_msg_init(debug, default_msg_level); 1512 priv->msg_enable = netif_msg_init(debug, default_msg_level);
1514 1513
1515 if (priv->is_gmac)
1516 priv->rx_csum = 1;
1517
1518 if (flow_ctrl) 1514 if (flow_ctrl)
1519 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 1515 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
1520 1516
@@ -1522,7 +1518,8 @@ static int stmmac_probe(struct net_device *dev)
1522 netif_napi_add(dev, &priv->napi, stmmac_poll, 64); 1518 netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
1523 1519
1524 /* Get the MAC address */ 1520 /* Get the MAC address */
1525 priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0); 1521 priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
1522 dev->dev_addr, 0);
1526 1523
1527 if (!is_valid_ether_addr(dev->dev_addr)) 1524 if (!is_valid_ether_addr(dev->dev_addr))
1528 pr_warning("\tno valid MAC address;" 1525 pr_warning("\tno valid MAC address;"
@@ -1552,14 +1549,13 @@ static int stmmac_probe(struct net_device *dev)
1552static int stmmac_mac_device_setup(struct net_device *dev) 1549static int stmmac_mac_device_setup(struct net_device *dev)
1553{ 1550{
1554 struct stmmac_priv *priv = netdev_priv(dev); 1551 struct stmmac_priv *priv = netdev_priv(dev);
1555 unsigned long ioaddr = dev->base_addr;
1556 1552
1557 struct mac_device_info *device; 1553 struct mac_device_info *device;
1558 1554
1559 if (priv->is_gmac) 1555 if (priv->is_gmac)
1560 device = dwmac1000_setup(ioaddr); 1556 device = dwmac1000_setup(priv->ioaddr);
1561 else 1557 else
1562 device = dwmac100_setup(ioaddr); 1558 device = dwmac100_setup(priv->ioaddr);
1563 1559
1564 if (!device) 1560 if (!device)
1565 return -ENOMEM; 1561 return -ENOMEM;
@@ -1572,9 +1568,8 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1572 1568
1573 priv->hw = device; 1569 priv->hw = device;
1574 1570
1575 priv->wolenabled = priv->hw->pmt; /* PMT supported */ 1571 if (device_can_wakeup(priv->device))
1576 if (priv->wolenabled == PMT_SUPPORTED) 1572 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
1577 priv->wolopts = WAKE_MAGIC; /* Magic Frame */
1578 1573
1579 return 0; 1574 return 0;
1580} 1575}
@@ -1653,7 +1648,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1653{ 1648{
1654 int ret = 0; 1649 int ret = 0;
1655 struct resource *res; 1650 struct resource *res;
1656 unsigned int *addr = NULL; 1651 void __iomem *addr = NULL;
1657 struct net_device *ndev = NULL; 1652 struct net_device *ndev = NULL;
1658 struct stmmac_priv *priv; 1653 struct stmmac_priv *priv;
1659 struct plat_stmmacenet_data *plat_dat; 1654 struct plat_stmmacenet_data *plat_dat;
@@ -1664,7 +1659,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1664 ret = -ENODEV; 1659 ret = -ENODEV;
1665 goto out; 1660 goto out;
1666 } 1661 }
1667 pr_info("done!\n"); 1662 pr_info("\tdone!\n");
1668 1663
1669 if (!request_mem_region(res->start, resource_size(res), 1664 if (!request_mem_region(res->start, resource_size(res),
1670 pdev->name)) { 1665 pdev->name)) {
@@ -1706,8 +1701,18 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1706 plat_dat = pdev->dev.platform_data; 1701 plat_dat = pdev->dev.platform_data;
1707 priv->bus_id = plat_dat->bus_id; 1702 priv->bus_id = plat_dat->bus_id;
1708 priv->pbl = plat_dat->pbl; /* TLI */ 1703 priv->pbl = plat_dat->pbl; /* TLI */
1704 priv->mii_clk_csr = plat_dat->clk_csr;
1705 priv->tx_coe = plat_dat->tx_coe;
1706 priv->bugged_jumbo = plat_dat->bugged_jumbo;
1709 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */ 1707 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
1710 priv->enh_desc = plat_dat->enh_desc; 1708 priv->enh_desc = plat_dat->enh_desc;
1709 priv->ioaddr = addr;
1710
1711 /* PMT module is not integrated in all the MAC devices. */
1712 if (plat_dat->pmt) {
1713 pr_info("\tPMT module supported\n");
1714 device_set_wakeup_capable(&pdev->dev, 1);
1715 }
1711 1716
1712 platform_set_drvdata(pdev, ndev); 1717 platform_set_drvdata(pdev, ndev);
1713 1718
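The PMT rework replaces the driver-private wolenabled flag with the driver core's wakeup bookkeeping: probe declares the capability once, and suspend/resume then query it. A minimal sketch of that split, with hypothetical FOO_* register offsets standing in for the real PMT block:

	#include <linux/device.h>
	#include <linux/io.h>

	#define FOO_PMT_REG	0x2c		/* hypothetical PMT register */
	#define FOO_PMT_MAGIC	0x00000002	/* hypothetical magic-packet enable */
	#define FOO_MAC_CTRL	0x00		/* hypothetical MAC control register */

	/* Probe side: declare the capability once, based on platform data. */
	static void foo_declare_wakeup(struct device *dev, bool has_pmt)
	{
		if (has_pmt)
			device_set_wakeup_capable(dev, true);
	}

	/* Suspend side: query the same flag instead of a private field. */
	static void foo_suspend_mac(struct device *dev, void __iomem *ioaddr)
	{
		if (device_can_wakeup(dev))
			writel(FOO_PMT_MAGIC, ioaddr + FOO_PMT_REG);
		else
			writel(0, ioaddr + FOO_MAC_CTRL); /* no wake source: RX off */
	}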
@@ -1743,8 +1748,8 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1743 priv->bsp_priv = plat_dat->bsp_priv; 1748 priv->bsp_priv = plat_dat->bsp_priv;
1744 1749
1745 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" 1750 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
1746 "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name, 1751 "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
1747 pdev->id, ndev->irq, (unsigned int)addr); 1752 pdev->id, ndev->irq, addr);
1748 1753
1749 /* MDIO bus Registration */ 1754 /* MDIO bus Registration */
1750 pr_debug("\tMDIO bus (id: %d)...", priv->bus_id); 1755 pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
@@ -1779,11 +1784,11 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1779 1784
1780 pr_info("%s:\n\tremoving driver", __func__); 1785 pr_info("%s:\n\tremoving driver", __func__);
1781 1786
1782 priv->hw->dma->stop_rx(ndev->base_addr); 1787 priv->hw->dma->stop_rx(priv->ioaddr);
1783 priv->hw->dma->stop_tx(ndev->base_addr); 1788 priv->hw->dma->stop_tx(priv->ioaddr);
1784 1789
1785 stmmac_mac_disable_rx(ndev->base_addr); 1790 stmmac_mac_disable_rx(priv->ioaddr);
1786 stmmac_mac_disable_tx(ndev->base_addr); 1791 stmmac_mac_disable_tx(priv->ioaddr);
1787 1792
1788 netif_carrier_off(ndev); 1793 netif_carrier_off(ndev);
1789 1794
@@ -1792,7 +1797,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1792 platform_set_drvdata(pdev, NULL); 1797 platform_set_drvdata(pdev, NULL);
1793 unregister_netdev(ndev); 1798 unregister_netdev(ndev);
1794 1799
1795 iounmap((void *)ndev->base_addr); 1800 iounmap((void *)priv->ioaddr);
1796 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1801 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1797 release_mem_region(res->start, resource_size(res)); 1802 release_mem_region(res->start, resource_size(res));
1798 1803
@@ -1827,23 +1832,20 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
1827 napi_disable(&priv->napi); 1832 napi_disable(&priv->napi);
1828 1833
1829 /* Stop TX/RX DMA */ 1834 /* Stop TX/RX DMA */
1830 priv->hw->dma->stop_tx(dev->base_addr); 1835 priv->hw->dma->stop_tx(priv->ioaddr);
1831 priv->hw->dma->stop_rx(dev->base_addr); 1836 priv->hw->dma->stop_rx(priv->ioaddr);
1832 /* Clear the Rx/Tx descriptors */ 1837 /* Clear the Rx/Tx descriptors */
1833 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size, 1838 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
1834 dis_ic); 1839 dis_ic);
1835 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 1840 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
1836 1841
1837 stmmac_mac_disable_tx(dev->base_addr); 1842 stmmac_mac_disable_tx(priv->ioaddr);
1838 1843
1839 if (device_may_wakeup(&(pdev->dev))) { 1844 /* Enable Power down mode by programming the PMT regs */
1840 /* Enable Power down mode by programming the PMT regs */ 1845 if (device_can_wakeup(priv->device))
1841 if (priv->wolenabled == PMT_SUPPORTED) 1846 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
1842 priv->hw->mac->pmt(dev->base_addr, 1847 else
1843 priv->wolopts); 1848 stmmac_mac_disable_rx(priv->ioaddr);
1844 } else {
1845 stmmac_mac_disable_rx(dev->base_addr);
1846 }
1847 } else { 1849 } else {
1848 priv->shutdown = 1; 1850 priv->shutdown = 1;
1849 /* Although this can appear slightly redundant it actually 1851 /* Although this can appear slightly redundant it actually
@@ -1860,7 +1862,6 @@ static int stmmac_resume(struct platform_device *pdev)
1860{ 1862{
1861 struct net_device *dev = platform_get_drvdata(pdev); 1863 struct net_device *dev = platform_get_drvdata(pdev);
1862 struct stmmac_priv *priv = netdev_priv(dev); 1864 struct stmmac_priv *priv = netdev_priv(dev);
1863 unsigned long ioaddr = dev->base_addr;
1864 1865
1865 if (!netif_running(dev)) 1866 if (!netif_running(dev))
1866 return 0; 1867 return 0;
@@ -1879,17 +1880,16 @@ static int stmmac_resume(struct platform_device *pdev)
1879 * is received. Anyway, it's better to manually clear 1880 * is received. Anyway, it's better to manually clear
1880 * this bit because it can generate problems while resuming 1881 * this bit because it can generate problems while resuming
1881 * from other devices (e.g. serial console). */ 1882 * from other devices (e.g. serial console). */
1882 if (device_may_wakeup(&(pdev->dev))) 1883 if (device_can_wakeup(priv->device))
1883 if (priv->wolenabled == PMT_SUPPORTED) 1884 priv->hw->mac->pmt(priv->ioaddr, 0);
1884 priv->hw->mac->pmt(dev->base_addr, 0);
1885 1885
1886 netif_device_attach(dev); 1886 netif_device_attach(dev);
1887 1887
1888 /* Enable the MAC and DMA */ 1888 /* Enable the MAC and DMA */
1889 stmmac_mac_enable_rx(ioaddr); 1889 stmmac_mac_enable_rx(priv->ioaddr);
1890 stmmac_mac_enable_tx(ioaddr); 1890 stmmac_mac_enable_tx(priv->ioaddr);
1891 priv->hw->dma->start_tx(ioaddr); 1891 priv->hw->dma->start_tx(priv->ioaddr);
1892 priv->hw->dma->start_rx(ioaddr); 1892 priv->hw->dma->start_rx(priv->ioaddr);
1893 1893
1894#ifdef CONFIG_STMMAC_TIMER 1894#ifdef CONFIG_STMMAC_TIMER
1895 priv->tm->timer_start(tmrate); 1895 priv->tm->timer_start(tmrate);
@@ -1968,8 +1968,6 @@ static int __init stmmac_cmdline_opt(char *str)
1968 strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz); 1968 strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
1969 else if (!strncmp(opt, "tc:", 3)) 1969 else if (!strncmp(opt, "tc:", 3))
1970 strict_strtoul(opt + 3, 0, (unsigned long *)&tc); 1970 strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
1971 else if (!strncmp(opt, "tx_coe:", 7))
1972 strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe);
1973 else if (!strncmp(opt, "watchdog:", 9)) 1971 else if (!strncmp(opt, "watchdog:", 9))
1974 strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog); 1972 strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
1975 else if (!strncmp(opt, "flow_ctrl:", 10)) 1973 else if (!strncmp(opt, "flow_ctrl:", 10))
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 40b2c7929719..d7441616357d 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -47,21 +47,20 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
47{ 47{
48 struct net_device *ndev = bus->priv; 48 struct net_device *ndev = bus->priv;
49 struct stmmac_priv *priv = netdev_priv(ndev); 49 struct stmmac_priv *priv = netdev_priv(ndev);
50 unsigned long ioaddr = ndev->base_addr;
51 unsigned int mii_address = priv->hw->mii.addr; 50 unsigned int mii_address = priv->hw->mii.addr;
52 unsigned int mii_data = priv->hw->mii.data; 51 unsigned int mii_data = priv->hw->mii.data;
53 52
54 int data; 53 int data;
55 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
56 ((phyreg << 6) & (0x000007C0))); 55 ((phyreg << 6) & (0x000007C0)));
57 regValue |= MII_BUSY; /* in case of GMAC */ 56 regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
58 57
59 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); 58 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
60 writel(regValue, ioaddr + mii_address); 59 writel(regValue, priv->ioaddr + mii_address);
61 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); 60 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
62 61
63 /* Read the data from the MII data register */ 62 /* Read the data from the MII data register */
64 data = (int)readl(ioaddr + mii_data); 63 data = (int)readl(priv->ioaddr + mii_data);
65 64
66 return data; 65 return data;
67} 66}
@@ -79,7 +78,6 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
79{ 78{
80 struct net_device *ndev = bus->priv; 79 struct net_device *ndev = bus->priv;
81 struct stmmac_priv *priv = netdev_priv(ndev); 80 struct stmmac_priv *priv = netdev_priv(ndev);
82 unsigned long ioaddr = ndev->base_addr;
83 unsigned int mii_address = priv->hw->mii.addr; 81 unsigned int mii_address = priv->hw->mii.addr;
84 unsigned int mii_data = priv->hw->mii.data; 82 unsigned int mii_data = priv->hw->mii.data;
85 83
@@ -87,17 +85,18 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
87 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 85 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
88 | MII_WRITE; 86 | MII_WRITE;
89 87
90 value |= MII_BUSY; 88 value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
89
91 90
92 /* Wait until any existing MII operation is complete */ 91 /* Wait until any existing MII operation is complete */
93 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); 92 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
94 93
95 /* Set the MII address register to write */ 94 /* Set the MII address register to write */
96 writel(phydata, ioaddr + mii_data); 95 writel(phydata, priv->ioaddr + mii_data);
97 writel(value, ioaddr + mii_address); 96 writel(value, priv->ioaddr + mii_address);
98 97
99 /* Wait until any existing MII operation is complete */ 98 /* Wait until any existing MII operation is complete */
100 do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); 99 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
101 100
102 return 0; 101 return 0;
103} 102}
@@ -111,7 +110,6 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
111{ 110{
112 struct net_device *ndev = bus->priv; 111 struct net_device *ndev = bus->priv;
113 struct stmmac_priv *priv = netdev_priv(ndev); 112 struct stmmac_priv *priv = netdev_priv(ndev);
114 unsigned long ioaddr = ndev->base_addr;
115 unsigned int mii_address = priv->hw->mii.addr; 113 unsigned int mii_address = priv->hw->mii.addr;
116 114
117 if (priv->phy_reset) { 115 if (priv->phy_reset) {
@@ -123,7 +121,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
123 * It doesn't complete its reset until at least one clock cycle 121 * It doesn't complete its reset until at least one clock cycle
124 * on MDC, so perform a dummy mdio read. 122 * on MDC, so perform a dummy mdio read.
125 */ 123 */
126 writel(0, ioaddr + mii_address); 124 writel(0, priv->ioaddr + mii_address);
127 125
128 return 0; 126 return 0;
129} 127}
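Both MDIO accessors poll the busy bit of the MII address register with an unbounded do {} while loop. A sketch of the same wait written with a bounded timeout, which is the defensive variant (the MII_BUSY bit follows the driver; the bound itself is illustrative):

	#include <linux/io.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	#define MII_BUSY	0x00000001	/* busy bit, as in the driver */

	/* Bounded wait for the MII busy bit to clear; 0 or -ETIMEDOUT. */
	static int mdio_wait_not_busy(void __iomem *ioaddr,
				      unsigned int mii_address)
	{
		int timeout = 10000;	/* illustrative bound, roughly 10 ms */

		while (readl(ioaddr + mii_address) & MII_BUSY) {
			if (--timeout < 0)
				return -ETIMEDOUT;
			udelay(1);
		}
		return 0;
	}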
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 358c22f9acbe..7d9ec23aabf6 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -436,7 +436,7 @@ static int lance_open( struct net_device *dev )
436 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", 436 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
437 dev->name, i, DREG )); 437 dev->name, i, DREG ));
438 DREG = CSR0_STOP; 438 DREG = CSR0_STOP;
439 return( -EIO ); 439 return -EIO;
440 } 440 }
441 441
442 DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA; 442 DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA;
@@ -445,7 +445,7 @@ static int lance_open( struct net_device *dev )
445 445
446 DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG )); 446 DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
447 447
448 return( 0 ); 448 return 0;
449} 449}
450 450
451 451
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 618643e3ca3e..0a6a5ced3c1c 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -617,7 +617,7 @@ static void bigmac_begin_auto_negotiation(struct bigmac *bp)
617 bp->timer_ticks = 0; 617 bp->timer_ticks = 0;
618 bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10; 618 bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10;
619 bp->bigmac_timer.data = (unsigned long) bp; 619 bp->bigmac_timer.data = (unsigned long) bp;
620 bp->bigmac_timer.function = &bigmac_timer; 620 bp->bigmac_timer.function = bigmac_timer;
621 add_timer(&bp->bigmac_timer); 621 add_timer(&bp->bigmac_timer);
622} 622}
623 623
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 2678588ea4b2..8b5aeca24d5d 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -96,6 +96,7 @@ static char *media[MAX_UNITS];
96#include <asm/io.h> 96#include <asm/io.h>
97#include <linux/delay.h> 97#include <linux/delay.h>
98#include <linux/spinlock.h> 98#include <linux/spinlock.h>
99#include <linux/dma-mapping.h>
99#ifndef _COMPAT_WITH_OLD_KERNEL 100#ifndef _COMPAT_WITH_OLD_KERNEL
100#include <linux/crc32.h> 101#include <linux/crc32.h>
101#include <linux/ethtool.h> 102#include <linux/ethtool.h>
@@ -523,13 +524,15 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
523 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev); 524 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
524 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev); 525 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
525 526
526 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); 527 ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
528 &ring_dma, GFP_KERNEL);
527 if (!ring_space) 529 if (!ring_space)
528 goto err_out_cleardev; 530 goto err_out_cleardev;
529 np->tx_ring = (struct netdev_desc *)ring_space; 531 np->tx_ring = (struct netdev_desc *)ring_space;
530 np->tx_ring_dma = ring_dma; 532 np->tx_ring_dma = ring_dma;
531 533
532 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); 534 ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
535 &ring_dma, GFP_KERNEL);
533 if (!ring_space) 536 if (!ring_space)
534 goto err_out_unmap_tx; 537 goto err_out_unmap_tx;
535 np->rx_ring = (struct netdev_desc *)ring_space; 538 np->rx_ring = (struct netdev_desc *)ring_space;
@@ -663,9 +666,11 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
663err_out_unregister: 666err_out_unregister:
664 unregister_netdev(dev); 667 unregister_netdev(dev);
665err_out_unmap_rx: 668err_out_unmap_rx:
666 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); 669 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
670 np->rx_ring, np->rx_ring_dma);
667err_out_unmap_tx: 671err_out_unmap_tx:
668 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); 672 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
673 np->tx_ring, np->tx_ring_dma);
669err_out_cleardev: 674err_out_cleardev:
670 pci_set_drvdata(pdev, NULL); 675 pci_set_drvdata(pdev, NULL);
671 pci_iounmap(pdev, ioaddr); 676 pci_iounmap(pdev, ioaddr);
@@ -874,7 +879,7 @@ static int netdev_open(struct net_device *dev)
874 init_timer(&np->timer); 879 init_timer(&np->timer);
875 np->timer.expires = jiffies + 3*HZ; 880 np->timer.expires = jiffies + 3*HZ;
876 np->timer.data = (unsigned long)dev; 881 np->timer.data = (unsigned long)dev;
877 np->timer.function = &netdev_timer; /* timer handler */ 882 np->timer.function = netdev_timer; /* timer handler */
878 add_timer(&np->timer); 883 add_timer(&np->timer);
879 884
880 /* Enable interrupts by setting the interrupt mask. */ 885 /* Enable interrupts by setting the interrupt mask. */
@@ -1011,8 +1016,14 @@ static void init_ring(struct net_device *dev)
1011 skb->dev = dev; /* Mark as being used by this device. */ 1016 skb->dev = dev; /* Mark as being used by this device. */
1012 skb_reserve(skb, 2); /* 16 byte align the IP header. */ 1017 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1013 np->rx_ring[i].frag[0].addr = cpu_to_le32( 1018 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1014 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, 1019 dma_map_single(&np->pci_dev->dev, skb->data,
1015 PCI_DMA_FROMDEVICE)); 1020 np->rx_buf_sz, DMA_FROM_DEVICE));
1021 if (dma_mapping_error(&np->pci_dev->dev,
1022 np->rx_ring[i].frag[0].addr)) {
1023 dev_kfree_skb(skb);
1024 np->rx_skbuff[i] = NULL;
1025 break;
1026 }
1016 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag); 1027 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1017 } 1028 }
1018 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); 1029 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
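Beyond the API rename, the sundance conversion adds the mapping-error checks that the old pci_map_single() calls never had: map, test the returned handle with dma_mapping_error(), and unwind the buffer on failure before the descriptor ever sees it. A condensed sketch of the pattern (length and direction are illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>
	#include <linux/errno.h>

	/* Map an RX buffer and validate the handle; on failure the skb is
	 * freed so the ring never holds a bogus DMA address. */
	static int map_rx_skb(struct device *dev, struct sk_buff *skb,
			      unsigned int len, dma_addr_t *addr)
	{
		*addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *addr)) {
			dev_kfree_skb(skb);	/* unwind before touching the ring */
			return -ENOMEM;
		}
		return 0;
	}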
@@ -1063,9 +1074,11 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
1063 1074
1064 txdesc->next_desc = 0; 1075 txdesc->next_desc = 0;
1065 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign); 1076 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1066 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data, 1077 txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1067 skb->len, 1078 skb->data, skb->len, DMA_TO_DEVICE));
1068 PCI_DMA_TODEVICE)); 1079 if (dma_mapping_error(&np->pci_dev->dev,
1080 txdesc->frag[0].addr))
1081 goto drop_frame;
1069 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag); 1082 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1070 1083
1071 /* Increment cur_tx before tasklet_schedule() */ 1084 /* Increment cur_tx before tasklet_schedule() */
@@ -1087,6 +1100,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
1087 dev->name, np->cur_tx, entry); 1100 dev->name, np->cur_tx, entry);
1088 } 1101 }
1089 return NETDEV_TX_OK; 1102 return NETDEV_TX_OK;
1103
1104drop_frame:
1105 dev_kfree_skb(skb);
1106 np->tx_skbuff[entry] = NULL;
1107 dev->stats.tx_dropped++;
1108 return NETDEV_TX_OK;
1090} 1109}
1091 1110
1092/* Reset hardware tx and free all of tx buffers */ 1111/* Reset hardware tx and free all of tx buffers */
@@ -1097,7 +1116,6 @@ reset_tx (struct net_device *dev)
1097 void __iomem *ioaddr = np->base; 1116 void __iomem *ioaddr = np->base;
1098 struct sk_buff *skb; 1117 struct sk_buff *skb;
1099 int i; 1118 int i;
1100 int irq = in_interrupt();
1101 1119
1102 /* Reset tx logic, TxListPtr will be cleaned */ 1120 /* Reset tx logic, TxListPtr will be cleaned */
1103 iowrite16 (TxDisable, ioaddr + MACCtrl1); 1121 iowrite16 (TxDisable, ioaddr + MACCtrl1);
@@ -1109,13 +1127,10 @@ reset_tx (struct net_device *dev)
1109 1127
1110 skb = np->tx_skbuff[i]; 1128 skb = np->tx_skbuff[i];
1111 if (skb) { 1129 if (skb) {
1112 pci_unmap_single(np->pci_dev, 1130 dma_unmap_single(&np->pci_dev->dev,
1113 le32_to_cpu(np->tx_ring[i].frag[0].addr), 1131 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1114 skb->len, PCI_DMA_TODEVICE); 1132 skb->len, DMA_TO_DEVICE);
1115 if (irq) 1133 dev_kfree_skb_any(skb);
1116 dev_kfree_skb_irq (skb);
1117 else
1118 dev_kfree_skb (skb);
1119 np->tx_skbuff[i] = NULL; 1134 np->tx_skbuff[i] = NULL;
1120 dev->stats.tx_dropped++; 1135 dev->stats.tx_dropped++;
1121 } 1136 }
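Dropping the in_interrupt() test in reset_tx() works because dev_kfree_skb_any() performs that dispatch itself: it frees immediately when it is safe and defers to the softirq path otherwise. A sketch of roughly what the helper does internally (illustrative, not the exact kernel source):

	#include <linux/netdevice.h>
	#include <linux/hardirq.h>
	#include <linux/irqflags.h>

	/* Roughly the dispatch dev_kfree_skb_any() performs internally. */
	static void free_skb_any_open_coded(struct sk_buff *skb)
	{
		if (in_irq() || irqs_disabled())
			dev_kfree_skb_irq(skb);	/* defer the free to softirq */
		else
			dev_kfree_skb(skb);	/* safe to free immediately */
	}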
@@ -1233,9 +1248,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1233 break; 1248 break;
1234 skb = np->tx_skbuff[entry]; 1249 skb = np->tx_skbuff[entry];
1235 /* Free the original skb. */ 1250 /* Free the original skb. */
1236 pci_unmap_single(np->pci_dev, 1251 dma_unmap_single(&np->pci_dev->dev,
1237 le32_to_cpu(np->tx_ring[entry].frag[0].addr), 1252 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1238 skb->len, PCI_DMA_TODEVICE); 1253 skb->len, DMA_TO_DEVICE);
1239 dev_kfree_skb_irq (np->tx_skbuff[entry]); 1254 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1240 np->tx_skbuff[entry] = NULL; 1255 np->tx_skbuff[entry] = NULL;
1241 np->tx_ring[entry].frag[0].addr = 0; 1256 np->tx_ring[entry].frag[0].addr = 0;
@@ -1252,9 +1267,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1252 break; 1267 break;
1253 skb = np->tx_skbuff[entry]; 1268 skb = np->tx_skbuff[entry];
1254 /* Free the original skb. */ 1269 /* Free the original skb. */
1255 pci_unmap_single(np->pci_dev, 1270 dma_unmap_single(&np->pci_dev->dev,
1256 le32_to_cpu(np->tx_ring[entry].frag[0].addr), 1271 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1257 skb->len, PCI_DMA_TODEVICE); 1272 skb->len, DMA_TO_DEVICE);
1258 dev_kfree_skb_irq (np->tx_skbuff[entry]); 1273 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1259 np->tx_skbuff[entry] = NULL; 1274 np->tx_skbuff[entry] = NULL;
1260 np->tx_ring[entry].frag[0].addr = 0; 1275 np->tx_ring[entry].frag[0].addr = 0;
@@ -1334,22 +1349,18 @@ static void rx_poll(unsigned long data)
1334 if (pkt_len < rx_copybreak && 1349 if (pkt_len < rx_copybreak &&
1335 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1350 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1336 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1351 skb_reserve(skb, 2); /* 16 byte align the IP header */
1337 pci_dma_sync_single_for_cpu(np->pci_dev, 1352 dma_sync_single_for_cpu(&np->pci_dev->dev,
1338 le32_to_cpu(desc->frag[0].addr), 1353 le32_to_cpu(desc->frag[0].addr),
1339 np->rx_buf_sz, 1354 np->rx_buf_sz, DMA_FROM_DEVICE);
1340 PCI_DMA_FROMDEVICE);
1341
1342 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); 1355 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1343 pci_dma_sync_single_for_device(np->pci_dev, 1356 dma_sync_single_for_device(&np->pci_dev->dev,
1344 le32_to_cpu(desc->frag[0].addr), 1357 le32_to_cpu(desc->frag[0].addr),
1345 np->rx_buf_sz, 1358 np->rx_buf_sz, DMA_FROM_DEVICE);
1346 PCI_DMA_FROMDEVICE);
1347 skb_put(skb, pkt_len); 1359 skb_put(skb, pkt_len);
1348 } else { 1360 } else {
1349 pci_unmap_single(np->pci_dev, 1361 dma_unmap_single(&np->pci_dev->dev,
1350 le32_to_cpu(desc->frag[0].addr), 1362 le32_to_cpu(desc->frag[0].addr),
1351 np->rx_buf_sz, 1363 np->rx_buf_sz, DMA_FROM_DEVICE);
1352 PCI_DMA_FROMDEVICE);
1353 skb_put(skb = np->rx_skbuff[entry], pkt_len); 1364 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1354 np->rx_skbuff[entry] = NULL; 1365 np->rx_skbuff[entry] = NULL;
1355 } 1366 }
@@ -1396,8 +1407,14 @@ static void refill_rx (struct net_device *dev)
1396 skb->dev = dev; /* Mark as being used by this device. */ 1407 skb->dev = dev; /* Mark as being used by this device. */
1397 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1408 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1398 np->rx_ring[entry].frag[0].addr = cpu_to_le32( 1409 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1399 pci_map_single(np->pci_dev, skb->data, 1410 dma_map_single(&np->pci_dev->dev, skb->data,
1400 np->rx_buf_sz, PCI_DMA_FROMDEVICE)); 1411 np->rx_buf_sz, DMA_FROM_DEVICE));
1412 if (dma_mapping_error(&np->pci_dev->dev,
1413 np->rx_ring[entry].frag[0].addr)) {
1414 dev_kfree_skb_irq(skb);
1415 np->rx_skbuff[entry] = NULL;
1416 break;
1417 }
1401 } 1418 }
1402 /* Perhaps we need not reset this field. */ 1419 /* Perhaps we need not reset this field. */
1403 np->rx_ring[entry].frag[0].length = 1420 np->rx_ring[entry].frag[0].length =
@@ -1715,9 +1732,9 @@ static int netdev_close(struct net_device *dev)
1715 np->rx_ring[i].status = 0; 1732 np->rx_ring[i].status = 0;
1716 skb = np->rx_skbuff[i]; 1733 skb = np->rx_skbuff[i];
1717 if (skb) { 1734 if (skb) {
1718 pci_unmap_single(np->pci_dev, 1735 dma_unmap_single(&np->pci_dev->dev,
1719 le32_to_cpu(np->rx_ring[i].frag[0].addr), 1736 le32_to_cpu(np->rx_ring[i].frag[0].addr),
1720 np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1737 np->rx_buf_sz, DMA_FROM_DEVICE);
1721 dev_kfree_skb(skb); 1738 dev_kfree_skb(skb);
1722 np->rx_skbuff[i] = NULL; 1739 np->rx_skbuff[i] = NULL;
1723 } 1740 }
@@ -1727,9 +1744,9 @@ static int netdev_close(struct net_device *dev)
1727 np->tx_ring[i].next_desc = 0; 1744 np->tx_ring[i].next_desc = 0;
1728 skb = np->tx_skbuff[i]; 1745 skb = np->tx_skbuff[i];
1729 if (skb) { 1746 if (skb) {
1730 pci_unmap_single(np->pci_dev, 1747 dma_unmap_single(&np->pci_dev->dev,
1731 le32_to_cpu(np->tx_ring[i].frag[0].addr), 1748 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1732 skb->len, PCI_DMA_TODEVICE); 1749 skb->len, DMA_TO_DEVICE);
1733 dev_kfree_skb(skb); 1750 dev_kfree_skb(skb);
1734 np->tx_skbuff[i] = NULL; 1751 np->tx_skbuff[i] = NULL;
1735 } 1752 }
@@ -1743,25 +1760,72 @@ static void __devexit sundance_remove1 (struct pci_dev *pdev)
1743 struct net_device *dev = pci_get_drvdata(pdev); 1760 struct net_device *dev = pci_get_drvdata(pdev);
1744 1761
1745 if (dev) { 1762 if (dev) {
1746 struct netdev_private *np = netdev_priv(dev); 1763 struct netdev_private *np = netdev_priv(dev);
1747 1764 unregister_netdev(dev);
1748 unregister_netdev(dev); 1765 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1749 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, 1766 np->rx_ring, np->rx_ring_dma);
1750 np->rx_ring_dma); 1767 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1751 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, 1768 np->tx_ring, np->tx_ring_dma);
1752 np->tx_ring_dma); 1769 pci_iounmap(pdev, np->base);
1753 pci_iounmap(pdev, np->base); 1770 pci_release_regions(pdev);
1754 pci_release_regions(pdev); 1771 free_netdev(dev);
1755 free_netdev(dev); 1772 pci_set_drvdata(pdev, NULL);
1756 pci_set_drvdata(pdev, NULL);
1757 } 1773 }
1758} 1774}
1759 1775
1776#ifdef CONFIG_PM
1777
1778static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1779{
1780 struct net_device *dev = pci_get_drvdata(pci_dev);
1781
1782 if (!netif_running(dev))
1783 return 0;
1784
1785 netdev_close(dev);
1786 netif_device_detach(dev);
1787
1788 pci_save_state(pci_dev);
1789 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1790
1791 return 0;
1792}
1793
1794static int sundance_resume(struct pci_dev *pci_dev)
1795{
1796 struct net_device *dev = pci_get_drvdata(pci_dev);
1797 int err = 0;
1798
1799 if (!netif_running(dev))
1800 return 0;
1801
1802 pci_set_power_state(pci_dev, PCI_D0);
1803 pci_restore_state(pci_dev);
1804
1805 err = netdev_open(dev);
1806 if (err) {
1807 printk(KERN_ERR "%s: Can't resume interface!\n",
1808 dev->name);
1809 goto out;
1810 }
1811
1812 netif_device_attach(dev);
1813
1814out:
1815 return err;
1816}
1817
1818#endif /* CONFIG_PM */
1819
1760static struct pci_driver sundance_driver = { 1820static struct pci_driver sundance_driver = {
1761 .name = DRV_NAME, 1821 .name = DRV_NAME,
1762 .id_table = sundance_pci_tbl, 1822 .id_table = sundance_pci_tbl,
1763 .probe = sundance_probe1, 1823 .probe = sundance_probe1,
1764 .remove = __devexit_p(sundance_remove1), 1824 .remove = __devexit_p(sundance_remove1),
1825#ifdef CONFIG_PM
1826 .suspend = sundance_suspend,
1827 .resume = sundance_resume,
1828#endif /* CONFIG_PM */
1765}; 1829};
1766 1830
1767static int __init sundance_init(void) 1831static int __init sundance_init(void)
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 434f9d735333..4ceb3cf6a9a9 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -31,6 +31,8 @@
31 * about when we can start taking interrupts or get xmit() called... 31 * about when we can start taking interrupts or get xmit() called...
32 */ 32 */
33 33
34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
34#include <linux/module.h> 36#include <linux/module.h>
35#include <linux/kernel.h> 37#include <linux/kernel.h>
36#include <linux/types.h> 38#include <linux/types.h>
@@ -105,7 +107,6 @@ MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
105MODULE_LICENSE("GPL"); 107MODULE_LICENSE("GPL");
106 108
107#define GEM_MODULE_NAME "gem" 109#define GEM_MODULE_NAME "gem"
108#define PFX GEM_MODULE_NAME ": "
109 110
110static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = { 111static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
111 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, 112 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
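The sungem cleanup swaps hand-rolled "%s: ..." printks for the pr_fmt()/netdev_*() helpers: pr_fmt() prefixes every pr_*() call in the file with the module name at compile time, and netdev_err()/netdev_info() prepend the driver and interface names themselves. A sketch of the pairing (message text and function name are illustrative):

	/* Must appear before any #include so the printk headers pick it up. */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/printk.h>
	#include <linux/netdevice.h>

	static void foo_report_pci_error(struct net_device *dev, u16 status)
	{
		/* netdev_err() adds the driver and interface name prefix */
		netdev_err(dev, "PCI error [%04x]\n", status);
		/* pr_err() gets the KBUILD_MODNAME prefix from pr_fmt() above */
		pr_err("resetting whole chip\n");
	}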
@@ -262,8 +263,7 @@ static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
262 gp->dev->name, pcs_istat); 263 gp->dev->name, pcs_istat);
263 264
264 if (!(pcs_istat & PCS_ISTAT_LSC)) { 265 if (!(pcs_istat & PCS_ISTAT_LSC)) {
265 printk(KERN_ERR "%s: PCS irq but no link status change???\n", 266 netdev_err(dev, "PCS irq but no link status change???\n");
266 dev->name);
267 return 0; 267 return 0;
268 } 268 }
269 269
@@ -282,20 +282,16 @@ static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
282 * when autoneg has completed. 282 * when autoneg has completed.
283 */ 283 */
284 if (pcs_miistat & PCS_MIISTAT_RF) 284 if (pcs_miistat & PCS_MIISTAT_RF)
285 printk(KERN_INFO "%s: PCS AutoNEG complete, " 285 netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
286 "RemoteFault\n", dev->name);
287 else 286 else
288 printk(KERN_INFO "%s: PCS AutoNEG complete.\n", 287 netdev_info(dev, "PCS AutoNEG complete\n");
289 dev->name);
290 } 288 }
291 289
292 if (pcs_miistat & PCS_MIISTAT_LS) { 290 if (pcs_miistat & PCS_MIISTAT_LS) {
293 printk(KERN_INFO "%s: PCS link is now up.\n", 291 netdev_info(dev, "PCS link is now up\n");
294 dev->name);
295 netif_carrier_on(gp->dev); 292 netif_carrier_on(gp->dev);
296 } else { 293 } else {
297 printk(KERN_INFO "%s: PCS link is now down.\n", 294 netdev_info(dev, "PCS link is now down\n");
298 dev->name);
299 netif_carrier_off(gp->dev); 295 netif_carrier_off(gp->dev);
300 /* If this happens and the link timer is not running, 296 /* If this happens and the link timer is not running,
301 * reset so we re-negotiate. 297 * reset so we re-negotiate.
@@ -323,14 +319,12 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
323 return 0; 319 return 0;
324 320
325 if (txmac_stat & MAC_TXSTAT_URUN) { 321 if (txmac_stat & MAC_TXSTAT_URUN) {
326 printk(KERN_ERR "%s: TX MAC xmit underrun.\n", 322 netdev_err(dev, "TX MAC xmit underrun\n");
327 dev->name);
328 gp->net_stats.tx_fifo_errors++; 323 gp->net_stats.tx_fifo_errors++;
329 } 324 }
330 325
331 if (txmac_stat & MAC_TXSTAT_MPE) { 326 if (txmac_stat & MAC_TXSTAT_MPE) {
332 printk(KERN_ERR "%s: TX MAC max packet size error.\n", 327 netdev_err(dev, "TX MAC max packet size error\n");
333 dev->name);
334 gp->net_stats.tx_errors++; 328 gp->net_stats.tx_errors++;
335 } 329 }
336 330
@@ -377,8 +371,7 @@ static int gem_rxmac_reset(struct gem *gp)
377 udelay(10); 371 udelay(10);
378 } 372 }
379 if (limit == 5000) { 373 if (limit == 5000) {
380 printk(KERN_ERR "%s: RX MAC will not reset, resetting whole " 374 netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
381 "chip.\n", dev->name);
382 return 1; 375 return 1;
383 } 376 }
384 377
@@ -390,8 +383,7 @@ static int gem_rxmac_reset(struct gem *gp)
390 udelay(10); 383 udelay(10);
391 } 384 }
392 if (limit == 5000) { 385 if (limit == 5000) {
393 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole " 386 netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
394 "chip.\n", dev->name);
395 return 1; 387 return 1;
396 } 388 }
397 389
@@ -403,8 +395,7 @@ static int gem_rxmac_reset(struct gem *gp)
403 udelay(10); 395 udelay(10);
404 } 396 }
405 if (limit == 5000) { 397 if (limit == 5000) {
406 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole " 398 netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
407 "chip.\n", dev->name);
408 return 1; 399 return 1;
409 } 400 }
410 401
@@ -419,8 +410,7 @@ static int gem_rxmac_reset(struct gem *gp)
419 udelay(10); 410 udelay(10);
420 } 411 }
421 if (limit == 5000) { 412 if (limit == 5000) {
422 printk(KERN_ERR "%s: RX reset command will not execute, resetting " 413 netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
423 "whole chip.\n", dev->name);
424 return 1; 414 return 1;
425 } 415 }
426 416
@@ -429,8 +419,7 @@ static int gem_rxmac_reset(struct gem *gp)
429 struct gem_rxd *rxd = &gp->init_block->rxd[i]; 419 struct gem_rxd *rxd = &gp->init_block->rxd[i];
430 420
431 if (gp->rx_skbs[i] == NULL) { 421 if (gp->rx_skbs[i] == NULL) {
432 printk(KERN_ERR "%s: Parts of RX ring empty, resetting " 422 netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
433 "whole chip.\n", dev->name);
434 return 1; 423 return 1;
435 } 424 }
436 425
@@ -479,8 +468,7 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
479 if (rxmac_stat & MAC_RXSTAT_OFLW) { 468 if (rxmac_stat & MAC_RXSTAT_OFLW) {
480 u32 smac = readl(gp->regs + MAC_SMACHINE); 469 u32 smac = readl(gp->regs + MAC_SMACHINE);
481 470
482 printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n", 471 netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
483 dev->name, smac);
484 gp->net_stats.rx_over_errors++; 472 gp->net_stats.rx_over_errors++;
485 gp->net_stats.rx_fifo_errors++; 473 gp->net_stats.rx_fifo_errors++;
486 474
@@ -542,19 +530,18 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
542 530
543 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && 531 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
544 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { 532 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
545 printk(KERN_ERR "%s: PCI error [%04x] ", 533 netdev_err(dev, "PCI error [%04x]", pci_estat);
546 dev->name, pci_estat);
547 534
548 if (pci_estat & GREG_PCIESTAT_BADACK) 535 if (pci_estat & GREG_PCIESTAT_BADACK)
549 printk("<No ACK64# during ABS64 cycle> "); 536 pr_cont(" <No ACK64# during ABS64 cycle>");
550 if (pci_estat & GREG_PCIESTAT_DTRTO) 537 if (pci_estat & GREG_PCIESTAT_DTRTO)
551 printk("<Delayed transaction timeout> "); 538 pr_cont(" <Delayed transaction timeout>");
552 if (pci_estat & GREG_PCIESTAT_OTHER) 539 if (pci_estat & GREG_PCIESTAT_OTHER)
553 printk("<other>"); 540 pr_cont(" <other>");
554 printk("\n"); 541 pr_cont("\n");
555 } else { 542 } else {
556 pci_estat |= GREG_PCIESTAT_OTHER; 543 pci_estat |= GREG_PCIESTAT_OTHER;
557 printk(KERN_ERR "%s: PCI error\n", dev->name); 544 netdev_err(dev, "PCI error\n");
558 } 545 }
559 546
560 if (pci_estat & GREG_PCIESTAT_OTHER) { 547 if (pci_estat & GREG_PCIESTAT_OTHER) {
@@ -565,26 +552,20 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
565 */ 552 */
566 pci_read_config_word(gp->pdev, PCI_STATUS, 553 pci_read_config_word(gp->pdev, PCI_STATUS,
567 &pci_cfg_stat); 554 &pci_cfg_stat);
568 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n", 555 netdev_err(dev, "Read PCI cfg space status [%04x]\n",
569 dev->name, pci_cfg_stat); 556 pci_cfg_stat);
570 if (pci_cfg_stat & PCI_STATUS_PARITY) 557 if (pci_cfg_stat & PCI_STATUS_PARITY)
571 printk(KERN_ERR "%s: PCI parity error detected.\n", 558 netdev_err(dev, "PCI parity error detected\n");
572 dev->name);
573 if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT) 559 if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
574 printk(KERN_ERR "%s: PCI target abort.\n", 560 netdev_err(dev, "PCI target abort\n");
575 dev->name);
576 if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT) 561 if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
577 printk(KERN_ERR "%s: PCI master acks target abort.\n", 562 netdev_err(dev, "PCI master acks target abort\n");
578 dev->name);
579 if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT) 563 if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
580 printk(KERN_ERR "%s: PCI master abort.\n", 564 netdev_err(dev, "PCI master abort\n");
581 dev->name);
582 if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR) 565 if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
583 printk(KERN_ERR "%s: PCI system error SERR#.\n", 566 netdev_err(dev, "PCI system error SERR#\n");
584 dev->name);
585 if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY) 567 if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
586 printk(KERN_ERR "%s: PCI parity error.\n", 568 netdev_err(dev, "PCI parity error\n");
587 dev->name);
588 569
589 /* Write the error bits back to clear them. */ 570 /* Write the error bits back to clear them. */
590 pci_cfg_stat &= (PCI_STATUS_PARITY | 571 pci_cfg_stat &= (PCI_STATUS_PARITY |
@@ -874,8 +855,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
874 gp->rx_new = entry; 855 gp->rx_new = entry;
875 856
876 if (drops) 857 if (drops)
877 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", 858 netdev_info(gp->dev, "Memory squeeze, deferring packet\n");
878 gp->dev->name);
879 859
880 return work_done; 860 return work_done;
881} 861}
@@ -981,21 +961,19 @@ static void gem_tx_timeout(struct net_device *dev)
981{ 961{
982 struct gem *gp = netdev_priv(dev); 962 struct gem *gp = netdev_priv(dev);
983 963
984 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 964 netdev_err(dev, "transmit timed out, resetting\n");
985 if (!gp->running) { 965 if (!gp->running) {
986 printk("%s: hrm.. hw not running !\n", dev->name); 966 netdev_err(dev, "hrm.. hw not running !\n");
987 return; 967 return;
988 } 968 }
989 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n", 969 netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
990 dev->name, 970 readl(gp->regs + TXDMA_CFG),
991 readl(gp->regs + TXDMA_CFG), 971 readl(gp->regs + MAC_TXSTAT),
992 readl(gp->regs + MAC_TXSTAT), 972 readl(gp->regs + MAC_TXCFG));
993 readl(gp->regs + MAC_TXCFG)); 973 netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
994 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", 974 readl(gp->regs + RXDMA_CFG),
995 dev->name, 975 readl(gp->regs + MAC_RXSTAT),
996 readl(gp->regs + RXDMA_CFG), 976 readl(gp->regs + MAC_RXCFG));
997 readl(gp->regs + MAC_RXSTAT),
998 readl(gp->regs + MAC_RXCFG));
999 977
1000 spin_lock_irq(&gp->lock); 978 spin_lock_irq(&gp->lock);
1001 spin_lock(&gp->tx_lock); 979 spin_lock(&gp->tx_lock);
@@ -1048,8 +1026,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
1048 if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) { 1026 if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
1049 netif_stop_queue(dev); 1027 netif_stop_queue(dev);
1050 spin_unlock_irqrestore(&gp->tx_lock, flags); 1028 spin_unlock_irqrestore(&gp->tx_lock, flags);
1051 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", 1029 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
1052 dev->name);
1053 return NETDEV_TX_BUSY; 1030 return NETDEV_TX_BUSY;
1054 } 1031 }
1055 1032
@@ -1158,8 +1135,7 @@ static void gem_pcs_reset(struct gem *gp)
1158 break; 1135 break;
1159 } 1136 }
1160 if (limit < 0) 1137 if (limit < 0)
1161 printk(KERN_WARNING "%s: PCS reset bit would not clear.\n", 1138 netdev_warn(gp->dev, "PCS reset bit would not clear\n");
1162 gp->dev->name);
1163} 1139}
1164 1140
1165static void gem_pcs_reinit_adv(struct gem *gp) 1141static void gem_pcs_reinit_adv(struct gem *gp)
@@ -1230,7 +1206,7 @@ static void gem_reset(struct gem *gp)
1230 } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); 1206 } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
1231 1207
1232 if (limit < 0) 1208 if (limit < 0)
1233 printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name); 1209 netdev_err(gp->dev, "SW reset is ghetto\n");
1234 1210
1235 if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) 1211 if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
1236 gem_pcs_reinit_adv(gp); 1212 gem_pcs_reinit_adv(gp);
@@ -1395,9 +1371,8 @@ static int gem_set_link_modes(struct gem *gp)
1395 speed = SPEED_1000; 1371 speed = SPEED_1000;
1396 } 1372 }
1397 1373
1398 if (netif_msg_link(gp)) 1374 netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
1399 printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n", 1375 speed, (full_duplex ? "full" : "half"));
1400 gp->dev->name, speed, (full_duplex ? "full" : "half"));
1401 1376
1402 if (!gp->running) 1377 if (!gp->running)
1403 return 0; 1378 return 0;
@@ -1451,15 +1426,13 @@ static int gem_set_link_modes(struct gem *gp)
1451 1426
1452 if (netif_msg_link(gp)) { 1427 if (netif_msg_link(gp)) {
1453 if (pause) { 1428 if (pause) {
1454 printk(KERN_INFO "%s: Pause is enabled " 1429 netdev_info(gp->dev,
1455 "(rxfifo: %d off: %d on: %d)\n", 1430 "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
1456 gp->dev->name, 1431 gp->rx_fifo_sz,
1457 gp->rx_fifo_sz, 1432 gp->rx_pause_off,
1458 gp->rx_pause_off, 1433 gp->rx_pause_on);
1459 gp->rx_pause_on);
1460 } else { 1434 } else {
1461 printk(KERN_INFO "%s: Pause is disabled\n", 1435 netdev_info(gp->dev, "Pause is disabled\n");
1462 gp->dev->name);
1463 } 1436 }
1464 } 1437 }
1465 1438
@@ -1484,9 +1457,8 @@ static int gem_mdio_link_not_up(struct gem *gp)
1484{ 1457{
1485 switch (gp->lstate) { 1458 switch (gp->lstate) {
1486 case link_force_ret: 1459 case link_force_ret:
1487 if (netif_msg_link(gp)) 1460 netif_info(gp, link, gp->dev,
1488 printk(KERN_INFO "%s: Autoneg failed again, keeping" 1461 "Autoneg failed again, keeping forced mode\n");
1489 " forced mode\n", gp->dev->name);
1490 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, 1462 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
1491 gp->last_forced_speed, DUPLEX_HALF); 1463 gp->last_forced_speed, DUPLEX_HALF);
1492 gp->timer_ticks = 5; 1464 gp->timer_ticks = 5;
@@ -1499,9 +1471,7 @@ static int gem_mdio_link_not_up(struct gem *gp)
1499 */ 1471 */
1500 if (gp->phy_mii.def->magic_aneg) 1472 if (gp->phy_mii.def->magic_aneg)
1501 return 1; 1473 return 1;
1502 if (netif_msg_link(gp)) 1474 netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
1503 printk(KERN_INFO "%s: switching to forced 100bt\n",
1504 gp->dev->name);
1505 /* Try forced modes. */ 1475 /* Try forced modes. */
1506 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, 1476 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
1507 DUPLEX_HALF); 1477 DUPLEX_HALF);
@@ -1517,9 +1487,8 @@ static int gem_mdio_link_not_up(struct gem *gp)
1517 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10, 1487 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
1518 DUPLEX_HALF); 1488 DUPLEX_HALF);
1519 gp->timer_ticks = 5; 1489 gp->timer_ticks = 5;
1520 if (netif_msg_link(gp)) 1490 netif_info(gp, link, gp->dev,
1521 printk(KERN_INFO "%s: switching to forced 10bt\n", 1491 "switching to forced 10bt\n");
1522 gp->dev->name);
1523 return 0; 1492 return 0;
1524 } else 1493 } else
1525 return 1; 1494 return 1;
@@ -1574,8 +1543,8 @@ static void gem_link_timer(unsigned long data)
1574 gp->last_forced_speed = gp->phy_mii.speed; 1543 gp->last_forced_speed = gp->phy_mii.speed;
1575 gp->timer_ticks = 5; 1544 gp->timer_ticks = 5;
1576 if (netif_msg_link(gp)) 1545 if (netif_msg_link(gp))
1577 printk(KERN_INFO "%s: Got link after fallback, retrying" 1546 netdev_info(gp->dev,
1578 " autoneg once...\n", gp->dev->name); 1547 "Got link after fallback, retrying autoneg once...\n");
1579 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); 1548 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
1580 } else if (gp->lstate != link_up) { 1549 } else if (gp->lstate != link_up) {
1581 gp->lstate = link_up; 1550 gp->lstate = link_up;
@@ -1589,9 +1558,7 @@ static void gem_link_timer(unsigned long data)
1589 */ 1558 */
1590 if (gp->lstate == link_up) { 1559 if (gp->lstate == link_up) {
1591 gp->lstate = link_down; 1560 gp->lstate = link_down;
1592 if (netif_msg_link(gp)) 1561 netif_info(gp, link, gp->dev, "Link down\n");
1593 printk(KERN_INFO "%s: Link down\n",
1594 gp->dev->name);
1595 netif_carrier_off(gp->dev); 1562 netif_carrier_off(gp->dev);
1596 gp->reset_task_pending = 1; 1563 gp->reset_task_pending = 1;
1597 schedule_work(&gp->reset_task); 1564 schedule_work(&gp->reset_task);
@@ -1746,8 +1713,7 @@ static void gem_init_phy(struct gem *gp)
1746 if (phy_read(gp, MII_BMCR) != 0xffff) 1713 if (phy_read(gp, MII_BMCR) != 0xffff)
1747 break; 1714 break;
1748 if (i == 2) 1715 if (i == 2)
1749 printk(KERN_WARNING "%s: GMAC PHY not responding !\n", 1716 netdev_warn(gp->dev, "GMAC PHY not responding !\n");
1750 gp->dev->name);
1751 } 1717 }
1752 } 1718 }
1753 1719
@@ -2038,7 +2004,7 @@ static int gem_check_invariants(struct gem *gp)
2038 * as this chip has no gigabit PHY. 2004 * as this chip has no gigabit PHY.
2039 */ 2005 */
2040 if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { 2006 if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
2041 printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n", 2007 pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
2042 mif_cfg); 2008 mif_cfg);
2043 return -1; 2009 return -1;
2044 } 2010 }
@@ -2078,7 +2044,7 @@ static int gem_check_invariants(struct gem *gp)
2078 } 2044 }
2079 if (i == 32) { 2045 if (i == 32) {
2080 if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { 2046 if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
2081 printk(KERN_ERR PFX "RIO MII phy will not respond.\n"); 2047 pr_err("RIO MII phy will not respond\n");
2082 return -1; 2048 return -1;
2083 } 2049 }
2084 gp->phy_type = phy_serdes; 2050 gp->phy_type = phy_serdes;
@@ -2093,7 +2059,7 @@ static int gem_check_invariants(struct gem *gp)
2093 if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { 2059 if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
2094 if (gp->tx_fifo_sz != (9 * 1024) || 2060 if (gp->tx_fifo_sz != (9 * 1024) ||
2095 gp->rx_fifo_sz != (20 * 1024)) { 2061 gp->rx_fifo_sz != (20 * 1024)) {
2096 printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2062 pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2097 gp->tx_fifo_sz, gp->rx_fifo_sz); 2063 gp->tx_fifo_sz, gp->rx_fifo_sz);
2098 return -1; 2064 return -1;
2099 } 2065 }
@@ -2101,7 +2067,7 @@ static int gem_check_invariants(struct gem *gp)
2101 } else { 2067 } else {
2102 if (gp->tx_fifo_sz != (2 * 1024) || 2068 if (gp->tx_fifo_sz != (2 * 1024) ||
2103 gp->rx_fifo_sz != (2 * 1024)) { 2069 gp->rx_fifo_sz != (2 * 1024)) {
2104 printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", 2070 pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
2105 gp->tx_fifo_sz, gp->rx_fifo_sz); 2071 gp->tx_fifo_sz, gp->rx_fifo_sz);
2106 return -1; 2072 return -1;
2107 } 2073 }
@@ -2239,7 +2205,7 @@ static int gem_do_start(struct net_device *dev)
2239 2205
2240 if (request_irq(gp->pdev->irq, gem_interrupt, 2206 if (request_irq(gp->pdev->irq, gem_interrupt,
2241 IRQF_SHARED, dev->name, (void *)dev)) { 2207 IRQF_SHARED, dev->name, (void *)dev)) {
2242 printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name); 2208 netdev_err(dev, "failed to request irq !\n");
2243 2209
2244 spin_lock_irqsave(&gp->lock, flags); 2210 spin_lock_irqsave(&gp->lock, flags);
2245 spin_lock(&gp->tx_lock); 2211 spin_lock(&gp->tx_lock);
@@ -2378,9 +2344,8 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2378 2344
2379 mutex_lock(&gp->pm_mutex); 2345 mutex_lock(&gp->pm_mutex);
2380 2346
2381 printk(KERN_INFO "%s: suspending, WakeOnLan %s\n", 2347 netdev_info(dev, "suspending, WakeOnLan %s\n",
2382 dev->name, 2348 (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
2383 (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
2384 2349
2385 /* Keep the cell enabled during the entire operation */ 2350 /* Keep the cell enabled during the entire operation */
2386 spin_lock_irqsave(&gp->lock, flags); 2351 spin_lock_irqsave(&gp->lock, flags);
@@ -2440,7 +2405,7 @@ static int gem_resume(struct pci_dev *pdev)
2440 struct gem *gp = netdev_priv(dev); 2405 struct gem *gp = netdev_priv(dev);
2441 unsigned long flags; 2406 unsigned long flags;
2442 2407
2443 printk(KERN_INFO "%s: resuming\n", dev->name); 2408 netdev_info(dev, "resuming\n");
2444 2409
2445 mutex_lock(&gp->pm_mutex); 2410 mutex_lock(&gp->pm_mutex);
2446 2411
@@ -2452,8 +2417,7 @@ static int gem_resume(struct pci_dev *pdev)
2452 2417
2453 /* Make sure PCI access and bus master are enabled */ 2418 /* Make sure PCI access and bus master are enabled */
2454 if (pci_enable_device(gp->pdev)) { 2419 if (pci_enable_device(gp->pdev)) {
2455 printk(KERN_ERR "%s: Can't re-enable chip !\n", 2420 netdev_err(dev, "Can't re-enable chip !\n");
2456 dev->name);
2457 /* Put cell and forget it for now, it will be considered as 2421 /* Put cell and forget it for now, it will be considered as
2458 * still asleep, a new sleep cycle may bring it back 2422 * still asleep, a new sleep cycle may bring it back
2459 */ 2423 */
@@ -2938,7 +2902,7 @@ static int __devinit gem_get_device_address(struct gem *gp)
2938 addr = idprom->id_ethaddr; 2902 addr = idprom->id_ethaddr;
2939#else 2903#else
2940 printk("\n"); 2904 printk("\n");
2941 printk(KERN_ERR "%s: can't get mac-address\n", dev->name); 2905 pr_err("%s: can't get mac-address\n", dev->name);
2942 return -1; 2906 return -1;
2943#endif 2907#endif
2944 } 2908 }
@@ -3009,14 +2973,12 @@ static const struct net_device_ops gem_netdev_ops = {
3009static int __devinit gem_init_one(struct pci_dev *pdev, 2973static int __devinit gem_init_one(struct pci_dev *pdev,
3010 const struct pci_device_id *ent) 2974 const struct pci_device_id *ent)
3011{ 2975{
3012 static int gem_version_printed = 0;
3013 unsigned long gemreg_base, gemreg_len; 2976 unsigned long gemreg_base, gemreg_len;
3014 struct net_device *dev; 2977 struct net_device *dev;
3015 struct gem *gp; 2978 struct gem *gp;
3016 int err, pci_using_dac; 2979 int err, pci_using_dac;
3017 2980
3018 if (gem_version_printed++ == 0) 2981 printk_once(KERN_INFO "%s", version);
3019 printk(KERN_INFO "%s", version);
3020 2982
3021 /* Apple gmac note: during probe, the chip is powered up by 2983 /* Apple gmac note: during probe, the chip is powered up by
3022 * the arch code to allow the code below to work (and to let 2984 * the arch code to allow the code below to work (and to let
@@ -3026,8 +2988,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3026 */ 2988 */
3027 err = pci_enable_device(pdev); 2989 err = pci_enable_device(pdev);
3028 if (err) { 2990 if (err) {
3029 printk(KERN_ERR PFX "Cannot enable MMIO operation, " 2991 pr_err("Cannot enable MMIO operation, aborting\n");
3030 "aborting.\n");
3031 return err; 2992 return err;
3032 } 2993 }
3033 pci_set_master(pdev); 2994 pci_set_master(pdev);
@@ -3048,8 +3009,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3048 } else { 3009 } else {
3049 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3010 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3050 if (err) { 3011 if (err) {
3051 printk(KERN_ERR PFX "No usable DMA configuration, " 3012 pr_err("No usable DMA configuration, aborting\n");
3052 "aborting.\n");
3053 goto err_disable_device; 3013 goto err_disable_device;
3054 } 3014 }
3055 pci_using_dac = 0; 3015 pci_using_dac = 0;
@@ -3059,15 +3019,14 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3059 gemreg_len = pci_resource_len(pdev, 0); 3019 gemreg_len = pci_resource_len(pdev, 0);
3060 3020
3061 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { 3021 if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
3062 printk(KERN_ERR PFX "Cannot find proper PCI device " 3022 pr_err("Cannot find proper PCI device base address, aborting\n");
3063 "base address, aborting.\n");
3064 err = -ENODEV; 3023 err = -ENODEV;
3065 goto err_disable_device; 3024 goto err_disable_device;
3066 } 3025 }
3067 3026
3068 dev = alloc_etherdev(sizeof(*gp)); 3027 dev = alloc_etherdev(sizeof(*gp));
3069 if (!dev) { 3028 if (!dev) {
3070 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); 3029 pr_err("Etherdev alloc failed, aborting\n");
3071 err = -ENOMEM; 3030 err = -ENOMEM;
3072 goto err_disable_device; 3031 goto err_disable_device;
3073 } 3032 }
@@ -3077,8 +3036,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3077 3036
3078 err = pci_request_regions(pdev, DRV_NAME); 3037 err = pci_request_regions(pdev, DRV_NAME);
3079 if (err) { 3038 if (err) {
3080 printk(KERN_ERR PFX "Cannot obtain PCI resources, " 3039 pr_err("Cannot obtain PCI resources, aborting\n");
3081 "aborting.\n");
3082 goto err_out_free_netdev; 3040 goto err_out_free_netdev;
3083 } 3041 }
3084 3042
@@ -3104,8 +3062,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3104 3062
3105 gp->regs = ioremap(gemreg_base, gemreg_len); 3063 gp->regs = ioremap(gemreg_base, gemreg_len);
3106 if (!gp->regs) { 3064 if (!gp->regs) {
3107 printk(KERN_ERR PFX "Cannot map device registers, " 3065 pr_err("Cannot map device registers, aborting\n");
3108 "aborting.\n");
3109 err = -EIO; 3066 err = -EIO;
3110 goto err_out_free_res; 3067 goto err_out_free_res;
3111 } 3068 }
@@ -3150,8 +3107,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3150 pci_alloc_consistent(pdev, sizeof(struct gem_init_block), 3107 pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
3151 &gp->gblock_dvma); 3108 &gp->gblock_dvma);
3152 if (!gp->init_block) { 3109 if (!gp->init_block) {
3153 printk(KERN_ERR PFX "Cannot allocate init block, " 3110 pr_err("Cannot allocate init block, aborting\n");
3154 "aborting.\n");
3155 err = -ENOMEM; 3111 err = -ENOMEM;
3156 goto err_out_iounmap; 3112 goto err_out_iounmap;
3157 } 3113 }
@@ -3180,19 +3136,18 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3180 3136
3181 /* Register with kernel */ 3137 /* Register with kernel */
3182 if (register_netdev(dev)) { 3138 if (register_netdev(dev)) {
3183 printk(KERN_ERR PFX "Cannot register net device, " 3139 pr_err("Cannot register net device, aborting\n");
3184 "aborting.\n");
3185 err = -ENOMEM; 3140 err = -ENOMEM;
3186 goto err_out_free_consistent; 3141 goto err_out_free_consistent;
3187 } 3142 }
3188 3143
3189 printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", 3144 netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
3190 dev->name, dev->dev_addr); 3145 dev->dev_addr);
3191 3146
3192 if (gp->phy_type == phy_mii_mdio0 || 3147 if (gp->phy_type == phy_mii_mdio0 ||
3193 gp->phy_type == phy_mii_mdio1) 3148 gp->phy_type == phy_mii_mdio1)
3194 printk(KERN_INFO "%s: Found %s PHY\n", dev->name, 3149 netdev_info(dev, "Found %s PHY\n",
3195 gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 3150 gp->phy_mii.def ? gp->phy_mii.def->name : "no");
3196 3151
3197 /* GEM can do it all... */ 3152 /* GEM can do it all... */
3198 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX; 3153 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
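printk_once() replaces the driver-local gem_version_printed counter in the probe hunk above; the macro keeps its own static guard, so the banner is emitted at most once however many devices probe. Roughly equivalent to the following sketch (simplified; the real definition lives in <linux/kernel.h>):

	#define printk_once_sketch(fmt, ...)		\
	({						\
		static bool __done;			\
		if (!__done) {				\
			__done = true;			\
			printk(fmt, ##__VA_ARGS__);	\
		}					\
	})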
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 78f8cee5fd74..d16880d7099b 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -88,7 +88,7 @@ static int reset_one_mii_phy(struct mii_phy* phy, int phy_id)
88 if ((val & BMCR_ISOLATE) && limit > 0) 88 if ((val & BMCR_ISOLATE) && limit > 0)
89 __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); 89 __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);
90 90
91 return (limit <= 0); 91 return limit <= 0;
92} 92}
93 93
94static int bcm5201_init(struct mii_phy* phy) 94static int bcm5201_init(struct mii_phy* phy)
@@ -1175,7 +1175,8 @@ int mii_phy_probe(struct mii_phy *phy, int mii_id)
1175 1175
1176 /* Read ID and find matching entry */ 1176 /* Read ID and find matching entry */
1177 id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)); 1177 id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2));
1178 printk(KERN_DEBUG "PHY ID: %x, addr: %x\n", id, mii_id); 1178 printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n",
1179 id, mii_id);
1179 for (i=0; (def = mii_phy_table[i]) != NULL; i++) 1180 for (i=0; (def = mii_phy_table[i]) != NULL; i++)
1180 if ((id & def->phy_id_mask) == def->phy_id) 1181 if ((id & def->phy_id_mask) == def->phy_id)
1181 break; 1182 break;
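Two independent cleanups in sungem_phy.c: return statements lose their redundant parentheses (checkpatch style, also applied in sunhme, sunqe and tc35815 below), and the raw KERN_DEBUG message gains a module-name prefix by hand, since this file defines no pr_fmt. Adjacent string literals concatenate at compile time, so

	printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n",
	       id, mii_id);

prints "sungem_phy: PHY ID: ...".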
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index bd0df1c14955..5e28c414421a 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1409,7 +1409,7 @@ force_link:
1409 hp->timer_ticks = 0; 1409 hp->timer_ticks = 0;
1410 hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */ 1410 hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
1411 hp->happy_timer.data = (unsigned long) hp; 1411 hp->happy_timer.data = (unsigned long) hp;
1412 hp->happy_timer.function = &happy_meal_timer; 1412 hp->happy_timer.function = happy_meal_timer;
1413 add_timer(&hp->happy_timer); 1413 add_timer(&hp->happy_timer);
1414} 1414}
1415 1415
@@ -2497,7 +2497,7 @@ static u32 hme_get_link(struct net_device *dev)
2497 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR); 2497 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2498 spin_unlock_irq(&hp->happy_lock); 2498 spin_unlock_irq(&hp->happy_lock);
2499 2499
2500 return (hp->sw_bmsr & BMSR_LSTATUS); 2500 return hp->sw_bmsr & BMSR_LSTATUS;
2501} 2501}
2502 2502
2503static const struct ethtool_ops hme_ethtool_ops = { 2503static const struct ethtool_ops hme_ethtool_ops = {
@@ -2808,7 +2808,8 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
2808 happy_meal_set_initial_advertisement(hp); 2808 happy_meal_set_initial_advertisement(hp);
2809 spin_unlock_irq(&hp->happy_lock); 2809 spin_unlock_irq(&hp->happy_lock);
2810 2810
2811 if (register_netdev(hp->dev)) { 2811 err = register_netdev(hp->dev);
2812 if (err) {
2812 printk(KERN_ERR "happymeal: Cannot register net device, " 2813 printk(KERN_ERR "happymeal: Cannot register net device, "
2813 "aborting.\n"); 2814 "aborting.\n");
2814 goto err_out_free_coherent; 2815 goto err_out_free_coherent;
@@ -3130,7 +3131,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3130 happy_meal_set_initial_advertisement(hp); 3131 happy_meal_set_initial_advertisement(hp);
3131 spin_unlock_irq(&hp->happy_lock); 3132 spin_unlock_irq(&hp->happy_lock);
3132 3133
3133 if (register_netdev(hp->dev)) { 3134 err = register_netdev(hp->dev);
3135 if (err) {
3134 printk(KERN_ERR "happymeal(PCI): Cannot register net device, " 3136 printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
3135 "aborting.\n"); 3137 "aborting.\n");
3136 goto err_out_iounmap; 3138 goto err_out_iounmap;
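Both happy_meal probe paths previously tested register_netdev() inline and jumped to the error label with whatever stale value err happened to hold, so probe could report success on a registration failure. Capturing the return code first lets the function propagate the real error:

	err = register_netdev(hp->dev);
	if (err) {
		printk(KERN_ERR "happymeal: Cannot register net device, "
		       "aborting.\n");
		goto err_out_free_coherent;	/* probe returns err */
	}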
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 8dcb858f2168..2cf84e5968b2 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1483,7 +1483,7 @@ no_link_test:
1483 */ 1483 */
1484 init_timer(&lp->multicast_timer); 1484 init_timer(&lp->multicast_timer);
1485 lp->multicast_timer.data = (unsigned long) dev; 1485 lp->multicast_timer.data = (unsigned long) dev;
1486 lp->multicast_timer.function = &lance_set_multicast_retry; 1486 lp->multicast_timer.function = lance_set_multicast_retry;
1487 1487
1488 if (register_netdev(dev)) { 1488 if (register_netdev(dev)) {
1489 printk(KERN_ERR "SunLance: Cannot register device.\n"); 1489 printk(KERN_ERR "SunLance: Cannot register device.\n");
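The timer assignment changes here and in sunhme above are purely stylistic: a function name decays to a pointer on its own, so the address-of operator is redundant. Both forms yield the same pointer:

	lp->multicast_timer.function = lance_set_multicast_retry;
	lp->multicast_timer.function = &lance_set_multicast_retry; /* identical */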
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 72e65d4666ef..9536b2f010be 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -711,7 +711,7 @@ static u32 qe_get_link(struct net_device *dev)
711 phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG); 711 phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
712 spin_unlock_irq(&qep->lock); 712 spin_unlock_irq(&qep->lock);
713 713
714 return (phyconfig & MREGS_PHYCONFIG_LSTAT); 714 return phyconfig & MREGS_PHYCONFIG_LSTAT;
715} 715}
716 716
717static const struct ethtool_ops qe_ethtool_ops = { 717static const struct ethtool_ops qe_ethtool_ops = {
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index d281a7b34701..bf3c762de620 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -3,6 +3,8 @@
3 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> 3 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
4 */ 4 */
5 5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
6#include <linux/module.h> 8#include <linux/module.h>
7#include <linux/kernel.h> 9#include <linux/kernel.h>
8#include <linux/types.h> 10#include <linux/types.h>
@@ -20,7 +22,6 @@
20#include "sunvnet.h" 22#include "sunvnet.h"
21 23
22#define DRV_MODULE_NAME "sunvnet" 24#define DRV_MODULE_NAME "sunvnet"
23#define PFX DRV_MODULE_NAME ": "
24#define DRV_MODULE_VERSION "1.0" 25#define DRV_MODULE_VERSION "1.0"
25#define DRV_MODULE_RELDATE "June 25, 2007" 26#define DRV_MODULE_RELDATE "June 25, 2007"
26 27
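Defining pr_fmt before the first include is what allows the PFX macro to be deleted: every pr_err()/pr_info() in the file then prepends the module name automatically. Simplified mechanism (the real plumbing is in <linux/kernel.h>):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	/* pr_err(fmt, ...) expands to roughly
	 *	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
	 * so pr_err("Resetting connection\n") prints
	 * "sunvnet: Resetting connection".
	 */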
@@ -45,9 +46,9 @@ static int vnet_handle_unknown(struct vnet_port *port, void *arg)
45{ 46{
46 struct vio_msg_tag *pkt = arg; 47 struct vio_msg_tag *pkt = arg;
47 48
48 printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n", 49 pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
49 pkt->type, pkt->stype, pkt->stype_env, pkt->sid); 50 pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
50 printk(KERN_ERR PFX "Resetting connection.\n"); 51 pr_err("Resetting connection\n");
51 52
52 ldc_disconnect(port->vio.lp); 53 ldc_disconnect(port->vio.lp);
53 54
@@ -400,8 +401,8 @@ static int vnet_rx(struct vnet_port *port, void *msgbuf)
400 if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) 401 if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
401 return 0; 402 return 0;
402 if (unlikely(pkt->seq != dr->rcv_nxt)) { 403 if (unlikely(pkt->seq != dr->rcv_nxt)) {
403 printk(KERN_ERR PFX "RX out of sequence seq[0x%llx] " 404 pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
404 "rcv_nxt[0x%llx]\n", pkt->seq, dr->rcv_nxt); 405 pkt->seq, dr->rcv_nxt);
405 return 0; 406 return 0;
406 } 407 }
407 408
@@ -464,8 +465,7 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
464 struct vio_net_mcast_info *pkt = msgbuf; 465 struct vio_net_mcast_info *pkt = msgbuf;
465 466
466 if (pkt->tag.stype != VIO_SUBTYPE_ACK) 467 if (pkt->tag.stype != VIO_SUBTYPE_ACK)
467 printk(KERN_ERR PFX "%s: Got unexpected MCAST reply " 468 pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
468 "[%02x:%02x:%04x:%08x]\n",
469 port->vp->dev->name, 469 port->vp->dev->name,
470 pkt->tag.type, 470 pkt->tag.type,
471 pkt->tag.stype, 471 pkt->tag.stype,
@@ -520,7 +520,7 @@ static void vnet_event(void *arg, int event)
520 } 520 }
521 521
522 if (unlikely(event != LDC_EVENT_DATA_READY)) { 522 if (unlikely(event != LDC_EVENT_DATA_READY)) {
523 printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event); 523 pr_warning("Unexpected LDC event %d\n", event);
524 spin_unlock_irqrestore(&vio->lock, flags); 524 spin_unlock_irqrestore(&vio->lock, flags);
525 return; 525 return;
526 } 526 }
@@ -662,8 +662,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
662 netif_stop_queue(dev); 662 netif_stop_queue(dev);
663 663
664 /* This is a hard error, log it. */ 664 /* This is a hard error, log it. */
665 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " 665 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
666 "queue awake!\n", dev->name);
667 dev->stats.tx_errors++; 666 dev->stats.tx_errors++;
668 } 667 }
669 spin_unlock_irqrestore(&port->vio.lock, flags); 668 spin_unlock_irqrestore(&port->vio.lock, flags);
@@ -696,8 +695,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
696 695
697 err = __vnet_tx_trigger(port); 696 err = __vnet_tx_trigger(port);
698 if (unlikely(err < 0)) { 697 if (unlikely(err < 0)) {
699 printk(KERN_INFO PFX "%s: TX trigger error %d\n", 698 netdev_info(dev, "TX trigger error %d\n", err);
700 dev->name, err);
701 d->hdr.state = VIO_DESC_FREE; 699 d->hdr.state = VIO_DESC_FREE;
702 dev->stats.tx_carrier_errors++; 700 dev->stats.tx_carrier_errors++;
703 goto out_dropped_unlock; 701 goto out_dropped_unlock;
@@ -952,12 +950,12 @@ static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port)
952 950
953 err = -ENOMEM; 951 err = -ENOMEM;
954 if (!buf) { 952 if (!buf) {
955 printk(KERN_ERR "TX buffer allocation failure\n"); 953 pr_err("TX buffer allocation failure\n");
956 goto err_out; 954 goto err_out;
957 } 955 }
958 err = -EFAULT; 956 err = -EFAULT;
959 if ((unsigned long)buf & (8UL - 1)) { 957 if ((unsigned long)buf & (8UL - 1)) {
960 printk(KERN_ERR "TX buffer misaligned\n"); 958 pr_err("TX buffer misaligned\n");
961 kfree(buf); 959 kfree(buf);
962 goto err_out; 960 goto err_out;
963 } 961 }
@@ -1030,7 +1028,7 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
1030 1028
1031 dev = alloc_etherdev(sizeof(*vp)); 1029 dev = alloc_etherdev(sizeof(*vp));
1032 if (!dev) { 1030 if (!dev) {
1033 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); 1031 pr_err("Etherdev alloc failed, aborting\n");
1034 return ERR_PTR(-ENOMEM); 1032 return ERR_PTR(-ENOMEM);
1035 } 1033 }
1036 1034
@@ -1056,12 +1054,11 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
1056 1054
1057 err = register_netdev(dev); 1055 err = register_netdev(dev);
1058 if (err) { 1056 if (err) {
1059 printk(KERN_ERR PFX "Cannot register net device, " 1057 pr_err("Cannot register net device, aborting\n");
1060 "aborting.\n");
1061 goto err_out_free_dev; 1058 goto err_out_free_dev;
1062 } 1059 }
1063 1060
1064 printk(KERN_INFO "%s: Sun LDOM vnet %pM\n", dev->name, dev->dev_addr); 1061 netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);
1065 1062
1066 list_add(&vp->list, &vnet_list); 1063 list_add(&vp->list, &vnet_list);
1067 1064
@@ -1133,10 +1130,7 @@ static struct vio_driver_ops vnet_vio_ops = {
1133 1130
1134static void __devinit print_version(void) 1131static void __devinit print_version(void)
1135{ 1132{
1136 static int version_printed; 1133 printk_once(KERN_INFO "%s", version);
1137
1138 if (version_printed++ == 0)
1139 printk(KERN_INFO "%s", version);
1140} 1134}
1141 1135
1142const char *remote_macaddr_prop = "remote-mac-address"; 1136const char *remote_macaddr_prop = "remote-mac-address";
@@ -1157,7 +1151,7 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
1157 1151
1158 vp = vnet_find_parent(hp, vdev->mp); 1152 vp = vnet_find_parent(hp, vdev->mp);
1159 if (IS_ERR(vp)) { 1153 if (IS_ERR(vp)) {
1160 printk(KERN_ERR PFX "Cannot find port parent vnet.\n"); 1154 pr_err("Cannot find port parent vnet\n");
1161 err = PTR_ERR(vp); 1155 err = PTR_ERR(vp);
1162 goto err_out_put_mdesc; 1156 goto err_out_put_mdesc;
1163 } 1157 }
@@ -1165,15 +1159,14 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
1165 rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); 1159 rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
1166 err = -ENODEV; 1160 err = -ENODEV;
1167 if (!rmac) { 1161 if (!rmac) {
1168 printk(KERN_ERR PFX "Port lacks %s property.\n", 1162 pr_err("Port lacks %s property\n", remote_macaddr_prop);
1169 remote_macaddr_prop);
1170 goto err_out_put_mdesc; 1163 goto err_out_put_mdesc;
1171 } 1164 }
1172 1165
1173 port = kzalloc(sizeof(*port), GFP_KERNEL); 1166 port = kzalloc(sizeof(*port), GFP_KERNEL);
1174 err = -ENOMEM; 1167 err = -ENOMEM;
1175 if (!port) { 1168 if (!port) {
1176 printk(KERN_ERR PFX "Cannot allocate vnet_port.\n"); 1169 pr_err("Cannot allocate vnet_port\n");
1177 goto err_out_put_mdesc; 1170 goto err_out_put_mdesc;
1178 } 1171 }
1179 1172
@@ -1214,9 +1207,8 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
1214 1207
1215 dev_set_drvdata(&vdev->dev, port); 1208 dev_set_drvdata(&vdev->dev, port);
1216 1209
1217 printk(KERN_INFO "%s: PORT ( remote-mac %pM%s )\n", 1210 pr_info("%s: PORT ( remote-mac %pM%s )\n",
1218 vp->dev->name, port->raddr, 1211 vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
1219 switch_port ? " switch-port" : "");
1220 1212
1221 vio_port_up(&port->vio); 1213 vio_port_up(&port->vio);
1222 1214
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 99e423a5b9f1..b6eec8cea209 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1167,7 +1167,7 @@ static void print_eth(const u8 *add)
1167static int tc35815_tx_full(struct net_device *dev) 1167static int tc35815_tx_full(struct net_device *dev)
1168{ 1168{
1169 struct tc35815_local *lp = netdev_priv(dev); 1169 struct tc35815_local *lp = netdev_priv(dev);
1170 return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end); 1170 return (lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end;
1171} 1171}
1172 1172
1173static void tc35815_restart(struct net_device *dev) 1173static void tc35815_restart(struct net_device *dev)
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 737df6032bbc..8b3dc1eb4015 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -92,7 +92,7 @@ static void bdx_rx_free(struct bdx_priv *priv);
92static void bdx_tx_free(struct bdx_priv *priv); 92static void bdx_tx_free(struct bdx_priv *priv);
93 93
94/* Definitions needed by bdx_probe */ 94/* Definitions needed by bdx_probe */
95static void bdx_ethtool_ops(struct net_device *netdev); 95static void bdx_set_ethtool_ops(struct net_device *netdev);
96 96
97/************************************************************************* 97/*************************************************************************
98 * Print Info * 98 * Print Info *
@@ -927,13 +927,6 @@ static void bdx_update_stats(struct bdx_priv *priv)
927 BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i); 927 BDX_ASSERT((sizeof(struct bdx_stats) / sizeof(u64)) != i);
928} 928}
929 929
930static struct net_device_stats *bdx_get_stats(struct net_device *ndev)
931{
932 struct bdx_priv *priv = netdev_priv(ndev);
933 struct net_device_stats *net_stat = &priv->net_stats;
934 return net_stat;
935}
936
937static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len, 930static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
938 u16 rxd_vlan); 931 u16 rxd_vlan);
939static void print_rxfd(struct rxf_desc *rxfd); 932static void print_rxfd(struct rxf_desc *rxfd);
@@ -1220,6 +1213,7 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
1220 1213
1221static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget) 1214static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
1222{ 1215{
1216 struct net_device *ndev = priv->ndev;
1223 struct sk_buff *skb, *skb2; 1217 struct sk_buff *skb, *skb2;
1224 struct rxd_desc *rxdd; 1218 struct rxd_desc *rxdd;
1225 struct rx_map *dm; 1219 struct rx_map *dm;
@@ -1273,7 +1267,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
1273 1267
1274 if (unlikely(GET_RXD_ERR(rxd_val1))) { 1268 if (unlikely(GET_RXD_ERR(rxd_val1))) {
1275 DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1)); 1269 DBG("rxd_err = 0x%x\n", GET_RXD_ERR(rxd_val1));
1276 priv->net_stats.rx_errors++; 1270 ndev->stats.rx_errors++;
1277 bdx_recycle_skb(priv, rxdd); 1271 bdx_recycle_skb(priv, rxdd);
1278 continue; 1272 continue;
1279 } 1273 }
@@ -1300,15 +1294,16 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
1300 bdx_rxdb_free_elem(db, rxdd->va_lo); 1294 bdx_rxdb_free_elem(db, rxdd->va_lo);
1301 } 1295 }
1302 1296
1303 priv->net_stats.rx_bytes += len; 1297 ndev->stats.rx_bytes += len;
1304 1298
1305 skb_put(skb, len); 1299 skb_put(skb, len);
1306 skb->ip_summed = CHECKSUM_UNNECESSARY; 1300 skb->protocol = eth_type_trans(skb, ndev);
1307 skb->protocol = eth_type_trans(skb, priv->ndev);
1308 1301
1309 /* Non-IP packets aren't checksum-offloaded */ 1302 /* Non-IP packets aren't checksum-offloaded */
1310 if (GET_RXD_PKT_ID(rxd_val1) == 0) 1303 if (GET_RXD_PKT_ID(rxd_val1) == 0)
1311 skb->ip_summed = CHECKSUM_NONE; 1304 skb_checksum_none_assert(skb);
1305 else
1306 skb->ip_summed = CHECKSUM_UNNECESSARY;
1312 1307
1313 NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb); 1308 NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);
1314 1309
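skb_checksum_none_assert() documents, rather than sets, the checksum state: a freshly allocated skb already has ip_summed == CHECKSUM_NONE, and the helper only BUG()s on a mismatch in debug builds. The hunk above also inverts the test so each packet's state is written exactly once:

	if (GET_RXD_PKT_ID(rxd_val1) == 0)
		skb_checksum_none_assert(skb);	/* non-IP: no offload */
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;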
@@ -1316,7 +1311,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
1316 break; 1311 break;
1317 } 1312 }
1318 1313
1319 priv->net_stats.rx_packets += done; 1314 ndev->stats.rx_packets += done;
1320 1315
1321 /* FIXME: do smth to minimize pci accesses */ 1316 /* FIXME: do smth to minimize pci accesses */
1322 WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR); 1317 WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
@@ -1712,8 +1707,8 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
1712#ifdef BDX_LLTX 1707#ifdef BDX_LLTX
1713 ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ 1708 ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
1714#endif 1709#endif
1715 priv->net_stats.tx_packets++; 1710 ndev->stats.tx_packets++;
1716 priv->net_stats.tx_bytes += skb->len; 1711 ndev->stats.tx_bytes += skb->len;
1717 1712
1718 if (priv->tx_level < BDX_MIN_TX_LEVEL) { 1713 if (priv->tx_level < BDX_MIN_TX_LEVEL) {
1719 DBG("%s: %s: TX Q STOP level %d\n", 1714 DBG("%s: %s: TX Q STOP level %d\n",
@@ -1888,7 +1883,6 @@ static const struct net_device_ops bdx_netdev_ops = {
1888 .ndo_validate_addr = eth_validate_addr, 1883 .ndo_validate_addr = eth_validate_addr,
1889 .ndo_do_ioctl = bdx_ioctl, 1884 .ndo_do_ioctl = bdx_ioctl,
1890 .ndo_set_multicast_list = bdx_setmulti, 1885 .ndo_set_multicast_list = bdx_setmulti,
1891 .ndo_get_stats = bdx_get_stats,
1892 .ndo_change_mtu = bdx_change_mtu, 1886 .ndo_change_mtu = bdx_change_mtu,
1893 .ndo_set_mac_address = bdx_set_mac, 1887 .ndo_set_mac_address = bdx_set_mac,
1894 .ndo_vlan_rx_register = bdx_vlan_rx_register, 1888 .ndo_vlan_rx_register = bdx_vlan_rx_register,
@@ -2012,7 +2006,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2012 ndev->netdev_ops = &bdx_netdev_ops; 2006 ndev->netdev_ops = &bdx_netdev_ops;
2013 ndev->tx_queue_len = BDX_NDEV_TXQ_LEN; 2007 ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
2014 2008
2015 bdx_ethtool_ops(ndev); /* ethtool interface */ 2009 bdx_set_ethtool_ops(ndev); /* ethtool interface */
2016 2010
2017 /* these fields are used for info purposes only 2011 /* these fields are used for info purposes only
2018 * so we can have them same for all ports of the board */ 2012 * so we can have them same for all ports of the board */
@@ -2417,10 +2411,10 @@ static void bdx_get_ethtool_stats(struct net_device *netdev,
2417} 2411}
2418 2412
2419/* 2413/*
2420 * bdx_ethtool_ops - ethtool interface implementation 2414 * bdx_set_ethtool_ops - ethtool interface implementation
2421 * @netdev 2415 * @netdev
2422 */ 2416 */
2423static void bdx_ethtool_ops(struct net_device *netdev) 2417static void bdx_set_ethtool_ops(struct net_device *netdev)
2424{ 2418{
2425 static const struct ethtool_ops bdx_ethtool_ops = { 2419 static const struct ethtool_ops bdx_ethtool_ops = {
2426 .get_settings = bdx_get_settings, 2420 .get_settings = bdx_get_settings,
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 67e3b71bf705..b6ba8601e2b5 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -269,7 +269,6 @@ struct bdx_priv {
269 u32 msg_enable; 269 u32 msg_enable;
270 int stats_flag; 270 int stats_flag;
271 struct bdx_stats hw_stats; 271 struct bdx_stats hw_stats;
272 struct net_device_stats net_stats;
273 struct pci_dev *pdev; 272 struct pci_dev *pdev;
274 273
275 struct pci_nic *nic; 274 struct pci_nic *nic;
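Dropping tehuti's private net_stats copy in favour of the net_device's embedded stats block also allows deleting .ndo_get_stats: when that hook is absent, dev_get_stats() falls back to dev->stats directly. Counters are then bumped in place:

	ndev->stats.rx_errors++;	/* was priv->net_stats.rx_errors++ */
	ndev->stats.rx_bytes += len;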
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index bc3af78a869f..fdb438dca9b3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -1917,19 +1917,16 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1917 */ 1917 */
1918static int tg3_phy_reset(struct tg3 *tp) 1918static int tg3_phy_reset(struct tg3 *tp)
1919{ 1919{
1920 u32 cpmuctrl; 1920 u32 val, cpmuctrl;
1921 u32 phy_status;
1922 int err; 1921 int err;
1923 1922
1924 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 1923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1925 u32 val;
1926
1927 val = tr32(GRC_MISC_CFG); 1924 val = tr32(GRC_MISC_CFG);
1928 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 1925 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1929 udelay(40); 1926 udelay(40);
1930 } 1927 }
1931 err = tg3_readphy(tp, MII_BMSR, &phy_status); 1928 err = tg3_readphy(tp, MII_BMSR, &val);
1932 err |= tg3_readphy(tp, MII_BMSR, &phy_status); 1929 err |= tg3_readphy(tp, MII_BMSR, &val);
1933 if (err != 0) 1930 if (err != 0)
1934 return -EBUSY; 1931 return -EBUSY;
1935 1932
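The tg3_phy_reset() edits above and below are a local-variable consolidation: several block-scoped scratch words (phy_status, phy, phy_reg) collapse into the single function-scoped u32 val, since none of the values is live across statements. Illustrative shape:

	u32 val, cpmuctrl;

	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);	/* value itself discarded */
	if (err != 0)
		return -EBUSY;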
@@ -1961,18 +1958,14 @@ static int tg3_phy_reset(struct tg3 *tp)
1961 return err; 1958 return err;
1962 1959
1963 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 1960 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1964 u32 phy; 1961 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1965 1962 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
1966 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1967 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1968 1963
1969 tw32(TG3_CPMU_CTRL, cpmuctrl); 1964 tw32(TG3_CPMU_CTRL, cpmuctrl);
1970 } 1965 }
1971 1966
1972 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || 1967 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1973 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { 1968 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1974 u32 val;
1975
1976 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 1969 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1977 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 1970 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1978 CPMU_LSPD_1000MB_MACCLK_12_5) { 1971 CPMU_LSPD_1000MB_MACCLK_12_5) {
@@ -2028,23 +2021,19 @@ out:
2028 /* Cannot do read-modify-write on 5401 */ 2021 /* Cannot do read-modify-write on 5401 */
2029 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); 2022 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2030 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 2023 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2031 u32 phy_reg;
2032
2033 /* Set bit 14 with read-modify-write to preserve other bits */ 2024 /* Set bit 14 with read-modify-write to preserve other bits */
2034 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) && 2025 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
2035 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg)) 2026 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
2036 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000); 2027 tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
2037 } 2028 }
2038 2029
2039 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2030 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2040 * jumbo frames transmission. 2031 * jumbo frames transmission.
2041 */ 2032 */
2042 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 2033 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2043 u32 phy_reg; 2034 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2044
2045 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
2046 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2035 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2047 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2036 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2048 } 2037 }
2049 2038
2050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 2039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -3060,7 +3049,7 @@ static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3060static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) 3049static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3061{ 3050{
3062 int current_link_up; 3051 int current_link_up;
3063 u32 bmsr, dummy; 3052 u32 bmsr, val;
3064 u32 lcl_adv, rmt_adv; 3053 u32 lcl_adv, rmt_adv;
3065 u16 current_speed; 3054 u16 current_speed;
3066 u8 current_duplex; 3055 u8 current_duplex;
@@ -3140,8 +3129,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3140 } 3129 }
3141 3130
3142 /* Clear pending interrupts... */ 3131 /* Clear pending interrupts... */
3143 tg3_readphy(tp, MII_TG3_ISTAT, &dummy); 3132 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3144 tg3_readphy(tp, MII_TG3_ISTAT, &dummy); 3133 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3145 3134
3146 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) 3135 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3147 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 3136 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
@@ -3162,8 +3151,6 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3162 current_duplex = DUPLEX_INVALID; 3151 current_duplex = DUPLEX_INVALID;
3163 3152
3164 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { 3153 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3165 u32 val;
3166
3167 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); 3154 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3168 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val); 3155 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3169 if (!(val & (1 << 10))) { 3156 if (!(val & (1 << 10))) {
@@ -3238,13 +3225,11 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3238 3225
3239relink: 3226relink:
3240 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 3227 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3241 u32 tmp;
3242
3243 tg3_phy_copper_begin(tp); 3228 tg3_phy_copper_begin(tp);
3244 3229
3245 tg3_readphy(tp, MII_BMSR, &tmp); 3230 tg3_readphy(tp, MII_BMSR, &bmsr);
3246 if (!tg3_readphy(tp, MII_BMSR, &tmp) && 3231 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3247 (tmp & BMSR_LSTATUS)) 3232 (bmsr & BMSR_LSTATUS))
3248 current_link_up = 1; 3233 current_link_up = 1;
3249 } 3234 }
3250 3235
@@ -4549,7 +4534,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
4549 struct tg3 *tp = tnapi->tp; 4534 struct tg3 *tp = tnapi->tp;
4550 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 4535 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4551 struct ring_info *src_map, *dest_map; 4536 struct ring_info *src_map, *dest_map;
4552 struct tg3_rx_prodring_set *spr = &tp->prodring[0]; 4537 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4553 int dest_idx; 4538 int dest_idx;
4554 4539
4555 switch (opaque_key) { 4540 switch (opaque_key) {
@@ -4619,7 +4604,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4619 u32 sw_idx = tnapi->rx_rcb_ptr; 4604 u32 sw_idx = tnapi->rx_rcb_ptr;
4620 u16 hw_idx; 4605 u16 hw_idx;
4621 int received; 4606 int received;
4622 struct tg3_rx_prodring_set *tpr = tnapi->prodring; 4607 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4623 4608
4624 hw_idx = *(tnapi->rx_rcb_prod_idx); 4609 hw_idx = *(tnapi->rx_rcb_prod_idx);
4625 /* 4610 /*
@@ -4644,13 +4629,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4644 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4629 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4645 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4630 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4646 if (opaque_key == RXD_OPAQUE_RING_STD) { 4631 if (opaque_key == RXD_OPAQUE_RING_STD) {
4647 ri = &tp->prodring[0].rx_std_buffers[desc_idx]; 4632 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4648 dma_addr = dma_unmap_addr(ri, mapping); 4633 dma_addr = dma_unmap_addr(ri, mapping);
4649 skb = ri->skb; 4634 skb = ri->skb;
4650 post_ptr = &std_prod_idx; 4635 post_ptr = &std_prod_idx;
4651 rx_std_posted++; 4636 rx_std_posted++;
4652 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 4637 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4653 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; 4638 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4654 dma_addr = dma_unmap_addr(ri, mapping); 4639 dma_addr = dma_unmap_addr(ri, mapping);
4655 skb = ri->skb; 4640 skb = ri->skb;
4656 post_ptr = &jmb_prod_idx; 4641 post_ptr = &jmb_prod_idx;
@@ -4719,7 +4704,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4719 >> RXD_TCPCSUM_SHIFT) == 0xffff)) 4704 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4720 skb->ip_summed = CHECKSUM_UNNECESSARY; 4705 skb->ip_summed = CHECKSUM_UNNECESSARY;
4721 else 4706 else
4722 skb->ip_summed = CHECKSUM_NONE; 4707 skb_checksum_none_assert(skb);
4723 4708
4724 skb->protocol = eth_type_trans(skb, tp->dev); 4709 skb->protocol = eth_type_trans(skb, tp->dev);
4725 4710
@@ -4981,14 +4966,14 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4981 work_done += tg3_rx(tnapi, budget - work_done); 4966 work_done += tg3_rx(tnapi, budget - work_done);
4982 4967
4983 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { 4968 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4984 struct tg3_rx_prodring_set *dpr = &tp->prodring[0]; 4969 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
4985 int i, err = 0; 4970 int i, err = 0;
4986 u32 std_prod_idx = dpr->rx_std_prod_idx; 4971 u32 std_prod_idx = dpr->rx_std_prod_idx;
4987 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 4972 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4988 4973
4989 for (i = 1; i < tp->irq_cnt; i++) 4974 for (i = 1; i < tp->irq_cnt; i++)
4990 err |= tg3_rx_prodring_xfer(tp, dpr, 4975 err |= tg3_rx_prodring_xfer(tp, dpr,
4991 tp->napi[i].prodring); 4976 &tp->napi[i].prodring);
4992 4977
4993 wmb(); 4978 wmb();
4994 4979
@@ -5404,8 +5389,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5404{ 5389{
5405 u32 base = (u32) mapping & 0xffffffff; 5390 u32 base = (u32) mapping & 0xffffffff;
5406 5391
5407 return ((base > 0xffffdcc0) && 5392 return (base > 0xffffdcc0) && (base + len + 8 < base);
5408 (base + len + 8 < base));
5409} 5393}
5410 5394
5411/* Test for DMA addresses > 40-bit */ 5395/* Test for DMA addresses > 40-bit */
@@ -5414,7 +5398,7 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5414{ 5398{
5415#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 5399#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5416 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) 5400 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5417 return (((u64) mapping + len) > DMA_BIT_MASK(40)); 5401 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5418 return 0; 5402 return 0;
5419#else 5403#else
5420 return 0; 5404 return 0;
@@ -5574,9 +5558,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5574 goto out_unlock; 5558 goto out_unlock;
5575 } 5559 }
5576 5560
5577 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 5561 if (skb_is_gso_v6(skb)) {
5578 hdrlen = skb_headlen(skb) - ETH_HLEN; 5562 hdrlen = skb_headlen(skb) - ETH_HLEN;
5579 else { 5563 } else {
5580 struct iphdr *iph = ip_hdr(skb); 5564 struct iphdr *iph = ip_hdr(skb);
5581 5565
5582 tcp_opt_len = tcp_optlen(skb); 5566 tcp_opt_len = tcp_optlen(skb);
@@ -5798,7 +5782,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5798 iph = ip_hdr(skb); 5782 iph = ip_hdr(skb);
5799 tcp_opt_len = tcp_optlen(skb); 5783 tcp_opt_len = tcp_optlen(skb);
5800 5784
5801 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { 5785 if (skb_is_gso_v6(skb)) {
5802 hdr_len = skb_headlen(skb) - ETH_HLEN; 5786 hdr_len = skb_headlen(skb) - ETH_HLEN;
5803 } else { 5787 } else {
5804 u32 ip_tcp_len; 5788 u32 ip_tcp_len;
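skb_is_gso_v6() is the stock helper for the SKB_GSO_TCPV6 test, so both transmit paths now read identically. Its definition in <linux/skbuff.h> is essentially:

	static inline int skb_is_gso_v6(const struct sk_buff *skb)
	{
		return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
	}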
@@ -6057,7 +6041,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
6057{ 6041{
6058 int i; 6042 int i;
6059 6043
6060 if (tpr != &tp->prodring[0]) { 6044 if (tpr != &tp->napi[0].prodring) {
6061 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; 6045 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6062 i = (i + 1) % TG3_RX_RING_SIZE) 6046 i = (i + 1) % TG3_RX_RING_SIZE)
6063 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], 6047 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
@@ -6103,7 +6087,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6103 tpr->rx_jmb_cons_idx = 0; 6087 tpr->rx_jmb_cons_idx = 0;
6104 tpr->rx_jmb_prod_idx = 0; 6088 tpr->rx_jmb_prod_idx = 0;
6105 6089
6106 if (tpr != &tp->prodring[0]) { 6090 if (tpr != &tp->napi[0].prodring) {
6107 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE); 6091 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
6108 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) 6092 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
6109 memset(&tpr->rx_jmb_buffers[0], 0, 6093 memset(&tpr->rx_jmb_buffers[0], 0,
@@ -6253,7 +6237,7 @@ static void tg3_free_rings(struct tg3 *tp)
6253 for (j = 0; j < tp->irq_cnt; j++) { 6237 for (j = 0; j < tp->irq_cnt; j++) {
6254 struct tg3_napi *tnapi = &tp->napi[j]; 6238 struct tg3_napi *tnapi = &tp->napi[j];
6255 6239
6256 tg3_rx_prodring_free(tp, &tp->prodring[j]); 6240 tg3_rx_prodring_free(tp, &tnapi->prodring);
6257 6241
6258 if (!tnapi->tx_buffers) 6242 if (!tnapi->tx_buffers)
6259 continue; 6243 continue;
@@ -6325,7 +6309,7 @@ static int tg3_init_rings(struct tg3 *tp)
6325 if (tnapi->rx_rcb) 6309 if (tnapi->rx_rcb)
6326 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 6310 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6327 6311
6328 if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) { 6312 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6329 tg3_free_rings(tp); 6313 tg3_free_rings(tp);
6330 return -ENOMEM; 6314 return -ENOMEM;
6331 } 6315 }
@@ -6361,6 +6345,8 @@ static void tg3_free_consistent(struct tg3 *tp)
6361 tnapi->rx_rcb = NULL; 6345 tnapi->rx_rcb = NULL;
6362 } 6346 }
6363 6347
6348 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6349
6364 if (tnapi->hw_status) { 6350 if (tnapi->hw_status) {
6365 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, 6351 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6366 tnapi->hw_status, 6352 tnapi->hw_status,
@@ -6374,9 +6360,6 @@ static void tg3_free_consistent(struct tg3 *tp)
6374 tp->hw_stats, tp->stats_mapping); 6360 tp->hw_stats, tp->stats_mapping);
6375 tp->hw_stats = NULL; 6361 tp->hw_stats = NULL;
6376 } 6362 }
6377
6378 for (i = 0; i < tp->irq_cnt; i++)
6379 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
6380} 6363}
6381 6364
6382/* 6365/*
@@ -6387,11 +6370,6 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6387{ 6370{
6388 int i; 6371 int i;
6389 6372
6390 for (i = 0; i < tp->irq_cnt; i++) {
6391 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6392 goto err_out;
6393 }
6394
6395 tp->hw_stats = pci_alloc_consistent(tp->pdev, 6373 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6396 sizeof(struct tg3_hw_stats), 6374 sizeof(struct tg3_hw_stats),
6397 &tp->stats_mapping); 6375 &tp->stats_mapping);
@@ -6413,6 +6391,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6413 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 6391 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6414 sblk = tnapi->hw_status; 6392 sblk = tnapi->hw_status;
6415 6393
6394 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6395 goto err_out;
6396
6416 /* If multivector TSS is enabled, vector 0 does not handle 6397 /* If multivector TSS is enabled, vector 0 does not handle
6417 * tx interrupts. Don't allocate any resources for it. 6398 * tx interrupts. Don't allocate any resources for it.
6418 */ 6399 */
@@ -6452,8 +6433,6 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6452 break; 6433 break;
6453 } 6434 }
6454 6435
6455 tnapi->prodring = &tp->prodring[i];
6456
6457 /* 6436 /*
6458 * If multivector RSS is enabled, vector 0 does not handle 6437 * If multivector RSS is enabled, vector 0 does not handle
6459 * rx or tx interrupts. Don't allocate any resources for it. 6438 * rx or tx interrupts. Don't allocate any resources for it.
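The recurring tp->prodring[i] -> tp->napi[i].prodring substitutions embed one RX producer ring set in each tg3_napi instead of keeping a parallel array in struct tg3, so a vector's rings are set up and torn down together with the vector (see the tg3_alloc_consistent()/tg3_free_consistent() hunks above). One level of indirection disappears as well: tnapi->prodring was a pointer and is now the structure itself, hence the new address-of at the use sites:

	struct tg3_rx_prodring_set *tpr = &tnapi->prodring; /* was tnapi->prodring */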
@@ -6596,6 +6575,10 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6596 int i; 6575 int i;
6597 u32 apedata; 6576 u32 apedata;
6598 6577
6578 /* NCSI does not support APE events */
6579 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
6580 return;
6581
6599 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 6582 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6600 if (apedata != APE_SEG_SIG_MAGIC) 6583 if (apedata != APE_SEG_SIG_MAGIC)
6601 return; 6584 return;
@@ -6647,6 +6630,8 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6647 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); 6630 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6648 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, 6631 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6649 APE_HOST_BEHAV_NO_PHYLOCK); 6632 APE_HOST_BEHAV_NO_PHYLOCK);
6633 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6634 TG3_APE_HOST_DRVR_STATE_START);
6650 6635
6651 event = APE_EVENT_STATUS_STATE_START; 6636 event = APE_EVENT_STATUS_STATE_START;
6652 break; 6637 break;
@@ -6658,6 +6643,16 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6658 */ 6643 */
6659 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); 6644 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6660 6645
6646 if (device_may_wakeup(&tp->pdev->dev) &&
6647 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
6648 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6649 TG3_APE_HOST_WOL_SPEED_AUTO);
6650 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6651 } else
6652 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6653
6654 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6655
6661 event = APE_EVENT_STATUS_STATE_UNLOAD; 6656 event = APE_EVENT_STATUS_STATE_UNLOAD;
6662 break; 6657 break;
6663 case RESET_KIND_SUSPEND: 6658 case RESET_KIND_SUSPEND:
@@ -7548,7 +7543,7 @@ static void tg3_rings_reset(struct tg3 *tp)
7548 7543
7549 /* Zero mailbox registers. */ 7544 /* Zero mailbox registers. */
7550 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) { 7545 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7551 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { 7546 for (i = 1; i < tp->irq_max; i++) {
7552 tp->napi[i].tx_prod = 0; 7547 tp->napi[i].tx_prod = 0;
7553 tp->napi[i].tx_cons = 0; 7548 tp->napi[i].tx_cons = 0;
7554 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 7549 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -7631,7 +7626,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7631{ 7626{
7632 u32 val, rdmac_mode; 7627 u32 val, rdmac_mode;
7633 int i, err, limit; 7628 int i, err, limit;
7634 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 7629 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7635 7630
7636 tg3_disable_ints(tp); 7631 tg3_disable_ints(tp);
7637 7632
@@ -8015,6 +8010,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 8010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8016 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 8011 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8017 8012
8013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8014 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8016 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8017 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8018 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8019 tw32(TG3_RDMA_RSRVCTRL_REG,
8020 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8021 }
8022
8018 /* Receive/send statistics. */ 8023 /* Receive/send statistics. */
8019 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 8024 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8020 val = tr32(RCVLPC_STATS_ENABLE); 8025 val = tr32(RCVLPC_STATS_ENABLE);
@@ -8817,16 +8822,9 @@ static bool tg3_enable_msix(struct tg3 *tp)
8817 tp->napi[i].irq_vec = msix_ent[i].vector; 8822 tp->napi[i].irq_vec = msix_ent[i].vector;
8818 8823
8819 tp->dev->real_num_tx_queues = 1; 8824 tp->dev->real_num_tx_queues = 1;
8820 if (tp->irq_cnt > 1) { 8825 if (tp->irq_cnt > 1)
8821 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; 8826 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8822 8827
8823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8824 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8825 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
8826 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8827 }
8828 }
8829
8830 return true; 8828 return true;
8831} 8829}
8832 8830
@@ -9867,7 +9865,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
9867 tp->rx_pending = 63; 9865 tp->rx_pending = 63;
9868 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 9866 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9869 9867
9870 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) 9868 for (i = 0; i < tp->irq_max; i++)
9871 tp->napi[i].tx_pending = ering->tx_pending; 9869 tp->napi[i].tx_pending = ering->tx_pending;
9872 9870
9873 if (netif_running(dev)) { 9871 if (netif_running(dev)) {
@@ -10608,7 +10606,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10608 int num_pkts, tx_len, rx_len, i, err; 10606 int num_pkts, tx_len, rx_len, i, err;
10609 struct tg3_rx_buffer_desc *desc; 10607 struct tg3_rx_buffer_desc *desc;
10610 struct tg3_napi *tnapi, *rnapi; 10608 struct tg3_napi *tnapi, *rnapi;
10611 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 10609 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10612 10610
10613 tnapi = &tp->napi[0]; 10611 tnapi = &tp->napi[0];
10614 rnapi = &tp->napi[0]; 10612 rnapi = &tp->napi[0];
@@ -12401,14 +12399,18 @@ skip_phy_reset:
12401 12399
12402static void __devinit tg3_read_vpd(struct tg3 *tp) 12400static void __devinit tg3_read_vpd(struct tg3 *tp)
12403{ 12401{
12404 u8 vpd_data[TG3_NVM_VPD_LEN]; 12402 u8 *vpd_data;
12405 unsigned int block_end, rosize, len; 12403 unsigned int block_end, rosize, len;
12406 int j, i = 0; 12404 int j, i = 0;
12407 u32 magic; 12405 u32 magic;
12408 12406
12409 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 12407 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12410 tg3_nvram_read(tp, 0x0, &magic)) 12408 tg3_nvram_read(tp, 0x0, &magic))
12411 goto out_not_found; 12409 goto out_no_vpd;
12410
12411 vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
12412 if (!vpd_data)
12413 goto out_no_vpd;
12412 12414
12413 if (magic == TG3_EEPROM_MAGIC) { 12415 if (magic == TG3_EEPROM_MAGIC) {
12414 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { 12416 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
@@ -12492,9 +12494,12 @@ partno:
12492 12494
12493 memcpy(tp->board_part_number, &vpd_data[i], len); 12495 memcpy(tp->board_part_number, &vpd_data[i], len);
12494 12496
12495 return;
12496
12497out_not_found: 12497out_not_found:
12498 kfree(vpd_data);
12499 if (!tp->board_part_number[0])
12500 return;
12501
12502out_no_vpd:
12498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12499 strcpy(tp->board_part_number, "BCM95906"); 12504 strcpy(tp->board_part_number, "BCM95906");
12500 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && 12505 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
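tg3_read_vpd() used to keep the whole TG3_NVM_VPD_LEN-byte VPD image on the stack; it is now heap-allocated, with kfree() on the common exit path and a separate out_no_vpd label for the cases where no buffer exists yet. Abbreviated control flow of the new version:

	vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
	if (!vpd_data)
		goto out_no_vpd;
	/* ... read and parse the read-only VPD section ... */
out_not_found:
	kfree(vpd_data);
	if (!tp->board_part_number[0])
		return;			/* a part number was stored */
out_no_vpd:
	/* fall back to a canned per-ASIC part number */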
@@ -12736,10 +12741,12 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12736 12741
12737 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 12742 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12738 12743
12739 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) 12744 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
12745 tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
12740 fwtype = "NCSI"; 12746 fwtype = "NCSI";
12741 else 12747 } else {
12742 fwtype = "DASH"; 12748 fwtype = "DASH";
12749 }
12743 12750
12744 vlen = strlen(tp->fw_ver); 12751 vlen = strlen(tp->fw_ver);
12745 12752
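
The tg3_read_dash_ver() hunk does double duty: besides picking the firmware-type string for the version banner, it latches the new TG3_FLG3_APE_HAS_NCSI flag so later code can tell NCSI-managed parts from DASH ones without re-reading the APE feature register. The latch pattern, using tg3's accessor:

    u32 features = tg3_ape_read32(tp, TG3_APE_FW_FEATURES);
    const char *fwtype;

    if (features & TG3_APE_FW_FEATURE_NCSI) {
            tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;   /* remembered for later */
            fwtype = "NCSI";
    } else {
            fwtype = "DASH";
    }
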
@@ -13410,10 +13417,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13410 if (err) 13417 if (err)
13411 return err; 13418 return err;
13412 13419
13413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
13414 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
13415 return -ENOTSUPP;
13416
13417 /* Initialize data/descriptor byte/word swapping. */ 13420 /* Initialize data/descriptor byte/word swapping. */
13418 val = tr32(GRC_MODE); 13421 val = tr32(GRC_MODE);
13419 val &= GRC_MODE_HOST_STACKUP; 13422 val &= GRC_MODE_HOST_STACKUP;
@@ -14442,7 +14445,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14442 } 14445 }
14443 14446
14444 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && 14447 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14445 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 && 14448 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
14446 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) 14449 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14447 dev->netdev_ops = &tg3_netdev_ops; 14450 dev->netdev_ops = &tg3_netdev_ops;
14448 else 14451 else
@@ -14581,7 +14584,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14581 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; 14584 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14582 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; 14585 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14583 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; 14586 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14584 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { 14587 for (i = 0; i < tp->irq_max; i++) {
14585 struct tg3_napi *tnapi = &tp->napi[i]; 14588 struct tg3_napi *tnapi = &tp->napi[i];
14586 14589
14587 tnapi->tp = tp; 14590 tnapi->tp = tp;
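
Both loops that were bounded by the compile-time TG3_IRQ_MAX_VECS, the ethtool ring-sizing loop in tg3_set_ringparam() and the mailbox setup here, now stop at tp->irq_max, the vector count actually supported by the probed chip, so single-vector devices no longer touch napi slots they never set up. The mailbox loop, condensed:

    /* Per-vector init, bounded by what this chip supports. */
    for (i = 0; i < tp->irq_max; i++) {
            struct tg3_napi *tnapi = &tp->napi[i];

            tnapi->tp = tp;
            tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; /* driver default, defined elsewhere */
            /* ... interrupt/receive mailboxes assigned per vector ... */
    }
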
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4937bd190964..44733e4a68a2 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1302,7 +1302,11 @@
1302#define RDMAC_STATUS_FIFOURUN 0x00000080 1302#define RDMAC_STATUS_FIFOURUN 0x00000080
1303#define RDMAC_STATUS_FIFOOREAD 0x00000100 1303#define RDMAC_STATUS_FIFOOREAD 0x00000100
1304#define RDMAC_STATUS_LNGREAD 0x00000200 1304#define RDMAC_STATUS_LNGREAD 0x00000200
1305/* 0x4808 --> 0x4c00 unused */ 1305/* 0x4808 --> 0x4900 unused */
1306
1307#define TG3_RDMA_RSRVCTRL_REG 0x00004900
1308#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
1309/* 0x4904 --> 0x4c00 unused */
1306 1310
1307/* Write DMA control registers */ 1311/* Write DMA control registers */
1308#define WDMAC_MODE 0x00004c00 1312#define WDMAC_MODE 0x00004c00
@@ -2176,7 +2180,7 @@
2176#define TG3_APE_HOST_SEG_SIG 0x4200 2180#define TG3_APE_HOST_SEG_SIG 0x4200
2177#define APE_HOST_SEG_SIG_MAGIC 0x484f5354 2181#define APE_HOST_SEG_SIG_MAGIC 0x484f5354
2178#define TG3_APE_HOST_SEG_LEN 0x4204 2182#define TG3_APE_HOST_SEG_LEN 0x4204
2179#define APE_HOST_SEG_LEN_MAGIC 0x0000001c 2183#define APE_HOST_SEG_LEN_MAGIC 0x00000020
2180#define TG3_APE_HOST_INIT_COUNT 0x4208 2184#define TG3_APE_HOST_INIT_COUNT 0x4208
2181#define TG3_APE_HOST_DRIVER_ID 0x420c 2185#define TG3_APE_HOST_DRIVER_ID 0x420c
2182#define APE_HOST_DRIVER_ID_LINUX 0xf0000000 2186#define APE_HOST_DRIVER_ID_LINUX 0xf0000000
@@ -2188,6 +2192,12 @@
2188#define APE_HOST_HEARTBEAT_INT_DISABLE 0 2192#define APE_HOST_HEARTBEAT_INT_DISABLE 0
2189#define APE_HOST_HEARTBEAT_INT_5SEC 5000 2193#define APE_HOST_HEARTBEAT_INT_5SEC 5000
2190#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218 2194#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218
2195#define TG3_APE_HOST_DRVR_STATE 0x421c
2196#define TG3_APE_HOST_DRVR_STATE_START 0x00000001
2197#define TG3_APE_HOST_DRVR_STATE_UNLOAD 0x00000002
2198#define TG3_APE_HOST_DRVR_STATE_WOL 0x00000003
2199#define TG3_APE_HOST_WOL_SPEED 0x4224
2200#define TG3_APE_HOST_WOL_SPEED_AUTO 0x00008000
2191 2201
2192#define TG3_APE_EVENT_STATUS 0x4300 2202#define TG3_APE_EVENT_STATUS 0x4300
2193 2203
@@ -2649,7 +2659,8 @@ struct tg3_rx_prodring_set {
2649 dma_addr_t rx_jmb_mapping; 2659 dma_addr_t rx_jmb_mapping;
2650}; 2660};
2651 2661
2652#define TG3_IRQ_MAX_VECS 5 2662#define TG3_IRQ_MAX_VECS_RSS 5
2663#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS
2653 2664
2654struct tg3_napi { 2665struct tg3_napi {
2655 struct napi_struct napi ____cacheline_aligned; 2666 struct napi_struct napi ____cacheline_aligned;
@@ -2668,7 +2679,7 @@ struct tg3_napi {
2668 u32 consmbox; 2679 u32 consmbox;
2669 u32 rx_rcb_ptr; 2680 u32 rx_rcb_ptr;
2670 u16 *rx_rcb_prod_idx; 2681 u16 *rx_rcb_prod_idx;
2671 struct tg3_rx_prodring_set *prodring; 2682 struct tg3_rx_prodring_set prodring;
2672 2683
2673 struct tg3_rx_buffer_desc *rx_rcb; 2684 struct tg3_rx_buffer_desc *rx_rcb;
2674 struct tg3_tx_buffer_desc *tx_ring; 2685 struct tg3_tx_buffer_desc *tx_ring;
@@ -2755,8 +2766,6 @@ struct tg3 {
2755 struct vlan_group *vlgrp; 2766 struct vlan_group *vlgrp;
2756#endif 2767#endif
2757 2768
2758 struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS];
2759
2760 2769
2761 /* begin "everything else" cacheline(s) section */ 2770 /* begin "everything else" cacheline(s) section */
2762 struct rtnl_link_stats64 net_stats; 2771 struct rtnl_link_stats64 net_stats;
@@ -2850,6 +2859,7 @@ struct tg3 {
2850#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000 2859#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
2851#define TG3_FLG3_L1PLLPD_EN 0x00800000 2860#define TG3_FLG3_L1PLLPD_EN 0x00800000
2852#define TG3_FLG3_5717_PLUS 0x01000000 2861#define TG3_FLG3_5717_PLUS 0x01000000
2862#define TG3_FLG3_APE_HAS_NCSI 0x02000000
2853 2863
2854 struct timer_list timer; 2864 struct timer_list timer;
2855 u16 timer_counter; 2865 u16 timer_counter;
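
This is the header-side half of the prodring change: the parallel prodring[TG3_IRQ_MAX_VECS] array is removed from struct tg3 and each tg3_napi embeds its own tg3_rx_prodring_set instead, so a vector's producer rings live with the rest of its state and are reached as &tp->napi[i].prodring (the tg3_run_loopback() hunk earlier is the .c-side example). In outline:

    struct tg3_napi {
            /* ... mailboxes, return-ring pointers ... */
            struct tg3_rx_prodring_set prodring;   /* was a pointer */
    };

    struct tg3 {
            /* ... */
            struct tg3_napi napi[TG3_IRQ_MAX_VECS];
            /* removed: struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS]; */
    };
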
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index ccee3eddc5f4..ec8c804a795d 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -393,7 +393,7 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
393 spin_unlock_irqrestore(&priv->lock, flags); 393 spin_unlock_irqrestore(&priv->lock, flags);
394 return; 394 return;
395 } 395 }
396 priv->timer.function = &TLan_Timer; 396 priv->timer.function = TLan_Timer;
397 if (!in_irq()) 397 if (!in_irq())
398 spin_unlock_irqrestore(&priv->lock, flags); 398 spin_unlock_irqrestore(&priv->lock, flags);
399 399
@@ -1453,7 +1453,7 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1453 TLan_DioWrite8( dev->base_addr, 1453 TLan_DioWrite8( dev->base_addr,
1454 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1454 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
1455 if ( priv->timer.function == NULL ) { 1455 if ( priv->timer.function == NULL ) {
1456 priv->timer.function = &TLan_Timer; 1456 priv->timer.function = TLan_Timer;
1457 priv->timer.data = (unsigned long) dev; 1457 priv->timer.data = (unsigned long) dev;
1458 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; 1458 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
1459 priv->timerSetAt = jiffies; 1459 priv->timerSetAt = jiffies;
@@ -1601,7 +1601,7 @@ drop_and_reuse:
1601 TLan_DioWrite8( dev->base_addr, 1601 TLan_DioWrite8( dev->base_addr,
1602 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1602 TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
1603 if ( priv->timer.function == NULL ) { 1603 if ( priv->timer.function == NULL ) {
1604 priv->timer.function = &TLan_Timer; 1604 priv->timer.function = TLan_Timer;
1605 priv->timer.data = (unsigned long) dev; 1605 priv->timer.data = (unsigned long) dev;
1606 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; 1606 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
1607 priv->timerSetAt = jiffies; 1607 priv->timerSetAt = jiffies;
@@ -1897,7 +1897,7 @@ static void TLan_Timer( unsigned long data )
1897 TLan_DioWrite8( dev->base_addr, 1897 TLan_DioWrite8( dev->base_addr,
1898 TLAN_LED_REG, TLAN_LED_LINK ); 1898 TLAN_LED_REG, TLAN_LED_LINK );
1899 } else { 1899 } else {
1900 priv->timer.function = &TLan_Timer; 1900 priv->timer.function = TLan_Timer;
1901 priv->timer.expires = priv->timerSetAt 1901 priv->timer.expires = priv->timerSetAt
1902 + TLAN_TIMER_ACT_DELAY; 1902 + TLAN_TIMER_ACT_DELAY;
1903 spin_unlock_irqrestore(&priv->lock, flags); 1903 spin_unlock_irqrestore(&priv->lock, flags);
@@ -3187,7 +3187,7 @@ static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
3187 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3187 TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
3188 } 3188 }
3189 3189
3190 return ( err ); 3190 return err;
3191 3191
3192} /* TLan_EeSendByte */ 3192} /* TLan_EeSendByte */
3193 3193
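
All four tlan.c timer assignments drop a redundant address-of: a function designator decays to a function pointer, so &TLan_Timer and TLan_Timer are the same value and the bare name is the kernel's preferred spelling. For context, the full 2.6-era arming sequence the driver repeats looks like this (tlan_arm_timer is a hypothetical helper, not in the driver):

    /* Hypothetical helper: arm the driver's one-shot service timer.
     * TLan_Timer has the void (*)(unsigned long) signature the old
     * timer API expects. */
    static void tlan_arm_timer(struct net_device *dev, unsigned long delay)
    {
            TLanPrivateInfo *priv = netdev_priv(dev);

            priv->timer.function = TLan_Timer;     /* no '&' needed */
            priv->timer.data = (unsigned long) dev;
            priv->timer.expires = jiffies + delay;
            add_timer(&priv->timer);
    }
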
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index d13ff12d7500..3315ced774e2 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -442,7 +442,7 @@ typedef struct tlan_private_tag {
442static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr) 442static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
443{ 443{
444 outw(internal_addr, base_addr + TLAN_DIO_ADR); 444 outw(internal_addr, base_addr + TLAN_DIO_ADR);
445 return (inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3))); 445 return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
446 446
447} /* TLan_DioRead8 */ 447} /* TLan_DioRead8 */
448 448
@@ -452,7 +452,7 @@ static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
452static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr) 452static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
453{ 453{
454 outw(internal_addr, base_addr + TLAN_DIO_ADR); 454 outw(internal_addr, base_addr + TLAN_DIO_ADR);
455 return (inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2))); 455 return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
456 456
457} /* TLan_DioRead16 */ 457} /* TLan_DioRead16 */
458 458
@@ -462,7 +462,7 @@ static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
462static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr) 462static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
463{ 463{
464 outw(internal_addr, base_addr + TLAN_DIO_ADR); 464 outw(internal_addr, base_addr + TLAN_DIO_ADR);
465 return (inl(base_addr + TLAN_DIO_DATA)); 465 return inl(base_addr + TLAN_DIO_DATA);
466 466
467} /* TLan_DioRead32 */ 467} /* TLan_DioRead32 */
468 468
@@ -537,6 +537,6 @@ static inline u32 TLan_HashFunc( const u8 *a )
537 hash ^= ((a[2]^a[5])<<4); /* & 060 */ 537 hash ^= ((a[2]^a[5])<<4); /* & 060 */
538 hash ^= ((a[2]^a[5])>>2); /* & 077 */ 538 hash ^= ((a[2]^a[5])>>2); /* & 077 */
539 539
540 return (hash & 077); 540 return hash & 077;
541} 541}
542#endif 542#endif
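
The tlan.h hunks are the same checkpatch cleanup applied to the DIO accessors: return is a statement, not a function call, so the extra parentheses are noise. The accessors themselves are a small indexed-register scheme: write the internal register number to the address port, then read the value back through the data window, with the low address bits selecting the byte or half-word lane:

    #include <asm/io.h>     /* outw(), inw() */

    static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
    {
            /* select the internal register, then read its 16-bit lane */
            outw(internal_addr, base_addr + TLAN_DIO_ADR);
            return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
    }
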
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index 16e8783ee9cd..8d362e64a40e 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -110,7 +110,7 @@ static int __init proteon_probe1(struct net_device *dev, int ioaddr)
110 } 110 }
111 111
112 dev->base_addr = ioaddr; 112 dev->base_addr = ioaddr;
113 return (0); 113 return 0;
114nodev: 114nodev:
115 release_region(ioaddr, PROTEON_IO_EXTENT); 115 release_region(ioaddr, PROTEON_IO_EXTENT);
116 return -ENODEV; 116 return -ENODEV;
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 0929fff5982c..63db5a6762ae 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -435,7 +435,7 @@ static int smctr_alloc_shared_memory(struct net_device *dev)
435 RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]); 435 RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]);
436 tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); 436 tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
437 437
438 return (0); 438 return 0;
439} 439}
440 440
441/* Enter Bypass state. */ 441/* Enter Bypass state. */
@@ -448,7 +448,7 @@ static int smctr_bypass_state(struct net_device *dev)
448 448
449 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE); 449 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE);
450 450
451 return (err); 451 return err;
452} 452}
453 453
454static int smctr_checksum_firmware(struct net_device *dev) 454static int smctr_checksum_firmware(struct net_device *dev)
@@ -471,9 +471,9 @@ static int smctr_checksum_firmware(struct net_device *dev)
471 smctr_disable_adapter_ctrl_store(dev); 471 smctr_disable_adapter_ctrl_store(dev);
472 472
473 if(checksum) 473 if(checksum)
474 return (checksum); 474 return checksum;
475 475
476 return (0); 476 return 0;
477} 477}
478 478
479static int __init smctr_chk_mca(struct net_device *dev) 479static int __init smctr_chk_mca(struct net_device *dev)
@@ -485,7 +485,7 @@ static int __init smctr_chk_mca(struct net_device *dev)
485 485
486 current_slot = mca_find_unused_adapter(smctr_posid, 0); 486 current_slot = mca_find_unused_adapter(smctr_posid, 0);
487 if(current_slot == MCA_NOTFOUND) 487 if(current_slot == MCA_NOTFOUND)
488 return (-ENODEV); 488 return -ENODEV;
489 489
490 mca_set_adapter_name(current_slot, smctr_name); 490 mca_set_adapter_name(current_slot, smctr_name);
491 mca_mark_as_used(current_slot); 491 mca_mark_as_used(current_slot);
@@ -622,9 +622,9 @@ static int __init smctr_chk_mca(struct net_device *dev)
622 break; 622 break;
623 } 623 }
624 624
625 return (0); 625 return 0;
626#else 626#else
627 return (-1); 627 return -1;
628#endif /* CONFIG_MCA_LEGACY */ 628#endif /* CONFIG_MCA_LEGACY */
629} 629}
630 630
@@ -677,18 +677,18 @@ static int smctr_chg_rx_mask(struct net_device *dev)
677 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0, 677 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0,
678 &tp->config_word0))) 678 &tp->config_word0)))
679 { 679 {
680 return (err); 680 return err;
681 } 681 }
682 682
683 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1, 683 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1,
684 &tp->config_word1))) 684 &tp->config_word1)))
685 { 685 {
686 return (err); 686 return err;
687 } 687 }
688 688
689 smctr_disable_16bit(dev); 689 smctr_disable_16bit(dev);
690 690
691 return (0); 691 return 0;
692} 692}
693 693
694static int smctr_clear_int(struct net_device *dev) 694static int smctr_clear_int(struct net_device *dev)
@@ -697,7 +697,7 @@ static int smctr_clear_int(struct net_device *dev)
697 697
698 outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR); 698 outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR);
699 699
700 return (0); 700 return 0;
701} 701}
702 702
703static int smctr_clear_trc_reset(int ioaddr) 703static int smctr_clear_trc_reset(int ioaddr)
@@ -707,7 +707,7 @@ static int smctr_clear_trc_reset(int ioaddr)
707 r = inb(ioaddr + MSR); 707 r = inb(ioaddr + MSR);
708 outb(~MSR_RST & r, ioaddr + MSR); 708 outb(~MSR_RST & r, ioaddr + MSR);
709 709
710 return (0); 710 return 0;
711} 711}
712 712
713/* 713/*
@@ -725,7 +725,7 @@ static int smctr_close(struct net_device *dev)
725 725
726 /* Check to see if adapter is already in a closed state. */ 726 /* Check to see if adapter is already in a closed state. */
727 if(tp->status != OPEN) 727 if(tp->status != OPEN)
728 return (0); 728 return 0;
729 729
730 smctr_enable_16bit(dev); 730 smctr_enable_16bit(dev);
731 smctr_set_page(dev, (__u8 *)tp->ram_access); 731 smctr_set_page(dev, (__u8 *)tp->ram_access);
@@ -733,7 +733,7 @@ static int smctr_close(struct net_device *dev)
733 if((err = smctr_issue_remove_cmd(dev))) 733 if((err = smctr_issue_remove_cmd(dev)))
734 { 734 {
735 smctr_disable_16bit(dev); 735 smctr_disable_16bit(dev);
736 return (err); 736 return err;
737 } 737 }
738 738
739 for(;;) 739 for(;;)
@@ -746,7 +746,7 @@ static int smctr_close(struct net_device *dev)
746 } 746 }
747 747
748 748
749 return (0); 749 return 0;
750} 750}
751 751
752static int smctr_decode_firmware(struct net_device *dev, 752static int smctr_decode_firmware(struct net_device *dev,
@@ -807,12 +807,12 @@ static int smctr_decode_firmware(struct net_device *dev,
807 if(buff) 807 if(buff)
808 *(mem++) = SWAP_BYTES(buff); 808 *(mem++) = SWAP_BYTES(buff);
809 809
810 return (0); 810 return 0;
811} 811}
812 812
813static int smctr_disable_16bit(struct net_device *dev) 813static int smctr_disable_16bit(struct net_device *dev)
814{ 814{
815 return (0); 815 return 0;
816} 816}
817 817
818/* 818/*
@@ -832,7 +832,7 @@ static int smctr_disable_adapter_ctrl_store(struct net_device *dev)
832 tp->trc_mask |= CSR_WCSS; 832 tp->trc_mask |= CSR_WCSS;
833 outb(tp->trc_mask, ioaddr + CSR); 833 outb(tp->trc_mask, ioaddr + CSR);
834 834
835 return (0); 835 return 0;
836} 836}
837 837
838static int smctr_disable_bic_int(struct net_device *dev) 838static int smctr_disable_bic_int(struct net_device *dev)
@@ -844,7 +844,7 @@ static int smctr_disable_bic_int(struct net_device *dev)
844 | CSR_MSKTINT | CSR_WCSS; 844 | CSR_MSKTINT | CSR_WCSS;
845 outb(tp->trc_mask, ioaddr + CSR); 845 outb(tp->trc_mask, ioaddr + CSR);
846 846
847 return (0); 847 return 0;
848} 848}
849 849
850static int smctr_enable_16bit(struct net_device *dev) 850static int smctr_enable_16bit(struct net_device *dev)
@@ -858,7 +858,7 @@ static int smctr_enable_16bit(struct net_device *dev)
858 outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR); 858 outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR);
859 } 859 }
860 860
861 return (0); 861 return 0;
862} 862}
863 863
864/* 864/*
@@ -881,7 +881,7 @@ static int smctr_enable_adapter_ctrl_store(struct net_device *dev)
881 tp->trc_mask &= ~CSR_WCSS; 881 tp->trc_mask &= ~CSR_WCSS;
882 outb(tp->trc_mask, ioaddr + CSR); 882 outb(tp->trc_mask, ioaddr + CSR);
883 883
884 return (0); 884 return 0;
885} 885}
886 886
887static int smctr_enable_adapter_ram(struct net_device *dev) 887static int smctr_enable_adapter_ram(struct net_device *dev)
@@ -895,7 +895,7 @@ static int smctr_enable_adapter_ram(struct net_device *dev)
895 r = inb(ioaddr + MSR); 895 r = inb(ioaddr + MSR);
896 outb(MSR_MEMB | r, ioaddr + MSR); 896 outb(MSR_MEMB | r, ioaddr + MSR);
897 897
898 return (0); 898 return 0;
899} 899}
900 900
901static int smctr_enable_bic_int(struct net_device *dev) 901static int smctr_enable_bic_int(struct net_device *dev)
@@ -921,7 +921,7 @@ static int smctr_enable_bic_int(struct net_device *dev)
921 break; 921 break;
922 } 922 }
923 923
924 return (0); 924 return 0;
925} 925}
926 926
927static int __init smctr_chk_isa(struct net_device *dev) 927static int __init smctr_chk_isa(struct net_device *dev)
@@ -1145,7 +1145,7 @@ static int __init smctr_chk_isa(struct net_device *dev)
1145 */ 1145 */
1146 } 1146 }
1147 1147
1148 return (0); 1148 return 0;
1149 1149
1150out2: 1150out2:
1151 release_region(ioaddr, SMCTR_IO_EXTENT); 1151 release_region(ioaddr, SMCTR_IO_EXTENT);
@@ -1199,7 +1199,7 @@ static int __init smctr_get_boardid(struct net_device *dev, int mca)
1199 * return; 1199 * return;
1200 */ 1200 */
1201 if(IdByte & 0xF8) 1201 if(IdByte & 0xF8)
1202 return (-1); 1202 return -1;
1203 1203
1204 r1 = inb(ioaddr + BID_REG_1); 1204 r1 = inb(ioaddr + BID_REG_1);
1205 r1 &= BID_ICR_MASK; 1205 r1 &= BID_ICR_MASK;
@@ -1250,21 +1250,21 @@ static int __init smctr_get_boardid(struct net_device *dev, int mca)
1250 while(r1 & BID_RECALL_DONE_MASK) 1250 while(r1 & BID_RECALL_DONE_MASK)
1251 r1 = inb(ioaddr + BID_REG_1); 1251 r1 = inb(ioaddr + BID_REG_1);
1252 1252
1253 return (BoardIdMask); 1253 return BoardIdMask;
1254} 1254}
1255 1255
1256static int smctr_get_group_address(struct net_device *dev) 1256static int smctr_get_group_address(struct net_device *dev)
1257{ 1257{
1258 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR); 1258 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR);
1259 1259
1260 return(smctr_wait_cmd(dev)); 1260 return smctr_wait_cmd(dev);
1261} 1261}
1262 1262
1263static int smctr_get_functional_address(struct net_device *dev) 1263static int smctr_get_functional_address(struct net_device *dev)
1264{ 1264{
1265 smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR); 1265 smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR);
1266 1266
1267 return(smctr_wait_cmd(dev)); 1267 return smctr_wait_cmd(dev);
1268} 1268}
1269 1269
1270/* Calculate number of Non-MAC receive BDB's and data buffers. 1270/* Calculate number of Non-MAC receive BDB's and data buffers.
@@ -1346,14 +1346,14 @@ static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
1346 */ 1346 */
1347 mem_used += 0x100; 1347 mem_used += 0x100;
1348 1348
1349 return((0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock))); 1349 return (0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock));
1350} 1350}
1351 1351
1352static int smctr_get_physical_drop_number(struct net_device *dev) 1352static int smctr_get_physical_drop_number(struct net_device *dev)
1353{ 1353{
1354 smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER); 1354 smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER);
1355 1355
1356 return(smctr_wait_cmd(dev)); 1356 return smctr_wait_cmd(dev);
1357} 1357}
1358 1358
1359static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue) 1359static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
@@ -1366,14 +1366,14 @@ static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
1366 1366
1367 tp->rx_fcb_curr[queue]->bdb_ptr = bdb; 1367 tp->rx_fcb_curr[queue]->bdb_ptr = bdb;
1368 1368
1369 return ((__u8 *)bdb->data_block_ptr); 1369 return (__u8 *)bdb->data_block_ptr;
1370} 1370}
1371 1371
1372static int smctr_get_station_id(struct net_device *dev) 1372static int smctr_get_station_id(struct net_device *dev)
1373{ 1373{
1374 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS); 1374 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS);
1375 1375
1376 return(smctr_wait_cmd(dev)); 1376 return smctr_wait_cmd(dev);
1377} 1377}
1378 1378
1379/* 1379/*
@@ -1384,7 +1384,7 @@ static struct net_device_stats *smctr_get_stats(struct net_device *dev)
1384{ 1384{
1385 struct net_local *tp = netdev_priv(dev); 1385 struct net_local *tp = netdev_priv(dev);
1386 1386
1387 return ((struct net_device_stats *)&tp->MacStat); 1387 return (struct net_device_stats *)&tp->MacStat;
1388} 1388}
1389 1389
1390static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue, 1390static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
@@ -1401,14 +1401,14 @@ static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
1401 1401
1402 /* check if there is enough FCB blocks */ 1402 /* check if there is enough FCB blocks */
1403 if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue]) 1403 if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue])
1404 return ((FCBlock *)(-1L)); 1404 return (FCBlock *)(-1L);
1405 1405
1406 /* round off the input pkt size to the nearest even number */ 1406 /* round off the input pkt size to the nearest even number */
1407 alloc_size = (bytes_count + 1) & 0xfffe; 1407 alloc_size = (bytes_count + 1) & 0xfffe;
1408 1408
1409 /* check if enough mem */ 1409 /* check if enough mem */
1410 if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue]) 1410 if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue])
1411 return ((FCBlock *)(-1L)); 1411 return (FCBlock *)(-1L);
1412 1412
1413 /* check if past the end ; 1413 /* check if past the end ;
1414 * if exactly enough mem to end of ring, alloc from front. 1414 * if exactly enough mem to end of ring, alloc from front.
@@ -1425,7 +1425,7 @@ static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
1425 if((tp->tx_buff_used[queue] + alloc_size) 1425 if((tp->tx_buff_used[queue] + alloc_size)
1426 > tp->tx_buff_size[queue]) 1426 > tp->tx_buff_size[queue])
1427 { 1427 {
1428 return ((FCBlock *)(-1L)); 1428 return (FCBlock *)(-1L);
1429 } 1429 }
1430 1430
1431 /* ring wrap */ 1431 /* ring wrap */
@@ -1448,14 +1448,14 @@ static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
1448 pFCB = tp->tx_fcb_curr[queue]; 1448 pFCB = tp->tx_fcb_curr[queue];
1449 tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr; 1449 tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr;
1450 1450
1451 return (pFCB); 1451 return pFCB;
1452} 1452}
1453 1453
1454static int smctr_get_upstream_neighbor_addr(struct net_device *dev) 1454static int smctr_get_upstream_neighbor_addr(struct net_device *dev)
1455{ 1455{
1456 smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS); 1456 smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS);
1457 1457
1458 return(smctr_wait_cmd(dev)); 1458 return smctr_wait_cmd(dev);
1459} 1459}
1460 1460
1461static int smctr_hardware_send_packet(struct net_device *dev, 1461static int smctr_hardware_send_packet(struct net_device *dev,
@@ -1469,21 +1469,22 @@ static int smctr_hardware_send_packet(struct net_device *dev,
1469 printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name); 1469 printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name);
1470 1470
1471 if(tp->status != OPEN) 1471 if(tp->status != OPEN)
1472 return (-1); 1472 return -1;
1473 1473
1474 if(tp->monitor_state_ready != 1) 1474 if(tp->monitor_state_ready != 1)
1475 return (-1); 1475 return -1;
1476 1476
1477 for(;;) 1477 for(;;)
1478 { 1478 {
1479 /* Send first buffer from queue */ 1479 /* Send first buffer from queue */
1480 skb = skb_dequeue(&tp->SendSkbQueue); 1480 skb = skb_dequeue(&tp->SendSkbQueue);
1481 if(skb == NULL) 1481 if(skb == NULL)
1482 return (-1); 1482 return -1;
1483 1483
1484 tp->QueueSkb++; 1484 tp->QueueSkb++;
1485 1485
1486 if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size) return (-1); 1486 if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size)
1487 return -1;
1487 1488
1488 smctr_enable_16bit(dev); 1489 smctr_enable_16bit(dev);
1489 smctr_set_page(dev, (__u8 *)tp->ram_access); 1490 smctr_set_page(dev, (__u8 *)tp->ram_access);
@@ -1492,7 +1493,7 @@ static int smctr_hardware_send_packet(struct net_device *dev,
1492 == (FCBlock *)(-1L)) 1493 == (FCBlock *)(-1L))
1493 { 1494 {
1494 smctr_disable_16bit(dev); 1495 smctr_disable_16bit(dev);
1495 return (-1); 1496 return -1;
1496 } 1497 }
1497 1498
1498 smctr_tx_move_frame(dev, skb, 1499 smctr_tx_move_frame(dev, skb,
@@ -1508,7 +1509,7 @@ static int smctr_hardware_send_packet(struct net_device *dev,
1508 smctr_disable_16bit(dev); 1509 smctr_disable_16bit(dev);
1509 } 1510 }
1510 1511
1511 return (0); 1512 return 0;
1512} 1513}
1513 1514
1514static int smctr_init_acbs(struct net_device *dev) 1515static int smctr_init_acbs(struct net_device *dev)
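
Beyond the mechanical return-parenthesis conversion, the smctr hunks also unstack the driver's worst one-liners, such as the length check in smctr_hardware_send_packet() above and the printk()-plus-return pair in smctr_init_adapter() below, since a branch body hidden at the end of the condition line is easy to misread. Schematically:

    /* before: body buried on the condition line */
    if (skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size) return (-1);

    /* after: one statement per line, no parentheses on the value */
    if (skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size)
            return -1;
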
@@ -1552,7 +1553,7 @@ static int smctr_init_acbs(struct net_device *dev)
1552 tp->acb_curr = tp->acb_head->next_ptr; 1553 tp->acb_curr = tp->acb_head->next_ptr;
1553 tp->num_acbs_used = 0; 1554 tp->num_acbs_used = 0;
1554 1555
1555 return (0); 1556 return 0;
1556} 1557}
1557 1558
1558static int smctr_init_adapter(struct net_device *dev) 1559static int smctr_init_adapter(struct net_device *dev)
@@ -1590,13 +1591,14 @@ static int smctr_init_adapter(struct net_device *dev)
1590 1591
1591 if(smctr_checksum_firmware(dev)) 1592 if(smctr_checksum_firmware(dev))
1592 { 1593 {
1593 printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name); return (-ENOENT); 1594 printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name);
1595 return -ENOENT;
1594 } 1596 }
1595 1597
1596 if((err = smctr_ram_memory_test(dev))) 1598 if((err = smctr_ram_memory_test(dev)))
1597 { 1599 {
1598 printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name); 1600 printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name);
1599 return (-EIO); 1601 return -EIO;
1600 } 1602 }
1601 1603
1602 smctr_set_rx_look_ahead(dev); 1604 smctr_set_rx_look_ahead(dev);
@@ -1608,7 +1610,7 @@ static int smctr_init_adapter(struct net_device *dev)
1608 { 1610 {
1609 printk(KERN_ERR "%s: Initialization of card failed (%d)\n", 1611 printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
1610 dev->name, err); 1612 dev->name, err);
1611 return (-EINVAL); 1613 return -EINVAL;
1612 } 1614 }
1613 1615
1614 /* This routine clobbers the TRC's internal registers. */ 1616 /* This routine clobbers the TRC's internal registers. */
@@ -1616,7 +1618,7 @@ static int smctr_init_adapter(struct net_device *dev)
1616 { 1618 {
1617 printk(KERN_ERR "%s: Card failed internal self test (%d)\n", 1619 printk(KERN_ERR "%s: Card failed internal self test (%d)\n",
1618 dev->name, err); 1620 dev->name, err);
1619 return (-EINVAL); 1621 return -EINVAL;
1620 } 1622 }
1621 1623
1622 /* Re-Initialize adapter's internal registers */ 1624 /* Re-Initialize adapter's internal registers */
@@ -1625,17 +1627,17 @@ static int smctr_init_adapter(struct net_device *dev)
1625 { 1627 {
1626 printk(KERN_ERR "%s: Initialization of card failed (%d)\n", 1628 printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
1627 dev->name, err); 1629 dev->name, err);
1628 return (-EINVAL); 1630 return -EINVAL;
1629 } 1631 }
1630 1632
1631 smctr_enable_bic_int(dev); 1633 smctr_enable_bic_int(dev);
1632 1634
1633 if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK))) 1635 if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
1634 return (err); 1636 return err;
1635 1637
1636 smctr_disable_16bit(dev); 1638 smctr_disable_16bit(dev);
1637 1639
1638 return (0); 1640 return 0;
1639} 1641}
1640 1642
1641static int smctr_init_card_real(struct net_device *dev) 1643static int smctr_init_card_real(struct net_device *dev)
@@ -1703,15 +1705,15 @@ static int smctr_init_card_real(struct net_device *dev)
1703 smctr_init_shared_memory(dev); 1705 smctr_init_shared_memory(dev);
1704 1706
1705 if((err = smctr_issue_init_timers_cmd(dev))) 1707 if((err = smctr_issue_init_timers_cmd(dev)))
1706 return (err); 1708 return err;
1707 1709
1708 if((err = smctr_issue_init_txrx_cmd(dev))) 1710 if((err = smctr_issue_init_txrx_cmd(dev)))
1709 { 1711 {
1710 printk(KERN_ERR "%s: Hardware failure\n", dev->name); 1712 printk(KERN_ERR "%s: Hardware failure\n", dev->name);
1711 return (err); 1713 return err;
1712 } 1714 }
1713 1715
1714 return (0); 1716 return 0;
1715} 1717}
1716 1718
1717static int smctr_init_rx_bdbs(struct net_device *dev) 1719static int smctr_init_rx_bdbs(struct net_device *dev)
@@ -1763,7 +1765,7 @@ static int smctr_init_rx_bdbs(struct net_device *dev)
1763 tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr; 1765 tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr;
1764 } 1766 }
1765 1767
1766 return (0); 1768 return 0;
1767} 1769}
1768 1770
1769static int smctr_init_rx_fcbs(struct net_device *dev) 1771static int smctr_init_rx_fcbs(struct net_device *dev)
@@ -1813,7 +1815,7 @@ static int smctr_init_rx_fcbs(struct net_device *dev)
1813 tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr; 1815 tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr;
1814 } 1816 }
1815 1817
1816 return(0); 1818 return 0;
1817} 1819}
1818 1820
1819static int smctr_init_shared_memory(struct net_device *dev) 1821static int smctr_init_shared_memory(struct net_device *dev)
@@ -1871,7 +1873,7 @@ static int smctr_init_shared_memory(struct net_device *dev)
1871 smctr_init_rx_bdbs(dev); 1873 smctr_init_rx_bdbs(dev);
1872 smctr_init_rx_fcbs(dev); 1874 smctr_init_rx_fcbs(dev);
1873 1875
1874 return (0); 1876 return 0;
1875} 1877}
1876 1878
1877static int smctr_init_tx_bdbs(struct net_device *dev) 1879static int smctr_init_tx_bdbs(struct net_device *dev)
@@ -1901,7 +1903,7 @@ static int smctr_init_tx_bdbs(struct net_device *dev)
1901 tp->tx_bdb_head[i]->back_ptr = bdb; 1903 tp->tx_bdb_head[i]->back_ptr = bdb;
1902 } 1904 }
1903 1905
1904 return (0); 1906 return 0;
1905} 1907}
1906 1908
1907static int smctr_init_tx_fcbs(struct net_device *dev) 1909static int smctr_init_tx_fcbs(struct net_device *dev)
@@ -1940,7 +1942,7 @@ static int smctr_init_tx_fcbs(struct net_device *dev)
1940 tp->num_tx_fcbs_used[i] = 0; 1942 tp->num_tx_fcbs_used[i] = 0;
1941 } 1943 }
1942 1944
1943 return (0); 1945 return 0;
1944} 1946}
1945 1947
1946static int smctr_internal_self_test(struct net_device *dev) 1948static int smctr_internal_self_test(struct net_device *dev)
@@ -1949,33 +1951,33 @@ static int smctr_internal_self_test(struct net_device *dev)
1949 int err; 1951 int err;
1950 1952
1951 if((err = smctr_issue_test_internal_rom_cmd(dev))) 1953 if((err = smctr_issue_test_internal_rom_cmd(dev)))
1952 return (err); 1954 return err;
1953 1955
1954 if((err = smctr_wait_cmd(dev))) 1956 if((err = smctr_wait_cmd(dev)))
1955 return (err); 1957 return err;
1956 1958
1957 if(tp->acb_head->cmd_done_status & 0xff) 1959 if(tp->acb_head->cmd_done_status & 0xff)
1958 return (-1); 1960 return -1;
1959 1961
1960 if((err = smctr_issue_test_hic_cmd(dev))) 1962 if((err = smctr_issue_test_hic_cmd(dev)))
1961 return (err); 1963 return err;
1962 1964
1963 if((err = smctr_wait_cmd(dev))) 1965 if((err = smctr_wait_cmd(dev)))
1964 return (err); 1966 return err;
1965 1967
1966 if(tp->acb_head->cmd_done_status & 0xff) 1968 if(tp->acb_head->cmd_done_status & 0xff)
1967 return (-1); 1969 return -1;
1968 1970
1969 if((err = smctr_issue_test_mac_reg_cmd(dev))) 1971 if((err = smctr_issue_test_mac_reg_cmd(dev)))
1970 return (err); 1972 return err;
1971 1973
1972 if((err = smctr_wait_cmd(dev))) 1974 if((err = smctr_wait_cmd(dev)))
1973 return (err); 1975 return err;
1974 1976
1975 if(tp->acb_head->cmd_done_status & 0xff) 1977 if(tp->acb_head->cmd_done_status & 0xff)
1976 return (-1); 1978 return -1;
1977 1979
1978 return (0); 1980 return 0;
1979} 1981}
1980 1982
1981/* 1983/*
@@ -2468,14 +2470,14 @@ static int smctr_issue_enable_int_cmd(struct net_device *dev,
2468 int err; 2470 int err;
2469 2471
2470 if((err = smctr_wait_while_cbusy(dev))) 2472 if((err = smctr_wait_while_cbusy(dev)))
2471 return (err); 2473 return err;
2472 2474
2473 tp->sclb_ptr->int_mask_control = interrupt_enable_mask; 2475 tp->sclb_ptr->int_mask_control = interrupt_enable_mask;
2474 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK; 2476 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;
2475 2477
2476 smctr_set_ctrl_attention(dev); 2478 smctr_set_ctrl_attention(dev);
2477 2479
2478 return (0); 2480 return 0;
2479} 2481}
2480 2482
2481static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits) 2483static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits)
@@ -2483,7 +2485,7 @@ static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ib
2483 struct net_local *tp = netdev_priv(dev); 2485 struct net_local *tp = netdev_priv(dev);
2484 2486
2485 if(smctr_wait_while_cbusy(dev)) 2487 if(smctr_wait_while_cbusy(dev))
2486 return (-1); 2488 return -1;
2487 2489
2488 tp->sclb_ptr->int_mask_control = ibits; 2490 tp->sclb_ptr->int_mask_control = ibits;
2489 tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */ tp->sclb_ptr->resume_control = 0; 2491 tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */ tp->sclb_ptr->resume_control = 0;
@@ -2491,7 +2493,7 @@ static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ib
2491 2493
2492 smctr_set_ctrl_attention(dev); 2494 smctr_set_ctrl_attention(dev);
2493 2495
2494 return (0); 2496 return 0;
2495} 2497}
2496 2498
2497static int smctr_issue_init_timers_cmd(struct net_device *dev) 2499static int smctr_issue_init_timers_cmd(struct net_device *dev)
@@ -2502,10 +2504,10 @@ static int smctr_issue_init_timers_cmd(struct net_device *dev)
2502 __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data; 2504 __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data;
2503 2505
2504 if((err = smctr_wait_while_cbusy(dev))) 2506 if((err = smctr_wait_while_cbusy(dev)))
2505 return (err); 2507 return err;
2506 2508
2507 if((err = smctr_wait_cmd(dev))) 2509 if((err = smctr_wait_cmd(dev)))
2508 return (err); 2510 return err;
2509 2511
2510 tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE; 2512 tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE;
2511 tp->config_word1 = 0; 2513 tp->config_word1 = 0;
@@ -2648,7 +2650,7 @@ static int smctr_issue_init_timers_cmd(struct net_device *dev)
2648 2650
2649 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0); 2651 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0);
2650 2652
2651 return (err); 2653 return err;
2652} 2654}
2653 2655
2654static int smctr_issue_init_txrx_cmd(struct net_device *dev) 2656static int smctr_issue_init_txrx_cmd(struct net_device *dev)
@@ -2659,12 +2661,12 @@ static int smctr_issue_init_txrx_cmd(struct net_device *dev)
2659 void **txrx_ptrs = (void *)tp->misc_command_data; 2661 void **txrx_ptrs = (void *)tp->misc_command_data;
2660 2662
2661 if((err = smctr_wait_while_cbusy(dev))) 2663 if((err = smctr_wait_while_cbusy(dev)))
2662 return (err); 2664 return err;
2663 2665
2664 if((err = smctr_wait_cmd(dev))) 2666 if((err = smctr_wait_cmd(dev)))
2665 { 2667 {
2666 printk(KERN_ERR "%s: Hardware failure\n", dev->name); 2668 printk(KERN_ERR "%s: Hardware failure\n", dev->name);
2667 return (err); 2669 return err;
2668 } 2670 }
2669 2671
2670 /* Initialize Transmit Queue Pointers that are used, to point to 2672 /* Initialize Transmit Queue Pointers that are used, to point to
@@ -2695,7 +2697,7 @@ static int smctr_issue_init_txrx_cmd(struct net_device *dev)
2695 2697
2696 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0); 2698 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0);
2697 2699
2698 return (err); 2700 return err;
2699} 2701}
2700 2702
2701static int smctr_issue_insert_cmd(struct net_device *dev) 2703static int smctr_issue_insert_cmd(struct net_device *dev)
@@ -2704,7 +2706,7 @@ static int smctr_issue_insert_cmd(struct net_device *dev)
2704 2706
2705 err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP); 2707 err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP);
2706 2708
2707 return (err); 2709 return err;
2708} 2710}
2709 2711
2710static int smctr_issue_read_ring_status_cmd(struct net_device *dev) 2712static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
@@ -2712,15 +2714,15 @@ static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
2712 int err; 2714 int err;
2713 2715
2714 if((err = smctr_wait_while_cbusy(dev))) 2716 if((err = smctr_wait_while_cbusy(dev)))
2715 return (err); 2717 return err;
2716 2718
2717 if((err = smctr_wait_cmd(dev))) 2719 if((err = smctr_wait_cmd(dev)))
2718 return (err); 2720 return err;
2719 2721
2720 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS, 2722 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS,
2721 RW_TRC_STATUS_BLOCK); 2723 RW_TRC_STATUS_BLOCK);
2722 2724
2723 return (err); 2725 return err;
2724} 2726}
2725 2727
2726static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt) 2728static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
@@ -2728,15 +2730,15 @@ static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
2728 int err; 2730 int err;
2729 2731
2730 if((err = smctr_wait_while_cbusy(dev))) 2732 if((err = smctr_wait_while_cbusy(dev)))
2731 return (err); 2733 return err;
2732 2734
2733 if((err = smctr_wait_cmd(dev))) 2735 if((err = smctr_wait_cmd(dev)))
2734 return (err); 2736 return err;
2735 2737
2736 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE, 2738 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE,
2737 aword_cnt); 2739 aword_cnt);
2738 2740
2739 return (err); 2741 return err;
2740} 2742}
2741 2743
2742static int smctr_issue_remove_cmd(struct net_device *dev) 2744static int smctr_issue_remove_cmd(struct net_device *dev)
@@ -2745,14 +2747,14 @@ static int smctr_issue_remove_cmd(struct net_device *dev)
2745 int err; 2747 int err;
2746 2748
2747 if((err = smctr_wait_while_cbusy(dev))) 2749 if((err = smctr_wait_while_cbusy(dev)))
2748 return (err); 2750 return err;
2749 2751
2750 tp->sclb_ptr->resume_control = 0; 2752 tp->sclb_ptr->resume_control = 0;
2751 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE; 2753 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE;
2752 2754
2753 smctr_set_ctrl_attention(dev); 2755 smctr_set_ctrl_attention(dev);
2754 2756
2755 return (0); 2757 return 0;
2756} 2758}
2757 2759
2758static int smctr_issue_resume_acb_cmd(struct net_device *dev) 2760static int smctr_issue_resume_acb_cmd(struct net_device *dev)
@@ -2761,7 +2763,7 @@ static int smctr_issue_resume_acb_cmd(struct net_device *dev)
2761 int err; 2763 int err;
2762 2764
2763 if((err = smctr_wait_while_cbusy(dev))) 2765 if((err = smctr_wait_while_cbusy(dev)))
2764 return (err); 2766 return err;
2765 2767
2766 tp->sclb_ptr->resume_control = SCLB_RC_ACB; 2768 tp->sclb_ptr->resume_control = SCLB_RC_ACB;
2767 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID; 2769 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
@@ -2770,7 +2772,7 @@ static int smctr_issue_resume_acb_cmd(struct net_device *dev)
2770 2772
2771 smctr_set_ctrl_attention(dev); 2773 smctr_set_ctrl_attention(dev);
2772 2774
2773 return (0); 2775 return 0;
2774} 2776}
2775 2777
2776static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue) 2778static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
@@ -2779,7 +2781,7 @@ static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
2779 int err; 2781 int err;
2780 2782
2781 if((err = smctr_wait_while_cbusy(dev))) 2783 if((err = smctr_wait_while_cbusy(dev)))
2782 return (err); 2784 return err;
2783 2785
2784 if(queue == MAC_QUEUE) 2786 if(queue == MAC_QUEUE)
2785 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB; 2787 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB;
@@ -2790,7 +2792,7 @@ static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
2790 2792
2791 smctr_set_ctrl_attention(dev); 2793 smctr_set_ctrl_attention(dev);
2792 2794
2793 return (0); 2795 return 0;
2794} 2796}
2795 2797
2796static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue) 2798static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
@@ -2801,7 +2803,7 @@ static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
2801 printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name); 2803 printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name);
2802 2804
2803 if(smctr_wait_while_cbusy(dev)) 2805 if(smctr_wait_while_cbusy(dev))
2804 return (-1); 2806 return -1;
2805 2807
2806 if(queue == MAC_QUEUE) 2808 if(queue == MAC_QUEUE)
2807 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB; 2809 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB;
@@ -2812,7 +2814,7 @@ static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
2812 2814
2813 smctr_set_ctrl_attention(dev); 2815 smctr_set_ctrl_attention(dev);
2814 2816
2815 return (0); 2817 return 0;
2816} 2818}
2817 2819
2818static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue) 2820static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
@@ -2823,14 +2825,14 @@ static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
2823 printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name); 2825 printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name);
2824 2826
2825 if(smctr_wait_while_cbusy(dev)) 2827 if(smctr_wait_while_cbusy(dev))
2826 return (-1); 2828 return -1;
2827 2829
2828 tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue); 2830 tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue);
2829 tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID; 2831 tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID;
2830 2832
2831 smctr_set_ctrl_attention(dev); 2833 smctr_set_ctrl_attention(dev);
2832 2834
2833 return (0); 2835 return 0;
2834} 2836}
2835 2837
2836static int smctr_issue_test_internal_rom_cmd(struct net_device *dev) 2838static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
@@ -2840,7 +2842,7 @@ static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
2840 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, 2842 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2841 TRC_INTERNAL_ROM_TEST); 2843 TRC_INTERNAL_ROM_TEST);
2842 2844
2843 return (err); 2845 return err;
2844} 2846}
2845 2847
2846static int smctr_issue_test_hic_cmd(struct net_device *dev) 2848static int smctr_issue_test_hic_cmd(struct net_device *dev)
@@ -2850,7 +2852,7 @@ static int smctr_issue_test_hic_cmd(struct net_device *dev)
2850 err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST, 2852 err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST,
2851 TRC_HOST_INTERFACE_REG_TEST); 2853 TRC_HOST_INTERFACE_REG_TEST);
2852 2854
2853 return (err); 2855 return err;
2854} 2856}
2855 2857
2856static int smctr_issue_test_mac_reg_cmd(struct net_device *dev) 2858static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
@@ -2860,7 +2862,7 @@ static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
2860 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, 2862 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2861 TRC_MAC_REGISTERS_TEST); 2863 TRC_MAC_REGISTERS_TEST);
2862 2864
2863 return (err); 2865 return err;
2864} 2866}
2865 2867
2866static int smctr_issue_trc_loopback_cmd(struct net_device *dev) 2868static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
@@ -2870,7 +2872,7 @@ static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
2870 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, 2872 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2871 TRC_INTERNAL_LOOPBACK); 2873 TRC_INTERNAL_LOOPBACK);
2872 2874
2873 return (err); 2875 return err;
2874} 2876}
2875 2877
2876static int smctr_issue_tri_loopback_cmd(struct net_device *dev) 2878static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
@@ -2880,7 +2882,7 @@ static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
2880 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, 2882 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2881 TRC_TRI_LOOPBACK); 2883 TRC_TRI_LOOPBACK);
2882 2884
2883 return (err); 2885 return err;
2884} 2886}
2885 2887
2886static int smctr_issue_write_byte_cmd(struct net_device *dev, 2888static int smctr_issue_write_byte_cmd(struct net_device *dev,
@@ -2891,10 +2893,10 @@ static int smctr_issue_write_byte_cmd(struct net_device *dev,
2891 int err; 2893 int err;
2892 2894
2893 if((err = smctr_wait_while_cbusy(dev))) 2895 if((err = smctr_wait_while_cbusy(dev)))
2894 return (err); 2896 return err;
2895 2897
2896 if((err = smctr_wait_cmd(dev))) 2898 if((err = smctr_wait_cmd(dev)))
2897 return (err); 2899 return err;
2898 2900
2899 for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff); 2901 for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff);
2900 iword++, ibyte += 2) 2902 iword++, ibyte += 2)
@@ -2903,8 +2905,8 @@ static int smctr_issue_write_byte_cmd(struct net_device *dev,
2903 | (*((__u8 *)byte + ibyte + 1)); 2905 | (*((__u8 *)byte + ibyte + 1));
2904 } 2906 }
2905 2907
2906 return (smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE, 2908 return smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2907 aword_cnt)); 2909 aword_cnt);
2908} 2910}
2909 2911
2910static int smctr_issue_write_word_cmd(struct net_device *dev, 2912static int smctr_issue_write_word_cmd(struct net_device *dev,
@@ -2914,10 +2916,10 @@ static int smctr_issue_write_word_cmd(struct net_device *dev,
2914 unsigned int i, err; 2916 unsigned int i, err;
2915 2917
2916 if((err = smctr_wait_while_cbusy(dev))) 2918 if((err = smctr_wait_while_cbusy(dev)))
2917 return (err); 2919 return err;
2918 2920
2919 if((err = smctr_wait_cmd(dev))) 2921 if((err = smctr_wait_cmd(dev)))
2920 return (err); 2922 return err;
2921 2923
2922 for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++) 2924 for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++)
2923 tp->misc_command_data[i] = *((__u16 *)word + i); 2925 tp->misc_command_data[i] = *((__u16 *)word + i);
@@ -2925,7 +2927,7 @@ static int smctr_issue_write_word_cmd(struct net_device *dev,
2925 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE, 2927 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2926 aword_cnt); 2928 aword_cnt);
2927 2929
2928 return (err); 2930 return err;
2929} 2931}
2930 2932
2931static int smctr_join_complete_state(struct net_device *dev) 2933static int smctr_join_complete_state(struct net_device *dev)
@@ -2935,7 +2937,7 @@ static int smctr_join_complete_state(struct net_device *dev)
2935 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, 2937 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
2936 JS_JOIN_COMPLETE_STATE); 2938 JS_JOIN_COMPLETE_STATE);
2937 2939
2938 return (err); 2940 return err;
2939} 2941}
2940 2942
2941static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev) 2943static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
@@ -2959,7 +2961,7 @@ static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
2959 } 2961 }
2960 } 2962 }
2961 2963
2962 return (0); 2964 return 0;
2963} 2965}
2964 2966
2965static int smctr_load_firmware(struct net_device *dev) 2967static int smctr_load_firmware(struct net_device *dev)
@@ -2974,7 +2976,7 @@ static int smctr_load_firmware(struct net_device *dev)
2974 2976
2975 if (request_firmware(&fw, "tr_smctr.bin", &dev->dev)) { 2977 if (request_firmware(&fw, "tr_smctr.bin", &dev->dev)) {
2976 printk(KERN_ERR "%s: firmware not found\n", dev->name); 2978 printk(KERN_ERR "%s: firmware not found\n", dev->name);
2977 return (UCODE_NOT_PRESENT); 2979 return UCODE_NOT_PRESENT;
2978 } 2980 }
2979 2981
2980 tp->num_of_tx_buffs = 4; 2982 tp->num_of_tx_buffs = 4;
@@ -3036,7 +3038,7 @@ static int smctr_load_firmware(struct net_device *dev)
3036 smctr_disable_16bit(dev); 3038 smctr_disable_16bit(dev);
3037 out: 3039 out:
3038 release_firmware(fw); 3040 release_firmware(fw);
3039 return (err); 3041 return err;
3040} 3042}
3041 3043
3042static int smctr_load_node_addr(struct net_device *dev) 3044static int smctr_load_node_addr(struct net_device *dev)
@@ -3052,7 +3054,7 @@ static int smctr_load_node_addr(struct net_device *dev)
3052 } 3054 }
3053 dev->addr_len = 6; 3055 dev->addr_len = 6;
3054 3056
3055 return (0); 3057 return 0;
3056} 3058}
3057 3059
3058/* Lobe Media Test. 3060/* Lobe Media Test.
@@ -3146,14 +3148,14 @@ static int smctr_lobe_media_test_cmd(struct net_device *dev)
3146 if(smctr_wait_cmd(dev)) 3148 if(smctr_wait_cmd(dev))
3147 { 3149 {
3148 printk(KERN_ERR "Lobe Failed test state\n"); 3150 printk(KERN_ERR "Lobe Failed test state\n");
3149 return (LOBE_MEDIA_TEST_FAILED); 3151 return LOBE_MEDIA_TEST_FAILED;
3150 } 3152 }
3151 } 3153 }
3152 3154
3153 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, 3155 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
3154 TRC_LOBE_MEDIA_TEST); 3156 TRC_LOBE_MEDIA_TEST);
3155 3157
3156 return (err); 3158 return err;
3157} 3159}
3158 3160
3159static int smctr_lobe_media_test_state(struct net_device *dev) 3161static int smctr_lobe_media_test_state(struct net_device *dev)
@@ -3163,7 +3165,7 @@ static int smctr_lobe_media_test_state(struct net_device *dev)
3163 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, 3165 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
3164 JS_LOBE_TEST_STATE); 3166 JS_LOBE_TEST_STATE);
3165 3167
3166 return (err); 3168 return err;
3167} 3169}
3168 3170
3169static int smctr_make_8025_hdr(struct net_device *dev, 3171static int smctr_make_8025_hdr(struct net_device *dev,
@@ -3212,7 +3214,7 @@ static int smctr_make_8025_hdr(struct net_device *dev,
3212 break; 3214 break;
3213 } 3215 }
3214 3216
3215 return (0); 3217 return 0;
3216} 3218}
3217 3219
3218static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv) 3220static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3225,7 +3227,7 @@ static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3225 tsv->svv[0] = MSB(tp->authorized_access_priority); 3227 tsv->svv[0] = MSB(tp->authorized_access_priority);
3226 tsv->svv[1] = LSB(tp->authorized_access_priority); 3228 tsv->svv[1] = LSB(tp->authorized_access_priority);
3227 3229
3228 return (0); 3230 return 0;
3229} 3231}
3230 3232
3231static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv) 3233static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3236,7 +3238,7 @@ static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3236 tsv->svv[0] = 0; 3238 tsv->svv[0] = 0;
3237 tsv->svv[1] = 0; 3239 tsv->svv[1] = 0;
3238 3240
3239 return (0); 3241 return 0;
3240} 3242}
3241 3243
3242static int smctr_make_auth_funct_class(struct net_device *dev, 3244static int smctr_make_auth_funct_class(struct net_device *dev,
@@ -3250,7 +3252,7 @@ static int smctr_make_auth_funct_class(struct net_device *dev,
3250 tsv->svv[0] = MSB(tp->authorized_function_classes); 3252 tsv->svv[0] = MSB(tp->authorized_function_classes);
3251 tsv->svv[1] = LSB(tp->authorized_function_classes); 3253 tsv->svv[1] = LSB(tp->authorized_function_classes);
3252 3254
3253 return (0); 3255 return 0;
3254} 3256}
3255 3257
3256static int smctr_make_corr(struct net_device *dev, 3258static int smctr_make_corr(struct net_device *dev,
@@ -3262,7 +3264,7 @@ static int smctr_make_corr(struct net_device *dev,
3262 tsv->svv[0] = MSB(correlator); 3264 tsv->svv[0] = MSB(correlator);
 	tsv->svv[1] = LSB(correlator);
 
-	return (0);
+	return 0;
 }
 
 static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3280,7 +3282,7 @@ static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
 	tsv->svv[2] = MSB(tp->misc_command_data[1]);
 	tsv->svv[3] = LSB(tp->misc_command_data[1]);
 
-	return (0);
+	return 0;
 }
 
 static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3305,7 +3307,7 @@ static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
 	   tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00)
 		tsv->svv[0] = 0x00;
 
-	return (0);
+	return 0;
 }
 
 static int smctr_make_phy_drop_num(struct net_device *dev,
@@ -3324,7 +3326,7 @@ static int smctr_make_phy_drop_num(struct net_device *dev,
 	tsv->svv[2] = MSB(tp->misc_command_data[1]);
 	tsv->svv[3] = LSB(tp->misc_command_data[1]);
 
-	return (0);
+	return 0;
 }
 
 static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3337,7 +3339,7 @@ static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
 	for(i = 0; i < 18; i++)
 		tsv->svv[i] = 0xF0;
 
-	return (0);
+	return 0;
 }
 
 static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3358,7 +3360,7 @@ static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
 	tsv->svv[4] = MSB(tp->misc_command_data[2]);
 	tsv->svv[5] = LSB(tp->misc_command_data[2]);
 
-	return (0);
+	return 0;
 }
 
 static int smctr_make_ring_station_status(struct net_device *dev,
@@ -3374,7 +3376,7 @@ static int smctr_make_ring_station_status(struct net_device *dev,
 	tsv->svv[4] = 0;
 	tsv->svv[5] = 0;
 
-	return (0);
+	return 0;
 }
 
 static int smctr_make_ring_station_version(struct net_device *dev,
@@ -3400,7 +3402,7 @@ static int smctr_make_ring_station_version(struct net_device *dev,
 	else
 		tsv->svv[9] = 0xc4; /* EBCDIC - D */
 
-	return (0);
+	return 0;
 }
 
 static int smctr_make_tx_status_code(struct net_device *dev,
@@ -3414,7 +3416,7 @@ static int smctr_make_tx_status_code(struct net_device *dev,
 	/* Stripped frame status of Transmitted Frame */
 	tsv->svv[1] = tx_fstatus & 0xff;
 
-	return (0);
+	return 0;
 }
 
 static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
@@ -3436,7 +3438,7 @@ static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
 	tsv->svv[4] = MSB(tp->misc_command_data[2]);
 	tsv->svv[5] = LSB(tp->misc_command_data[2]);
 
-	return (0);
+	return 0;
 }
 
 static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3444,7 +3446,7 @@ static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
 	tsv->svi = WRAP_DATA;
 	tsv->svl = S_WRAP_DATA;
 
-	return (0);
+	return 0;
 }
 
 /*
@@ -3464,9 +3466,9 @@ static int smctr_open(struct net_device *dev)
 
 	err = smctr_init_adapter(dev);
 	if(err < 0)
-		return (err);
+		return err;
 
-	return (err);
+	return err;
 }
 
 /* Interrupt driven open of Token card. */
@@ -3481,9 +3483,9 @@ static int smctr_open_tr(struct net_device *dev)
 
 	/* Now we can actually open the adapter. */
 	if(tp->status == OPEN)
-		return (0);
+		return 0;
 	if(tp->status != INITIALIZED)
-		return (-1);
+		return -1;
 
 	/* FIXME: it would work a lot better if we masked the irq sources
 	   on the card here, then we could skip the locking and poll nicely */
@@ -3560,7 +3562,7 @@ static int smctr_open_tr(struct net_device *dev)
 out:
 	spin_unlock_irqrestore(&tp->lock, flags);
 
-	return (err);
+	return err;
 }
 
 /* Check for a network adapter of this type,
@@ -3675,7 +3677,7 @@ static int __init smctr_probe1(struct net_device *dev, int ioaddr)
 
 	dev->netdev_ops = &smctr_netdev_ops;
 	dev->watchdog_timeo = HZ;
-	return (0);
+	return 0;
 
 out:
 	return err;
@@ -3699,13 +3701,13 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 		case INIT:
 			if((rcode = smctr_rcv_init(dev, rmf, &correlator)) == HARDWARE_FAILED)
 			{
-				return (rcode);
+				return rcode;
 			}
 
 			if((err = smctr_send_rsp(dev, rmf, rcode,
 				correlator)))
 			{
-				return (err);
+				return err;
 			}
 			break;
 
@@ -3713,13 +3715,13 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 			if((rcode = smctr_rcv_chg_param(dev, rmf,
 				&correlator)) ==HARDWARE_FAILED)
 			{
-				return (rcode);
+				return rcode;
 			}
 
 			if((err = smctr_send_rsp(dev, rmf, rcode,
 				correlator)))
 			{
-				return (err);
+				return err;
 			}
 			break;
 
@@ -3728,16 +3730,16 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 				rmf, &correlator)) != POSITIVE_ACK)
 			{
 				if(rcode == HARDWARE_FAILED)
-					return (rcode);
+					return rcode;
 				else
-					return (smctr_send_rsp(dev, rmf,
-						rcode, correlator));
+					return smctr_send_rsp(dev, rmf,
+						rcode, correlator);
 			}
 
 			if((err = smctr_send_rpt_addr(dev, rmf,
 				correlator)))
 			{
-				return (err);
+				return err;
 			}
 			break;
 
@@ -3746,17 +3748,17 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 				rmf, &correlator)) != POSITIVE_ACK)
 			{
 				if(rcode == HARDWARE_FAILED)
-					return (rcode);
+					return rcode;
 				else
-					return (smctr_send_rsp(dev, rmf,
-						rcode,
-						correlator));
+					return smctr_send_rsp(dev, rmf,
+						rcode,
+						correlator);
 			}
 
 			if((err = smctr_send_rpt_attch(dev, rmf,
 				correlator)))
 			{
-				return (err);
+				return err;
 			}
 			break;
 
@@ -3765,17 +3767,17 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 				rmf, &correlator)) != POSITIVE_ACK)
 			{
 				if(rcode == HARDWARE_FAILED)
-					return (rcode);
+					return rcode;
 				else
-					return (smctr_send_rsp(dev, rmf,
-						rcode,
-						correlator));
+					return smctr_send_rsp(dev, rmf,
+						rcode,
+						correlator);
 			}
 
 			if((err = smctr_send_rpt_state(dev, rmf,
 				correlator)))
 			{
-				return (err);
+				return err;
 			}
 			break;
 
@@ -3786,17 +3788,17 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 				!= POSITIVE_ACK)
 			{
 				if(rcode == HARDWARE_FAILED)
-					return (rcode);
+					return rcode;
 				else
-					return (smctr_send_rsp(dev, rmf,
-						rcode,
-						correlator));
+					return smctr_send_rsp(dev, rmf,
+						rcode,
+						correlator);
 			}
 
 			if((err = smctr_send_tx_forward(dev, rmf,
 				&tx_fstatus)) == HARDWARE_FAILED)
 			{
-				return (err);
+				return err;
 			}
 
 			if(err == A_FRAME_WAS_FORWARDED)
@@ -3805,7 +3807,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 					rmf, tx_fstatus))
 					== HARDWARE_FAILED)
 				{
-					return (err);
+					return err;
 				}
 			}
 			break;
@@ -3834,7 +3836,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 		if((err = smctr_send_rsp(dev, rmf,rcode,
 			correlator)))
 		{
-			return (err);
+			return err;
 		}
 	}
 
@@ -3899,7 +3901,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
 		err = 0;
 	}
 
-	return (err);
+	return err;
 }
 
 /* Adapter RAM test. Incremental word ODD boundary data test. */
@@ -3942,7 +3944,7 @@ static int smctr_ram_memory_test(struct net_device *dev)
 				err_offset = j;
 				err_word = word_read;
 				err_pattern = word_pattern;
-				return (RAM_TEST_FAILED);
+				return RAM_TEST_FAILED;
 			}
 		}
 	}
@@ -3966,14 +3968,14 @@ static int smctr_ram_memory_test(struct net_device *dev)
 				err_offset = j;
 				err_word = word_read;
 				err_pattern = word_pattern;
-				return (RAM_TEST_FAILED);
+				return RAM_TEST_FAILED;
 			}
 		}
 	}
 
 	smctr_set_page(dev, (__u8 *)tp->ram_access);
 
-	return (0);
+	return 0;
 }
 
 static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
@@ -3986,7 +3988,7 @@ static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
 
 	/* This Frame can only come from a CRS */
 	if((rmf->dc_sc & SC_MASK) != SC_CRS)
-		return(E_INAPPROPRIATE_SOURCE_CLASS);
+		return E_INAPPROPRIATE_SOURCE_CLASS;
 
 	/* Remove MVID Length from total length. */
 	vlen = (signed short)rmf->vl - 4;
@@ -4058,7 +4060,7 @@ static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
 		}
 	}
 
-	return (rcode);
+	return rcode;
 }
 
 static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
@@ -4071,7 +4073,7 @@ static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
 
 	/* This Frame can only come from a RPS */
 	if((rmf->dc_sc & SC_MASK) != SC_RPS)
-		return (E_INAPPROPRIATE_SOURCE_CLASS);
+		return E_INAPPROPRIATE_SOURCE_CLASS;
 
 	/* Remove MVID Length from total length. */
 	vlen = (signed short)rmf->vl - 4;
@@ -4133,7 +4135,7 @@ static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
 		}
 	}
 
-	return (rcode);
+	return rcode;
 }
 
 static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
@@ -4145,7 +4147,7 @@ static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
 
 	/* This Frame can only come from a CRS */
 	if((rmf->dc_sc & SC_MASK) != SC_CRS)
-		return (E_INAPPROPRIATE_SOURCE_CLASS);
+		return E_INAPPROPRIATE_SOURCE_CLASS;
 
 	/* Remove MVID Length from total length */
 	vlen = (signed short)rmf->vl - 4;
@@ -4193,7 +4195,7 @@ static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
 		}
 	}
 
-	return (rcode);
+	return rcode;
 }
 
 static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
@@ -4250,7 +4252,7 @@ static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
 		}
 	}
 
-	return (rcode);
+	return rcode;
 }
 
 static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
@@ -4284,7 +4286,7 @@ static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
 		rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
 	}
 
-	return (E_UNRECOGNIZED_VECTOR_ID);
+	return E_UNRECOGNIZED_VECTOR_ID;
 }
 
 /*
@@ -4311,7 +4313,7 @@ static int smctr_reset_adapter(struct net_device *dev)
 	 */
 	outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR);
 
-	return (0);
+	return 0;
 }
 
 static int smctr_restart_tx_chain(struct net_device *dev, short queue)
@@ -4329,7 +4331,7 @@ static int smctr_restart_tx_chain(struct net_device *dev, short queue)
 		err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
 	}
 
-	return (err);
+	return err;
 }
 
 static int smctr_ring_status_chg(struct net_device *dev)
@@ -4371,7 +4373,7 @@ static int smctr_ring_status_chg(struct net_device *dev)
 	}
 
 	if(!(tp->ring_status_flags & RING_STATUS_CHANGED))
-		return (0);
+		return 0;
 
 	switch(tp->ring_status)
 	{
@@ -4421,7 +4423,7 @@ static int smctr_ring_status_chg(struct net_device *dev)
 			break;
 	}
 
-	return (0);
+	return 0;
 }
 
 static int smctr_rx_frame(struct net_device *dev)
@@ -4486,7 +4488,7 @@ static int smctr_rx_frame(struct net_device *dev)
 			break;
 	}
 
-	return (err);
+	return err;
 }
 
 static int smctr_send_dat(struct net_device *dev)
@@ -4502,7 +4504,7 @@ static int smctr_send_dat(struct net_device *dev)
 	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE,
 		sizeof(MAC_HEADER))) == (FCBlock *)(-1L))
 	{
-		return (OUT_OF_RESOURCES);
+		return OUT_OF_RESOURCES;
 	}
 
 	/* Initialize DAT Data Fields. */
@@ -4524,7 +4526,7 @@ static int smctr_send_dat(struct net_device *dev)
 
 	/* Start Transmit. */
 	if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
-		return (err);
+		return err;
 
 	/* Wait for Transmit to Complete */
 	for(i = 0; i < 10000; i++)
@@ -4538,7 +4540,7 @@ static int smctr_send_dat(struct net_device *dev)
 	if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
 	   fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
 	{
-		return (INITIALIZE_FAILED);
+		return INITIALIZE_FAILED;
 	}
 
 	/* De-allocated Tx FCB and Frame Buffer
@@ -4549,7 +4551,7 @@ static int smctr_send_dat(struct net_device *dev)
 	tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
 	smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
 
-	return (0);
+	return 0;
 }
 
 static void smctr_timeout(struct net_device *dev)
@@ -4610,7 +4612,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
 	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr)
 		+ S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L))
 	{
-		return (OUT_OF_RESOURCES);
+		return OUT_OF_RESOURCES;
 	}
 
 	/* Initialize DAT Data Fields. */
@@ -4639,7 +4641,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
 	/* Start Transmit. */
 	tmf->vl = SWAP_BYTES(tmf->vl);
 	if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
-		return (err);
+		return err;
 
 	/* Wait for Transmit to Complete. (10 ms). */
 	for(i=0; i < 10000; i++)
@@ -4653,7 +4655,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
 	if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
 	   fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
 	{
-		return (LOBE_MEDIA_TEST_FAILED);
+		return LOBE_MEDIA_TEST_FAILED;
 	}
 
 	/* De-allocated Tx FCB and Frame Buffer
@@ -4664,7 +4666,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
 	tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
 	smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
 
-	return (0);
+	return 0;
 }
 
 static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
@@ -4679,7 +4681,7 @@ static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
 		+ S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS))
 		== (FCBlock *)(-1L))
 	{
-		return (0);
+		return 0;
 	}
 
 	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4722,7 +4724,7 @@ static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
 */
 	tmf->vl = SWAP_BYTES(tmf->vl);
 
-	return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+	return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
 }
 
 static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
@@ -4737,7 +4739,7 @@ static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
 		+ S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY))
 		== (FCBlock *)(-1L))
 	{
-		return (0);
+		return 0;
 	}
 
 	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4776,7 +4778,7 @@ static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
 */
 	tmf->vl = SWAP_BYTES(tmf->vl);
 
-	return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+	return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
 }
 
 static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
@@ -4791,7 +4793,7 @@ static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
 		+ S_RING_STATION_STATUS + S_STATION_IDENTIFER))
 		== (FCBlock *)(-1L))
 	{
-		return (0);
+		return 0;
 	}
 
 	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4826,7 +4828,7 @@ static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
 */
 	tmf->vl = SWAP_BYTES(tmf->vl);
 
-	return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+	return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
 }
 
 static int smctr_send_rpt_tx_forward(struct net_device *dev,
@@ -4839,7 +4841,7 @@ static int smctr_send_rpt_tx_forward(struct net_device *dev,
 	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
 		+ S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L))
 	{
-		return (0);
+		return 0;
 	}
 
 	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4862,7 +4864,7 @@ static int smctr_send_rpt_tx_forward(struct net_device *dev,
 */
 	tmf->vl = SWAP_BYTES(tmf->vl);
 
-	return(smctr_trc_send_packet(dev, fcb, MAC_QUEUE));
+	return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
 }
 
 static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
@@ -4875,7 +4877,7 @@ static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
 	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
 		+ S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L))
 	{
-		return (0);
+		return 0;
 	}
 
 	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4888,7 +4890,7 @@ static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
 	tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
 	smctr_make_corr(dev, tsv, correlator);
 
-	return (0);
+	return 0;
 }
 
 static int smctr_send_rq_init(struct net_device *dev)
@@ -4907,7 +4909,7 @@ static int smctr_send_rq_init(struct net_device *dev)
 		+ S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER))
 		== (FCBlock *)(-1L)))
 	{
-		return (0);
+		return 0;
 	}
 
 	tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4943,7 +4945,7 @@ static int smctr_send_rq_init(struct net_device *dev)
 	tmf->vl = SWAP_BYTES(tmf->vl);
 
 	if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
-		return (err);
+		return err;
 
 	/* Wait for Transmit to Complete */
 	for(i = 0; i < 10000; i++)
@@ -4957,7 +4959,7 @@ static int smctr_send_rq_init(struct net_device *dev)
 		fstatus = fcb->frame_status;
 
 		if(!(fstatus & FCB_COMMAND_DONE))
-			return (HARDWARE_FAILED);
+			return HARDWARE_FAILED;
 
 		if(!(fstatus & FCB_TX_STATUS_E))
 			count++;
@@ -4971,7 +4973,7 @@ static int smctr_send_rq_init(struct net_device *dev)
 		smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
 	} while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS));
 
-	return (smctr_join_complete_state(dev));
+	return smctr_join_complete_state(dev);
 }
 
 static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
@@ -4984,13 +4986,13 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
 
 	/* Check if this is the END POINT of the Transmit Forward Chain. */
 	if(rmf->vl <= 18)
-		return (0);
+		return 0;
 
 	/* Allocate Transmit FCB only by requesting 0 bytes
 	 * of data buffer.
 	 */
 	if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L))
-		return (0);
+		return 0;
 
 	/* Set pointer to Transmit Frame Buffer to the data
 	 * portion of the received TX Forward frame, making
@@ -5006,7 +5008,7 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
 	fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2;
 
 	if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
-		return (err);
+		return err;
 
 	/* Wait for Transmit to Complete */
 	for(i = 0; i < 10000; i++)
@@ -5020,7 +5022,7 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
 	if(!(fcb->frame_status & FCB_COMMAND_DONE))
 	{
 		if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE)))
-			return (err);
+			return err;
 
 		for(i = 0; i < 10000; i++)
 		{
@@ -5030,12 +5032,12 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
 		}
 
 		if(!(fcb->frame_status & FCB_COMMAND_DONE))
-			return (HARDWARE_FAILED);
+			return HARDWARE_FAILED;
 	}
 
 	*tx_fstatus = fcb->frame_status;
 
-	return (A_FRAME_WAS_FORWARDED);
+	return A_FRAME_WAS_FORWARDED;
 }
 
 static int smctr_set_auth_access_pri(struct net_device *dev,
@@ -5044,11 +5046,11 @@ static int smctr_set_auth_access_pri(struct net_device *dev,
 	struct net_local *tp = netdev_priv(dev);
 
 	if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY)
-		return (E_SUB_VECTOR_LENGTH_ERROR);
+		return E_SUB_VECTOR_LENGTH_ERROR;
 
 	tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]);
 
-	return (POSITIVE_ACK);
+	return POSITIVE_ACK;
 }
 
 static int smctr_set_auth_funct_class(struct net_device *dev,
@@ -5057,22 +5059,22 @@ static int smctr_set_auth_funct_class(struct net_device *dev,
 	struct net_local *tp = netdev_priv(dev);
 
 	if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS)
-		return (E_SUB_VECTOR_LENGTH_ERROR);
+		return E_SUB_VECTOR_LENGTH_ERROR;
 
 	tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]);
 
-	return (POSITIVE_ACK);
+	return POSITIVE_ACK;
 }
 
 static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
 	__u16 *correlator)
 {
 	if(rsv->svl != S_CORRELATOR)
-		return (E_SUB_VECTOR_LENGTH_ERROR);
+		return E_SUB_VECTOR_LENGTH_ERROR;
 
 	*correlator = (rsv->svv[0] << 8 | rsv->svv[1]);
 
-	return (POSITIVE_ACK);
+	return POSITIVE_ACK;
 }
 
 static int smctr_set_error_timer_value(struct net_device *dev,
@@ -5082,34 +5084,34 @@ static int smctr_set_error_timer_value(struct net_device *dev,
 	int err;
 
 	if(rsv->svl != S_ERROR_TIMER_VALUE)
-		return (E_SUB_VECTOR_LENGTH_ERROR);
+		return E_SUB_VECTOR_LENGTH_ERROR;
 
 	err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10;
 
 	smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval);
 
 	if((err = smctr_wait_cmd(dev)))
-		return (err);
+		return err;
 
-	return (POSITIVE_ACK);
+	return POSITIVE_ACK;
 }
 
 static int smctr_set_frame_forward(struct net_device *dev,
 	MAC_SUB_VECTOR *rsv, __u8 dc_sc)
 {
 	if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD))
-		return (E_SUB_VECTOR_LENGTH_ERROR);
+		return E_SUB_VECTOR_LENGTH_ERROR;
 
 	if((dc_sc & DC_MASK) != DC_CRS)
 	{
 		if(rsv->svl >= 2 && rsv->svl < 20)
-			return (E_TRANSMIT_FORWARD_INVALID);
+			return E_TRANSMIT_FORWARD_INVALID;
 
 		if((rsv->svv[0] != 0) || (rsv->svv[1] != 0))
-			return (E_TRANSMIT_FORWARD_INVALID);
+			return E_TRANSMIT_FORWARD_INVALID;
 	}
 
-	return (POSITIVE_ACK);
+	return POSITIVE_ACK;
 }
 
 static int smctr_set_local_ring_num(struct net_device *dev,
@@ -5118,13 +5120,13 @@ static int smctr_set_local_ring_num(struct net_device *dev,
 	struct net_local *tp = netdev_priv(dev);
 
 	if(rsv->svl != S_LOCAL_RING_NUMBER)
-		return (E_SUB_VECTOR_LENGTH_ERROR);
+		return E_SUB_VECTOR_LENGTH_ERROR;
 
 	if(tp->ptr_local_ring_num)
 		*(__u16 *)(tp->ptr_local_ring_num)
 			= (rsv->svv[0] << 8 | rsv->svv[1]);
 
-	return (POSITIVE_ACK);
+	return POSITIVE_ACK;
 }
 
 static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
@@ -5140,7 +5142,7 @@ static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
 		outb(tp->trc_mask, ioaddr + CSR);
 	}
 
-	return (0);
+	return 0;
 }
 
 static void smctr_set_multicast_list(struct net_device *dev)
@@ -5159,7 +5161,7 @@ static int smctr_set_page(struct net_device *dev, __u8 *buf)
 	amask = (__u8)((tptr & PR_PAGE_MASK) >> 8);
 	outb(amask, dev->base_addr + PR);
 
-	return (0);
+	return 0;
 }
 
 static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
@@ -5167,13 +5169,13 @@ static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
 	int err;
 
 	if(rsv->svl != S_PHYSICAL_DROP)
-		return (E_SUB_VECTOR_LENGTH_ERROR);
+		return E_SUB_VECTOR_LENGTH_ERROR;
 
 	smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]);
 	if((err = smctr_wait_cmd(dev)))
-		return (err);
+		return err;
 
-	return (POSITIVE_ACK);
+	return POSITIVE_ACK;
 }
 
 /* Reset the ring speed to the opposite of what it was. This auto-pilot
@@ -5195,16 +5197,16 @@ static int smctr_set_ring_speed(struct net_device *dev)
 	smctr_reset_adapter(dev);
 
 	if((err = smctr_init_card_real(dev)))
-		return (err);
+		return err;
 
 	smctr_enable_bic_int(dev);
 
 	if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
-		return (err);
+		return err;
 
 	smctr_disable_16bit(dev);
 
-	return (0);
+	return 0;
 }
 
 static int smctr_set_rx_look_ahead(struct net_device *dev)
@@ -5233,7 +5235,7 @@ static int smctr_set_rx_look_ahead(struct net_device *dev)
 		*((__u16 *)(tp->ram_access)) = sword;
 	}
 
-	return (0);
+	return 0;
 }
 
 static int smctr_set_trc_reset(int ioaddr)
@@ -5243,7 +5245,7 @@ static int smctr_set_trc_reset(int ioaddr)
 	r = inb(ioaddr + MSR);
 	outb(MSR_RST | r, ioaddr + MSR);
 
-	return (0);
+	return 0;
 }
 
 /*
@@ -5259,10 +5261,10 @@ static int smctr_setup_single_cmd(struct net_device *dev,
 		printk(KERN_DEBUG "%s: smctr_setup_single_cmd\n", dev->name);
 
 	if((err = smctr_wait_while_cbusy(dev)))
-		return (err);
+		return err;
 
 	if((err = (unsigned int)smctr_wait_cmd(dev)))
-		return (err);
+		return err;
 
 	tp->acb_head->cmd_done_status = 0;
 	tp->acb_head->cmd = command;
@@ -5270,7 +5272,7 @@ static int smctr_setup_single_cmd(struct net_device *dev,
 
 	err = smctr_issue_resume_acb_cmd(dev);
 
-	return (err);
+	return err;
 }
 
 /*
@@ -5287,7 +5289,7 @@ static int smctr_setup_single_cmd_w_data(struct net_device *dev,
 	tp->acb_head->data_offset_lo
 		= (__u16)TRC_POINTER(tp->misc_command_data);
 
-	return(smctr_issue_resume_acb_cmd(dev));
+	return smctr_issue_resume_acb_cmd(dev);
 }
 
 static char *smctr_malloc(struct net_device *dev, __u16 size)
@@ -5298,7 +5300,7 @@ static char *smctr_malloc(struct net_device *dev, __u16 size)
 	m = (char *)(tp->ram_access + tp->sh_mem_used);
 	tp->sh_mem_used += (__u32)size;
 
-	return (m);
+	return m;
 }
 
 static int smctr_status_chg(struct net_device *dev)
@@ -5333,7 +5335,7 @@ static int smctr_status_chg(struct net_device *dev)
 			break;
 	}
 
-	return (0);
+	return 0;
 }
 
 static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
@@ -5355,7 +5357,7 @@ static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
 		err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
 	}
 
-	return (err);
+	return err;
 }
 
 static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
@@ -5409,7 +5411,7 @@ static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
 			break;
 	}
 
-	return (err);
+	return err;
 }
 
 static unsigned short smctr_tx_move_frame(struct net_device *dev,
@@ -5450,7 +5452,7 @@ static unsigned short smctr_tx_move_frame(struct net_device *dev,
 		pbuff += len;
 	}
 
-	return (0);
+	return 0;
 }
 
 /* Update the error statistic counters for this adapter. */
@@ -5493,7 +5495,7 @@ static int smctr_update_err_stats(struct net_device *dev)
 	if(tstat->token_errors)
 		tstat->token_errors += *(tp->misc_command_data + 5) >> 8;
 
-	return (0);
+	return 0;
 }
 
 static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
@@ -5530,7 +5532,7 @@ static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
 	tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END;
 	tp->rx_bdb_curr[queue] = bdb;
 
-	return (0);
+	return 0;
 }
 
 static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
@@ -5542,13 +5544,13 @@ static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
 		printk(KERN_DEBUG "smctr_update_tx_chain\n");
 
 	if(tp->num_tx_fcbs_used[queue] <= 0)
-		return (HARDWARE_FAILED);
+		return HARDWARE_FAILED;
 	else
 	{
 		if(tp->tx_buff_used[queue] < fcb->memory_alloc)
 		{
 			tp->tx_buff_used[queue] = 0;
-			return (HARDWARE_FAILED);
+			return HARDWARE_FAILED;
 		}
 
 		tp->tx_buff_used[queue] -= fcb->memory_alloc;
@@ -5566,7 +5568,7 @@ static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
 		fcb->frame_status = 0;
 		tp->tx_fcb_end[queue] = fcb->next_ptr;
 		netif_wake_queue(dev);
-		return (0);
+		return 0;
 	}
 }
 
@@ -5587,12 +5589,12 @@ static int smctr_wait_cmd(struct net_device *dev)
 	}
 
 	if(loop_count == 0)
-		return(HARDWARE_FAILED);
+		return HARDWARE_FAILED;
 
 	if(tp->acb_head->cmd_done_status & 0xff)
-		return(HARDWARE_FAILED);
+		return HARDWARE_FAILED;
 
-	return (0);
+	return 0;
 }
 
 static int smctr_wait_while_cbusy(struct net_device *dev)
@@ -5624,9 +5626,9 @@ static int smctr_wait_while_cbusy(struct net_device *dev)
 	}
 
 	if(timeout)
-		return (0);
+		return 0;
 	else
-		return (HARDWARE_FAILED);
+		return HARDWARE_FAILED;
 }
 
 #ifdef MODULE
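Every smctr.c hunk above is the same mechanical cleanup: in C, return is a statement, not a function call, so parentheses around the returned expression are redundant, and checkpatch.pl flags them ("return is not a function, parentheses are not required"). A minimal before/after sketch of the pattern, using a hypothetical helper rather than code from the driver:

	/* before (flagged by checkpatch.pl):
	 *	return (err);
	 *	return (0);
	 * after: drop the parentheses; generated code is identical.
	 */
	static int example_status(int err)
	{
		if (err < 0)
			return err;
		return 0;
	}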
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 435ef7d5470f..c83f4f6e39e1 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -224,7 +224,7 @@ static int madgemc_sifprobe(struct net_device *dev)
 		chk2 ^= 0x0FE;
 
 		if(chk1 != chk2)
-			return (-1); /* No adapter */
+			return -1; /* No adapter */
 		chk1 -= 2;
 	} while(chk1 != 0); /* Repeat 128 times (all byte values) */
 
@@ -232,7 +232,7 @@ static int madgemc_sifprobe(struct net_device *dev)
 	/* Restore the SIFADR value */
 	SIFWRITEB(old, SIFADR);
 
-	return (0);
+	return 0;
 }
 #endif
 
@@ -271,7 +271,7 @@ int tms380tr_open(struct net_device *dev)
 	{
 		printk(KERN_INFO "%s: Chipset initialization error\n",
 			dev->name);
-		return (-1);
+		return -1;
 	}
 
 	tp->timer.expires = jiffies + 30*HZ;
@@ -298,7 +298,7 @@ int tms380tr_open(struct net_device *dev)
 	if(tp->AdapterVirtOpenFlag == 0)
 	{
 		tms380tr_disable_interrupts(dev);
-		return (-1);
+		return -1;
 	}
 
 	tp->StartTime = jiffies;
@@ -309,7 +309,7 @@ int tms380tr_open(struct net_device *dev)
 	tp->timer.data = (unsigned long)dev;
 	add_timer(&tp->timer);
 
-	return (0);
+	return 0;
 }
 
 /*
@@ -343,23 +343,23 @@ static int tms380tr_chipset_init(struct net_device *dev)
 		printk(KERN_DEBUG "%s: Resetting adapter...\n", dev->name);
 	err = tms380tr_reset_adapter(dev);
 	if(err < 0)
-		return (-1);
+		return -1;
 
 	if(tms380tr_debug > 3)
 		printk(KERN_DEBUG "%s: Bringup diags...\n", dev->name);
 	err = tms380tr_bringup_diags(dev);
 	if(err < 0)
-		return (-1);
+		return -1;
 
 	if(tms380tr_debug > 3)
 		printk(KERN_DEBUG "%s: Init adapter...\n", dev->name);
 	err = tms380tr_init_adapter(dev);
 	if(err < 0)
-		return (-1);
+		return -1;
 
 	if(tms380tr_debug > 3)
 		printk(KERN_DEBUG "%s: Done!\n", dev->name);
-	return (0);
+	return 0;
 }
 
 /*
@@ -877,7 +877,7 @@ static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqTy
 	   IrqType != STS_IRQ_COMMAND_STATUS &&
 	   IrqType != STS_IRQ_RING_STATUS)
 	{
-		return (1); /* SSB not involved. */
+		return 1; /* SSB not involved. */
 	}
 
 	/* Note: All fields of the SSB have been set to all ones (-1) after it
@@ -887,21 +887,21 @@ static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqTy
 	 */
 
 	if(ssb->STS == (unsigned short) -1)
-		return (0);	/* Command field not yet available. */
+		return 0;	/* Command field not yet available. */
 	if(IrqType == STS_IRQ_COMMAND_STATUS)
-		return (1);	/* Status fields not always affected. */
+		return 1;	/* Status fields not always affected. */
 	if(ssb->Parm[0] == (unsigned short) -1)
-		return (0);	/* Status 1 field not yet available. */
+		return 0;	/* Status 1 field not yet available. */
 	if(IrqType == STS_IRQ_RING_STATUS)
-		return (1);	/* Status 2 & 3 fields not affected. */
+		return 1;	/* Status 2 & 3 fields not affected. */
 
 	/* Note: At this point, the interrupt is either TRANSMIT or RECEIVE. */
 	if(ssb->Parm[1] == (unsigned short) -1)
-		return (0);	/* Status 2 field not yet available. */
+		return 0;	/* Status 2 field not yet available. */
 	if(ssb->Parm[2] == (unsigned short) -1)
-		return (0);	/* Status 3 field not yet available. */
+		return 0;	/* Status 3 field not yet available. */
 
-	return (1);	/* All SSB fields have been written by the adapter. */
+	return 1;	/* All SSB fields have been written by the adapter. */
 }
 
 /*
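tms380tr_chk_ssb() above relies on a sentinel convention: the host pre-fills every SSB field with all ones (-1 as an unsigned short, i.e. 0xFFFF) and treats any other value as "the adapter has DMA-written this field". A hedged sketch of the idiom, with hypothetical names rather than the driver's own:

	#define SSB_NOT_WRITTEN	((unsigned short)-1)	/* 0xFFFF pre-fill */

	/* returns nonzero once the device has overwritten the field */
	static int ssb_field_ready(const volatile unsigned short *field)
	{
		return *field != SSB_NOT_WRITTEN;
	}

The scheme has an inherent blind spot: a field whose legitimate value happens to be 0xFFFF is indistinguishable from one the adapter has not written yet.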
@@ -1143,7 +1143,7 @@ int tms380tr_close(struct net_device *dev)
 #endif
 	tms380tr_cancel_tx_queue(tp);
 
-	return (0);
+	return 0;
 }
 
 /*
@@ -1154,7 +1154,7 @@ static struct net_device_stats *tms380tr_get_stats(struct net_device *dev)
 {
 	struct net_local *tp = netdev_priv(dev);
 
-	return ((struct net_device_stats *)&tp->MacStat);
+	return (struct net_device_stats *)&tp->MacStat;
 }
 
 /*
@@ -1256,7 +1256,7 @@ static int tms380tr_reset_adapter(struct net_device *dev)
 	if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) {
 		printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
 			dev->name, "tms380tr.bin");
-		return (-1);
+		return -1;
 	}
 
 	fw_ptr = (unsigned short *)fw_entry->data;
@@ -1321,16 +1321,14 @@ static int tms380tr_reset_adapter(struct net_device *dev)
 
 			/* Clear CPHALT and start BUD */
 			SIFWRITEW(c, SIFACL);
-			if (fw_entry)
-				release_firmware(fw_entry);
-			return (1);
+			release_firmware(fw_entry);
+			return 1;
 		}
 	} while(count == 0);
 
-	if (fw_entry)
-		release_firmware(fw_entry);
+	release_firmware(fw_entry);
 	printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name);
-	return (-1);
+	return -1;
 }
 
 MODULE_FIRMWARE("tms380tr.bin");
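The two `if (fw_entry)` guards dropped in the hunk above are redundant: the kernel firmware loader's release_firmware() already tolerates a NULL argument and simply returns, so callers may release unconditionally. A hedged sketch of the resulting error-path shape (hypothetical download routine; `device` stands in for whatever struct device pointer the caller holds):

	const struct firmware *fw = NULL;
	int rc = -1;

	if (request_firmware(&fw, "tms380tr.bin", device) != 0)
		goto out;
	/* ... copy fw->data to the adapter ... */
	rc = 0;
out:
	release_firmware(fw);	/* safe even when fw was never assigned */
	return rc;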
@@ -1365,7 +1363,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
 			printk(KERN_DEBUG " %04X\n", Status);
 		/* BUD successfully completed */
 		if(Status == STS_INITIALIZE)
-			return (1);
+			return 1;
 	/* Unrecoverable hardware error, BUD not completed? */
 	} while((loop_cnt > 0) && ((Status & (STS_ERROR | STS_TEST))
 		!= (STS_ERROR | STS_TEST)));
@@ -1392,7 +1390,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
 	else
 		printk(KERN_INFO "%s: Bring Up Diagnostics Error (%04X) occurred\n", dev->name, Status & 0x000f);
 
-	return (-1);
+	return -1;
 }
 
 /*
@@ -1466,7 +1464,7 @@ static int tms380tr_init_adapter(struct net_device *dev)
 			{
 				printk(KERN_INFO "%s: DMA failed\n", dev->name);
 				/* DMA data error: wrong data in SCB */
-				return (-1);
+				return -1;
 			}
 			i++;
 		} while(i < 6);
@@ -1475,11 +1473,11 @@ static int tms380tr_init_adapter(struct net_device *dev)
 		do {	/* Test if contents of SSB is valid */
 			if(SSB_Test[i] != *(sb_ptr + i))
 				/* DMA data error: wrong data in SSB */
-				return (-1);
+				return -1;
 			i++;
 		} while (i < 8);
 
-		return (1);	/* Adapter successfully initialized */
+		return 1;	/* Adapter successfully initialized */
 	}
 	else
 	{
@@ -1490,7 +1488,7 @@ static int tms380tr_init_adapter(struct net_device *dev)
 			Status &= STS_ERROR_MASK;
 			/* ShowInitialisationErrorCode(Status); */
 			printk(KERN_INFO "%s: Status error: %d\n", dev->name, Status);
-			return (-1); /* Unrecoverable error */
+			return -1; /* Unrecoverable error */
 		}
 		else
 		{
@@ -1505,7 +1503,7 @@ static int tms380tr_init_adapter(struct net_device *dev)
 	} while(retry_cnt > 0);
 
 	printk(KERN_INFO "%s: Retry exceeded\n", dev->name);
-	return (-1);
+	return -1;
 }
 
 /*
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index d4c7c0c0a3d6..d3e788a9cd1c 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -125,18 +125,16 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
 	dev->irq = pci_irq_line;
 	dev->dma = 0;
 
-	printk("%s: %s\n", dev->name, cardinfo->name);
-	printk("%s: IO: %#4lx IRQ: %d\n",
-		dev->name, dev->base_addr, dev->irq);
+	dev_info(&pdev->dev, "%s\n", cardinfo->name);
+	dev_info(&pdev->dev, " IO: %#4lx IRQ: %d\n", dev->base_addr, dev->irq);
 
 	tms_pci_read_eeprom(dev);
 
-	printk("%s: Ring Station Address: %pM\n",
-		dev->name, dev->dev_addr);
+	dev_info(&pdev->dev, " Ring Station Address: %pM\n", dev->dev_addr);
 
 	ret = tmsdev_init(dev, &pdev->dev);
 	if (ret) {
-		printk("%s: unable to get memory for dev->priv.\n", dev->name);
+		dev_info(&pdev->dev, "unable to get memory for dev->priv.\n");
 		goto err_out_region;
 	}
 
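The tmspci.c hunk replaces bare printk() calls, which had to splice dev->name into every format string by hand, with dev_info(), which takes the struct device itself and prefixes each message with the driver and device identity automatically. A small sketch of the call (probe-time context assumed; `pdev` is the usual struct pci_dev pointer):

	/* prints something like "tmspci 0000:00:0a.0: Ring Station Address: ..." */
	dev_info(&pdev->dev, "Ring Station Address: %pM\n", dev->dev_addr);

The %pM extension formats a 6-byte MAC address and is the same specifier the original printk already used; only the message prefixing changes.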
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index a03730bd1da5..5c633a32eaeb 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -219,7 +219,7 @@ static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
 	if (i == 100)
 		return 0xffff;
 	else
-		return (TSI_READ_PHY(TSI108_MAC_MII_DATAIN));
+		return TSI_READ_PHY(TSI108_MAC_MII_DATAIN);
 }
 
 static void tsi108_write_mii(struct tsi108_prv_data *data,
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 75a64c88cf7a..251c6ce15aea 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1448,7 +1448,7 @@ de4x5_sw_reset(struct net_device *dev)
 	status = -EIO;
     }
 
-    lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+    lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
     lp->tx_old = lp->tx_new;
 
     return status;
@@ -1506,7 +1506,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
 	lp->stats.tx_bytes += skb->len;
 	outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
 
-	lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+	lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
 
 	if (TX_BUFFS_AVAIL) {
 	    netif_start_queue(dev);               /* Another pkt may be queued */
@@ -1657,7 +1657,7 @@ de4x5_rx(struct net_device *dev)
 	    }
 
 	    /* Change buffer ownership for this frame, back to the adapter */
-	    for (;lp->rx_old!=entry;lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
+	    for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
 		lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
 		barrier();
 	    }
@@ -1668,7 +1668,7 @@ de4x5_rx(struct net_device *dev)
 	/*
 	** Update entry information
 	*/
-	lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
+	lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
     }
 
     return 0;
@@ -1726,7 +1726,7 @@ de4x5_tx(struct net_device *dev)
 	}
 
 	/* Update all the pointers */
-	lp->tx_old = (++lp->tx_old) % lp->txRingSize;
+	lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
     }
 
     /* Any resources available? */
@@ -1801,7 +1801,7 @@ de4x5_rx_ovfc(struct net_device *dev)
 
     for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
 	lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
-	lp->rx_new = (++lp->rx_new % lp->rxRingSize);
+	lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
     }
 
     outl(omr, DE4X5_OMR);
@@ -1932,7 +1932,7 @@ set_multicast_list(struct net_device *dev)
 	    load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
 			SETUP_FRAME_LEN, (struct sk_buff *)1);
 
-	    lp->tx_new = (++lp->tx_new) % lp->txRingSize;
+	    lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
 	    outl(POLL_DEMAND, DE4X5_TPD);       /* Start the TX */
 	    dev->trans_start = jiffies; /* prevent tx timeout */
 	}
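Unlike the return-statement cleanups elsewhere in this commit, the de4x5.c ring-index hunks above fix real undefined behavior: `lp->tx_new = (++lp->tx_new) % lp->txRingSize;` modifies lp->tx_new twice with no intervening sequence point, so the compiler may order the two writes either way (GCC warns "operation on ... may be undefined"). The replacement reads the old value once and writes once. A minimal sketch of the well-defined form, with hypothetical helper names:

	/* advance a ring index by one slot, wrapping at size */
	static inline unsigned int ring_advance(unsigned int idx,
						unsigned int size)
	{
		return (idx + 1) % size;	/* single read, single write */
	}

	/* usage: lp->tx_new = ring_advance(lp->tx_new, lp->txRingSize); */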
@@ -3119,7 +3119,7 @@ dc2114x_autoconf(struct net_device *dev)
 	if (lp->media == _100Mb) {
 	    if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
 		lp->media = SPD_DET;
-		return (slnk & ~TIMER_CB);
+		return slnk & ~TIMER_CB;
 	    }
 	} else {
 	    if (wait_for_link(dev) < 0) {
@@ -3484,7 +3484,7 @@ is_spd_100(struct net_device *dev)
 	spd = ((~gep_rd(dev)) & GEP_SLNK);
     } else {
 	if ((lp->ibn == 2) || !lp->asBitValid)
-	    return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0);
+	    return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
 
 	spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
 		  (lp->linkOK & ~lp->asBitValid);
@@ -3502,15 +3502,15 @@ is_100_up(struct net_device *dev)
     if (lp->useMII) {
 	/* Double read for sticky bits & temporary drops */
 	mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
-	return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
+	return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
     } else if (!lp->useSROM) {	/* de500-xa */
-	return ((~gep_rd(dev)) & GEP_SLNK);
+	return (~gep_rd(dev)) & GEP_SLNK;
     } else {
 	if ((lp->ibn == 2) || !lp->asBitValid)
-	    return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0);
+	    return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
 
-	return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
-		(lp->linkOK & ~lp->asBitValid));
+	return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+		(lp->linkOK & ~lp->asBitValid);
     }
 }
 
@@ -3523,17 +3523,17 @@ is_10_up(struct net_device *dev)
     if (lp->useMII) {
 	/* Double read for sticky bits & temporary drops */
 	mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
-	return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS);
+	return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
     } else if (!lp->useSROM) {	/* de500-xa */
-	return ((~gep_rd(dev)) & GEP_LNP);
+	return (~gep_rd(dev)) & GEP_LNP;
     } else {
 	if ((lp->ibn == 2) || !lp->asBitValid)
-	    return (((lp->chipset & ~0x00ff) == DC2114x) ?
-		    (~inl(DE4X5_SISR)&SISR_LS10):
-		    0);
+	    return ((lp->chipset & ~0x00ff) == DC2114x) ?
+		    (~inl(DE4X5_SISR)&SISR_LS10):
+		    0;
 
-	return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
-		(lp->linkOK & ~lp->asBitValid));
+	return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
+		(lp->linkOK & ~lp->asBitValid);
     }
 }
 
@@ -3544,7 +3544,7 @@ is_anc_capable(struct net_device *dev)
     u_long iobase = dev->base_addr;
 
     if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
-	return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII));
+	return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
     } else if ((lp->chipset & ~0x00ff) == DC2114x) {
 	return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
     } else {
@@ -3568,7 +3568,7 @@ ping_media(struct net_device *dev, int msec)
3568 3568
3569 lp->tmp = lp->tx_new; /* Remember the ring position */ 3569 lp->tmp = lp->tx_new; /* Remember the ring position */
3570 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1); 3570 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
3571 lp->tx_new = (++lp->tx_new) % lp->txRingSize; 3571 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
3572 outl(POLL_DEMAND, DE4X5_TPD); 3572 outl(POLL_DEMAND, DE4X5_TPD);
3573 } 3573 }
3574 3574
@@ -4930,7 +4930,7 @@ getfrom_mii(u32 command, u_long ioaddr)
4930 outl(command | MII_MDC, ioaddr); 4930 outl(command | MII_MDC, ioaddr);
4931 udelay(1); 4931 udelay(1);
4932 4932
4933 return ((inl(ioaddr) >> 19) & 1); 4933 return (inl(ioaddr) >> 19) & 1;
4934} 4934}
4935 4935
4936/* 4936/*
@@ -4975,8 +4975,8 @@ mii_get_oui(u_char phyaddr, u_long ioaddr)
4975 a.breg[0]=a.breg[1]; 4975 a.breg[0]=a.breg[1];
4976 a.breg[1]=i; 4976 a.breg[1]=i;
4977 4977
4978 return ((a.reg<<8)|ret); */ /* SEEQ and Cypress way */ 4978 return (a.reg<<8)|ret; */ /* SEEQ and Cypress way */
4979/* return ((r2<<6)|(u_int)(r3>>10)); */ /* NATIONAL and BROADCOM way */ 4979/* return (r2<<6)|(u_int)(r3>>10); */ /* NATIONAL and BROADCOM way */
4980 return r2; /* (I did it) My way */ 4980 return r2; /* (I did it) My way */
4981} 4981}
4982 4982
@@ -5144,7 +5144,7 @@ gep_rd(struct net_device *dev)
5144 if (lp->chipset == DC21140) { 5144 if (lp->chipset == DC21140) {
5145 return inl(DE4X5_GEP); 5145 return inl(DE4X5_GEP);
5146 } else if ((lp->chipset & ~0x00ff) == DC2114x) { 5146 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5147 return (inl(DE4X5_SIGR) & 0x000fffff); 5147 return inl(DE4X5_SIGR) & 0x000fffff;
5148 } 5148 }
5149 5149
5150 return 0; 5150 return 0;
@@ -5417,7 +5417,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5417 /* Set up the descriptor and give ownership to the card */ 5417 /* Set up the descriptor and give ownership to the card */
5418 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | 5418 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
5419 SETUP_FRAME_LEN, (struct sk_buff *)1); 5419 SETUP_FRAME_LEN, (struct sk_buff *)1);
5420 lp->tx_new = (++lp->tx_new) % lp->txRingSize; 5420 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
5421 outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ 5421 outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
5422 netif_wake_queue(dev); /* Unlock the TX ring */ 5422 netif_wake_queue(dev); /* Unlock the TX ring */
5423 break; 5423 break;
@@ -5474,7 +5474,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5474 tmp.lval[6] = inl(DE4X5_STRR); j+=4; 5474 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
5475 tmp.lval[7] = inl(DE4X5_SIGR); j+=4; 5475 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
5476 ioc->len = j; 5476 ioc->len = j;
5477 if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; 5477 if (copy_to_user(ioc->data, tmp.lval, ioc->len)) return -EFAULT;
5478 break; 5478 break;
5479 5479
5480#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */ 5480#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
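
Every de4x5 hunk above replaces the same construct: "lp->tx_new = (++lp->tx_new) % lp->txRingSize" writes tx_new twice with no intervening sequence point, which is undefined behavior in C and draws a -Wsequence-point warning from modern GCC. A minimal stand-alone sketch of the corrected ring advance (RING_SIZE and the function name are illustrative, not from the driver):

#include <stdio.h>

#define RING_SIZE 16	/* illustrative; de4x5 sizes its rings at probe time */

/* Safe form: the index is read once and assigned once. The old
 * "(++idx) % RING_SIZE" modified idx twice between sequence points,
 * so the resulting value was formally undefined. */
static unsigned int ring_advance(unsigned int idx)
{
	return (idx + 1) % RING_SIZE;
}

int main(void)
{
	unsigned int tx_new = RING_SIZE - 1;

	tx_new = ring_advance(tx_new);
	printf("tx_new wraps to %u\n", tx_new);	/* prints 0 */
	return 0;
}
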
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 0bc4f3030a80..a9f7d5d1a269 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -599,7 +599,7 @@ static int dmfe_open(struct DEVICE *dev)
599 init_timer(&db->timer); 599 init_timer(&db->timer);
600 db->timer.expires = DMFE_TIMER_WUT + HZ * 2; 600 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
601 db->timer.data = (unsigned long)dev; 601 db->timer.data = (unsigned long)dev;
602 db->timer.function = &dmfe_timer; 602 db->timer.function = dmfe_timer;
603 add_timer(&db->timer); 603 add_timer(&db->timer);
604 604
605 return 0; 605 return 0;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 1faf7a4d7202..0013642903ee 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -180,21 +180,24 @@ int tulip_poll(struct napi_struct *napi, int budget)
180 dev_warn(&dev->dev, 180 dev_warn(&dev->dev,
181 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", 181 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
182 status); 182 status);
183 tp->stats.rx_length_errors++; 183 dev->stats.rx_length_errors++;
184 } 184 }
185 } else { 185 } else {
186 /* There was a fatal error. */ 186 /* There was a fatal error. */
187 if (tulip_debug > 2) 187 if (tulip_debug > 2)
188 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n", 188 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
189 dev->name, status); 189 dev->name, status);
190 tp->stats.rx_errors++; /* end of a packet.*/ 190 dev->stats.rx_errors++; /* end of a packet.*/
191 if (pkt_len > 1518 || 191 if (pkt_len > 1518 ||
192 (status & RxDescRunt)) 192 (status & RxDescRunt))
193 tp->stats.rx_length_errors++; 193 dev->stats.rx_length_errors++;
194 194
195 if (status & 0x0004) tp->stats.rx_frame_errors++; 195 if (status & 0x0004)
196 if (status & 0x0002) tp->stats.rx_crc_errors++; 196 dev->stats.rx_frame_errors++;
197 if (status & 0x0001) tp->stats.rx_fifo_errors++; 197 if (status & 0x0002)
198 dev->stats.rx_crc_errors++;
199 if (status & 0x0001)
200 dev->stats.rx_fifo_errors++;
198 } 201 }
199 } else { 202 } else {
200 struct sk_buff *skb; 203 struct sk_buff *skb;
@@ -244,8 +247,8 @@ int tulip_poll(struct napi_struct *napi, int budget)
244 247
245 netif_receive_skb(skb); 248 netif_receive_skb(skb);
246 249
247 tp->stats.rx_packets++; 250 dev->stats.rx_packets++;
248 tp->stats.rx_bytes += pkt_len; 251 dev->stats.rx_bytes += pkt_len;
249 } 252 }
250#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION 253#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
251 received++; 254 received++;
@@ -404,20 +407,23 @@ static int tulip_rx(struct net_device *dev)
404 dev_warn(&dev->dev, 407 dev_warn(&dev->dev,
405 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", 408 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
406 status); 409 status);
407 tp->stats.rx_length_errors++; 410 dev->stats.rx_length_errors++;
408 } 411 }
409 } else { 412 } else {
410 /* There was a fatal error. */ 413 /* There was a fatal error. */
411 if (tulip_debug > 2) 414 if (tulip_debug > 2)
412 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n", 415 printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
413 dev->name, status); 416 dev->name, status);
414 tp->stats.rx_errors++; /* end of a packet.*/ 417 dev->stats.rx_errors++; /* end of a packet.*/
415 if (pkt_len > 1518 || 418 if (pkt_len > 1518 ||
416 (status & RxDescRunt)) 419 (status & RxDescRunt))
417 tp->stats.rx_length_errors++; 420 dev->stats.rx_length_errors++;
418 if (status & 0x0004) tp->stats.rx_frame_errors++; 421 if (status & 0x0004)
419 if (status & 0x0002) tp->stats.rx_crc_errors++; 422 dev->stats.rx_frame_errors++;
420 if (status & 0x0001) tp->stats.rx_fifo_errors++; 423 if (status & 0x0002)
424 dev->stats.rx_crc_errors++;
425 if (status & 0x0001)
426 dev->stats.rx_fifo_errors++;
421 } 427 }
422 } else { 428 } else {
423 struct sk_buff *skb; 429 struct sk_buff *skb;
@@ -467,8 +473,8 @@ static int tulip_rx(struct net_device *dev)
467 473
468 netif_rx(skb); 474 netif_rx(skb);
469 475
470 tp->stats.rx_packets++; 476 dev->stats.rx_packets++;
471 tp->stats.rx_bytes += pkt_len; 477 dev->stats.rx_bytes += pkt_len;
472 } 478 }
473 received++; 479 received++;
474 entry = (++tp->cur_rx) % RX_RING_SIZE; 480 entry = (++tp->cur_rx) % RX_RING_SIZE;
@@ -602,18 +608,22 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
602 printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n", 608 printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
603 dev->name, status); 609 dev->name, status);
604#endif 610#endif
605 tp->stats.tx_errors++; 611 dev->stats.tx_errors++;
606 if (status & 0x4104) tp->stats.tx_aborted_errors++; 612 if (status & 0x4104)
607 if (status & 0x0C00) tp->stats.tx_carrier_errors++; 613 dev->stats.tx_aborted_errors++;
608 if (status & 0x0200) tp->stats.tx_window_errors++; 614 if (status & 0x0C00)
609 if (status & 0x0002) tp->stats.tx_fifo_errors++; 615 dev->stats.tx_carrier_errors++;
616 if (status & 0x0200)
617 dev->stats.tx_window_errors++;
618 if (status & 0x0002)
619 dev->stats.tx_fifo_errors++;
610 if ((status & 0x0080) && tp->full_duplex == 0) 620 if ((status & 0x0080) && tp->full_duplex == 0)
611 tp->stats.tx_heartbeat_errors++; 621 dev->stats.tx_heartbeat_errors++;
612 } else { 622 } else {
613 tp->stats.tx_bytes += 623 dev->stats.tx_bytes +=
614 tp->tx_buffers[entry].skb->len; 624 tp->tx_buffers[entry].skb->len;
615 tp->stats.collisions += (status >> 3) & 15; 625 dev->stats.collisions += (status >> 3) & 15;
616 tp->stats.tx_packets++; 626 dev->stats.tx_packets++;
617 } 627 }
618 628
619 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, 629 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
@@ -655,7 +665,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
655 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */ 665 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
656 if (csr5 == 0xffffffff) 666 if (csr5 == 0xffffffff)
657 break; 667 break;
658 if (csr5 & TxJabber) tp->stats.tx_errors++; 668 if (csr5 & TxJabber)
669 dev->stats.tx_errors++;
659 if (csr5 & TxFIFOUnderflow) { 670 if (csr5 & TxFIFOUnderflow) {
660 if ((tp->csr6 & 0xC000) != 0xC000) 671 if ((tp->csr6 & 0xC000) != 0xC000)
661 tp->csr6 += 0x4000; /* Bump up the Tx threshold */ 672 tp->csr6 += 0x4000; /* Bump up the Tx threshold */
@@ -672,8 +683,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
672 } 683 }
673 } 684 }
674 if (csr5 & RxDied) { /* Missed a Rx frame. */ 685 if (csr5 & RxDied) { /* Missed a Rx frame. */
675 tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; 686 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
676 tp->stats.rx_errors++; 687 dev->stats.rx_errors++;
677 tulip_start_rxtx(tp); 688 tulip_start_rxtx(tp);
678 } 689 }
679 /* 690 /*
@@ -789,7 +800,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
789#endif /* CONFIG_TULIP_NAPI */ 800#endif /* CONFIG_TULIP_NAPI */
790 801
791 if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) { 802 if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
792 tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed; 803 dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
793 } 804 }
794 805
795 if (tulip_debug > 4) 806 if (tulip_debug > 4)
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index e525875ed67d..ed66a16711dc 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -417,7 +417,6 @@ struct tulip_private {
417 int revision; 417 int revision;
418 int flags; 418 int flags;
419 struct napi_struct napi; 419 struct napi_struct napi;
420 struct net_device_stats stats;
421 struct timer_list timer; /* Media selection timer. */ 420 struct timer_list timer; /* Media selection timer. */
422 struct timer_list oom_timer; /* Out of memory timer. */ 421 struct timer_list oom_timer; /* Out of memory timer. */
423 u32 mc_filter[2]; 422 u32 mc_filter[2];
@@ -570,7 +569,7 @@ static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __io
570 /* Trigger an immediate transmit demand. */ 569 /* Trigger an immediate transmit demand. */
571 iowrite32(0, ioaddr + CSR1); 570 iowrite32(0, ioaddr + CSR1);
572 571
573 tp->stats.tx_errors++; 572 tp->dev->stats.tx_errors++;
574} 573}
575 574
576#endif /* __NET_TULIP_H__ */ 575#endif /* __NET_TULIP_H__ */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 3a8d7efa2acf..2c39f2591216 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -725,7 +725,7 @@ static void tulip_clean_tx_ring(struct tulip_private *tp)
725 int status = le32_to_cpu(tp->tx_ring[entry].status); 725 int status = le32_to_cpu(tp->tx_ring[entry].status);
726 726
727 if (status < 0) { 727 if (status < 0) {
728 tp->stats.tx_errors++; /* It wasn't Txed */ 728 tp->dev->stats.tx_errors++; /* It wasn't Txed */
729 tp->tx_ring[entry].status = 0; 729 tp->tx_ring[entry].status = 0;
730 } 730 }
731 731
@@ -781,8 +781,8 @@ static void tulip_down (struct net_device *dev)
781 /* release any unconsumed transmit buffers */ 781 /* release any unconsumed transmit buffers */
782 tulip_clean_tx_ring(tp); 782 tulip_clean_tx_ring(tp);
783 783
784 if (ioread32 (ioaddr + CSR6) != 0xffffffff) 784 if (ioread32(ioaddr + CSR6) != 0xffffffff)
785 tp->stats.rx_missed_errors += ioread32 (ioaddr + CSR8) & 0xffff; 785 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
786 786
787 spin_unlock_irqrestore (&tp->lock, flags); 787 spin_unlock_irqrestore (&tp->lock, flags);
788 788
@@ -864,12 +864,12 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
864 864
865 spin_lock_irqsave (&tp->lock, flags); 865 spin_lock_irqsave (&tp->lock, flags);
866 866
867 tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; 867 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
868 868
869 spin_unlock_irqrestore(&tp->lock, flags); 869 spin_unlock_irqrestore(&tp->lock, flags);
870 } 870 }
871 871
872 return &tp->stats; 872 return &dev->stats;
873} 873}
874 874
875 875
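
The tulip changes above all follow from dropping the private "struct net_device_stats stats" member of tulip_private: every counter now lives in the stats field embedded in struct net_device, so the hot paths bump dev->stats directly and tulip_get_stats() can hand back &dev->stats. A hedged sketch of the resulting ndo_get_stats shape (foo_get_stats is illustrative, not a tulip function):

/* Counters are incremented as dev->stats.X++ at the point of the
 * event; the callback only folds in any hardware-maintained counters
 * (tulip reads the CSR8 missed-frame count under its lock here) and
 * returns the embedded structure. */
static struct net_device_stats *foo_get_stats(struct net_device *dev)
{
	return &dev->stats;
}
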
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 96de5829b940..74217dbf0143 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -480,7 +480,7 @@ static int uli526x_open(struct net_device *dev)
480 init_timer(&db->timer); 480 init_timer(&db->timer);
481 db->timer.expires = ULI526X_TIMER_WUT + HZ * 2; 481 db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
482 db->timer.data = (unsigned long)dev; 482 db->timer.data = (unsigned long)dev;
483 db->timer.function = &uli526x_timer; 483 db->timer.function = uli526x_timer;
484 add_timer(&db->timer); 484 add_timer(&db->timer);
485 485
486 return 0; 486 return 0;
@@ -1747,7 +1747,7 @@ static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
1747 if(cr10_value&0x10000000) 1747 if(cr10_value&0x10000000)
1748 break; 1748 break;
1749 } 1749 }
1750 return (cr10_value&0x0ffff); 1750 return cr10_value & 0x0ffff;
1751} 1751}
1752 1752
1753static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data) 1753static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 66d41cf8da29..f0b231035dee 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -662,7 +662,7 @@ static int netdev_open(struct net_device *dev)
662 init_timer(&np->timer); 662 init_timer(&np->timer);
663 np->timer.expires = jiffies + 1*HZ; 663 np->timer.expires = jiffies + 1*HZ;
664 np->timer.data = (unsigned long)dev; 664 np->timer.data = (unsigned long)dev;
665 np->timer.function = &netdev_timer; /* timer handler */ 665 np->timer.function = netdev_timer; /* timer handler */
666 add_timer(&np->timer); 666 add_timer(&np->timer);
667 return 0; 667 return 0;
668out_err: 668out_err:
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index a439e93be22d..5a73752be2ca 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -29,7 +29,6 @@
29#include <linux/skbuff.h> 29#include <linux/skbuff.h>
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/ethtool.h>
33#include <linux/bitops.h> 32#include <linux/bitops.h>
34 33
35#include <asm/uaccess.h> 34#include <asm/uaccess.h>
@@ -181,19 +180,6 @@ static void print_binary(unsigned int number)
181} 180}
182#endif 181#endif
183 182
184static void netdev_get_drvinfo(struct net_device *dev,
185 struct ethtool_drvinfo *info)
186{
187 struct xircom_private *private = netdev_priv(dev);
188
189 strcpy(info->driver, "xircom_cb");
190 strcpy(info->bus_info, pci_name(private->pdev));
191}
192
193static const struct ethtool_ops netdev_ethtool_ops = {
194 .get_drvinfo = netdev_get_drvinfo,
195};
196
197static const struct net_device_ops netdev_ops = { 183static const struct net_device_ops netdev_ops = {
198 .ndo_open = xircom_open, 184 .ndo_open = xircom_open,
199 .ndo_stop = xircom_close, 185 .ndo_stop = xircom_close,
@@ -279,7 +265,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
279 setup_descriptors(private); 265 setup_descriptors(private);
280 266
281 dev->netdev_ops = &netdev_ops; 267 dev->netdev_ops = &netdev_ops;
282 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
283 pci_set_drvdata(pdev, dev); 268 pci_set_drvdata(pdev, dev);
284 269
285 if (register_netdev(dev)) { 270 if (register_netdev(dev)) {
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 2e50077ff450..1cc67138adbf 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -541,7 +541,7 @@ cleanup:
541 541
542 indexes->respCleared = cpu_to_le32(cleared); 542 indexes->respCleared = cpu_to_le32(cleared);
543 wmb(); 543 wmb();
544 return (resp_save == NULL); 544 return resp_save == NULL;
545} 545}
546 546
547static inline int 547static inline int
@@ -962,36 +962,34 @@ typhoon_do_get_stats(struct typhoon *tp)
962 * The extra status reported would be a good candidate for 962 * The extra status reported would be a good candidate for
963 * ethtool_ops->get_{strings,stats}() 963 * ethtool_ops->get_{strings,stats}()
964 */ 964 */
965 stats->tx_packets = le32_to_cpu(s->txPackets); 965 stats->tx_packets = le32_to_cpu(s->txPackets) +
966 stats->tx_bytes = le64_to_cpu(s->txBytes); 966 saved->tx_packets;
967 stats->tx_errors = le32_to_cpu(s->txCarrierLost); 967 stats->tx_bytes = le64_to_cpu(s->txBytes) +
968 stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost); 968 saved->tx_bytes;
969 stats->collisions = le32_to_cpu(s->txMultipleCollisions); 969 stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
970 stats->rx_packets = le32_to_cpu(s->rxPacketsGood); 970 saved->tx_errors;
971 stats->rx_bytes = le64_to_cpu(s->rxBytesGood); 971 stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
972 stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns); 972 saved->tx_carrier_errors;
973 stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
974 saved->collisions;
975 stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
976 saved->rx_packets;
977 stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
978 saved->rx_bytes;
979 stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
980 saved->rx_fifo_errors;
973 stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) + 981 stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
974 le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors); 982 le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
975 stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors); 983 saved->rx_errors;
976 stats->rx_length_errors = le32_to_cpu(s->rxOversized); 984 stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
985 saved->rx_crc_errors;
986 stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
987 saved->rx_length_errors;
977 tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ? 988 tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
978 SPEED_100 : SPEED_10; 989 SPEED_100 : SPEED_10;
979 tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ? 990 tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
980 DUPLEX_FULL : DUPLEX_HALF; 991 DUPLEX_FULL : DUPLEX_HALF;
981 992
982 /* add in the saved statistics
983 */
984 stats->tx_packets += saved->tx_packets;
985 stats->tx_bytes += saved->tx_bytes;
986 stats->tx_errors += saved->tx_errors;
987 stats->collisions += saved->collisions;
988 stats->rx_packets += saved->rx_packets;
989 stats->rx_bytes += saved->rx_bytes;
990 stats->rx_fifo_errors += saved->rx_fifo_errors;
991 stats->rx_errors += saved->rx_errors;
992 stats->rx_crc_errors += saved->rx_crc_errors;
993 stats->rx_length_errors += saved->rx_length_errors;
994
995 return 0; 993 return 0;
996} 994}
997 995
@@ -1762,7 +1760,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
1762 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) { 1760 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
1763 new_skb->ip_summed = CHECKSUM_UNNECESSARY; 1761 new_skb->ip_summed = CHECKSUM_UNNECESSARY;
1764 } else 1762 } else
1765 new_skb->ip_summed = CHECKSUM_NONE; 1763 skb_checksum_none_assert(new_skb);
1766 1764
1767 spin_lock(&tp->state_lock); 1765 spin_lock(&tp->state_lock);
1768 if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN) 1766 if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
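
Two things happen in the typhoon diff. First, the saved statistics are folded into each assignment, consolidating the separate "add in the saved statistics" pass that followed. Second, this series repeatedly swaps "skb->ip_summed = CHECKSUM_NONE" for skb_checksum_none_assert() (also in via-velocity, vmxnet3 and vxge below): a freshly allocated skb already has ip_summed == CHECKSUM_NONE, so the helper merely asserts that invariant in debug builds instead of re-writing the field. Its definition is approximately:

static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}
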
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index d7b7018a1de1..52ffabe6db0e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -358,6 +358,14 @@ config USB_NET_ZAURUS
358 really need this non-conformant variant of CDC Ethernet (or in 358 really need this non-conformant variant of CDC Ethernet (or in
359 some cases CDC MDLM) protocol, not "g_ether". 359 some cases CDC MDLM) protocol, not "g_ether".
360 360
361config USB_NET_CX82310_ETH
362 tristate "Conexant CX82310 USB ethernet port"
363 depends on USB_USBNET
364 help
365 Choose this option if you're using a Conexant CX82310-based ADSL
366 router with USB ethernet port. This driver is for routers only,
367 it will not work with ADSL modems (use cxacru driver instead).
368
361config USB_HSO 369config USB_HSO
362 tristate "Option USB High Speed Mobile Devices" 370 tristate "Option USB High Speed Mobile Devices"
363 depends on USB && RFKILL 371 depends on USB && RFKILL
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b13a279663ba..a19b0259ae16 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -25,4 +25,5 @@ obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o 25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
26obj-$(CONFIG_USB_IPHETH) += ipheth.o 26obj-$(CONFIG_USB_IPHETH) += ipheth.o
27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o 27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
28 29
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
new file mode 100644
index 000000000000..8969f124c18c
--- /dev/null
+++ b/drivers/net/usb/cx82310_eth.c
@@ -0,0 +1,346 @@
1/*
2 * Driver for USB ethernet port of Conexant CX82310-based ADSL routers
3 * Copyright (C) 2010 by Ondrej Zary
4 * some parts inspired by the cxacru driver
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/ethtool.h>
26#include <linux/workqueue.h>
27#include <linux/mii.h>
28#include <linux/usb.h>
29#include <linux/usb/usbnet.h>
30
31enum cx82310_cmd {
32 CMD_START = 0x84, /* no effect? */
33 CMD_STOP = 0x85, /* no effect? */
34 CMD_GET_STATUS = 0x90, /* returns nothing? */
35 CMD_GET_MAC_ADDR = 0x91, /* read MAC address */
36 CMD_GET_LINK_STATUS = 0x92, /* not useful, link is always up */
37 CMD_ETHERNET_MODE = 0x99, /* unknown, needed during init */
38};
39
40enum cx82310_status {
41 STATUS_UNDEFINED,
42 STATUS_SUCCESS,
43 STATUS_ERROR,
44 STATUS_UNSUPPORTED,
45 STATUS_UNIMPLEMENTED,
46 STATUS_PARAMETER_ERROR,
47 STATUS_DBG_LOOPBACK,
48};
49
50#define CMD_PACKET_SIZE 64
51/* first command after power on can take around 8 seconds */
52#define CMD_TIMEOUT 15000
53#define CMD_REPLY_RETRY 5
54
55#define CX82310_MTU 1514
56#define CMD_EP 0x01
57
58/*
59 * execute control command
60 * - optionally send some data (command parameters)
61 * - optionally wait for the reply
62 * - optionally read some data from the reply
63 */
64static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
65 u8 *wdata, int wlen, u8 *rdata, int rlen)
66{
67 int actual_len, retries, ret;
68 struct usb_device *udev = dev->udev;
69 u8 *buf = kzalloc(CMD_PACKET_SIZE, GFP_KERNEL);
70
71 if (!buf)
72 return -ENOMEM;
73
74 /* create command packet */
75 buf[0] = cmd;
76 if (wdata)
77 memcpy(buf + 4, wdata, min_t(int, wlen, CMD_PACKET_SIZE - 4));
78
79 /* send command packet */
80 ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
81 CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
82 if (ret < 0) {
83 dev_err(&dev->udev->dev, "send command %#x: error %d\n",
84 cmd, ret);
85 goto end;
86 }
87
88 if (reply) {
89 /* wait for reply, retry if it's empty */
90 for (retries = 0; retries < CMD_REPLY_RETRY; retries++) {
91 ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, CMD_EP),
92 buf, CMD_PACKET_SIZE, &actual_len,
93 CMD_TIMEOUT);
94 if (ret < 0) {
95 dev_err(&dev->udev->dev,
96 "reply receive error %d\n", ret);
97 goto end;
98 }
99 if (actual_len > 0)
100 break;
101 }
102 if (actual_len == 0) {
103 dev_err(&dev->udev->dev, "no reply to command %#x\n",
104 cmd);
105 ret = -EIO;
106 goto end;
107 }
108 if (buf[0] != cmd) {
109 dev_err(&dev->udev->dev,
110 "got reply to command %#x, expected: %#x\n",
111 buf[0], cmd);
112 ret = -EIO;
113 goto end;
114 }
115 if (buf[1] != STATUS_SUCCESS) {
116 dev_err(&dev->udev->dev, "command %#x failed: %#x\n",
117 cmd, buf[1]);
118 ret = -EIO;
119 goto end;
120 }
121 if (rdata)
122 memcpy(rdata, buf + 4,
123 min_t(int, rlen, CMD_PACKET_SIZE - 4));
124 }
125end:
126 kfree(buf);
127 return ret;
128}
129
130#define partial_len data[0] /* length of partial packet data */
131#define partial_rem data[1] /* remaining (missing) data length */
132#define partial_data data[2] /* partial packet data */
133
134static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
135{
136 int ret;
137 char buf[15];
138 struct usb_device *udev = dev->udev;
139
140 /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
141 if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
142 && strcmp(buf, "USB NET CARD")) {
143 dev_info(&udev->dev, "ignoring: probably an ADSL modem\n");
144 return -ENODEV;
145 }
146
147 ret = usbnet_get_endpoints(dev, intf);
148 if (ret)
149 return ret;
150
151 /*
152 * this must not include ethernet header as the device can send partial
153 * packets with no header (and sometimes even empty URBs)
154 */
155 dev->net->hard_header_len = 0;
156 /* we can send at most 1514 bytes of data (+ 2-byte header) per URB */
157 dev->hard_mtu = CX82310_MTU + 2;
158 /* we can receive URBs up to 4KB from the device */
159 dev->rx_urb_size = 4096;
160
161 dev->partial_data = (unsigned long) kmalloc(dev->hard_mtu, GFP_KERNEL);
162 if (!dev->partial_data)
163 return -ENOMEM;
164
165 /* enable ethernet mode (?) */
166 ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
167 if (ret) {
168 dev_err(&udev->dev, "unable to enable ethernet mode: %d\n",
169 ret);
170 goto err;
171 }
172
173 /* get the MAC address */
174 ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
175 dev->net->dev_addr, ETH_ALEN);
176 if (ret) {
177 dev_err(&udev->dev, "unable to read MAC address: %d\n", ret);
178 goto err;
179 }
180
181 /* start (does not seem to have any effect?) */
182 ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0);
183 if (ret)
184 goto err;
185
186 return 0;
187err:
188 kfree((void *)dev->partial_data);
189 return ret;
190}
191
192static void cx82310_unbind(struct usbnet *dev, struct usb_interface *intf)
193{
194 kfree((void *)dev->partial_data);
195}
196
197/*
198 * RX is NOT easy - we can receive multiple packets per skb, each having 2-byte
199 * packet length at the beginning.
200 * The last packet might be incomplete (when it crosses the 4KB URB size),
201 * continuing in the next skb (without any headers).
202 * If a packet has odd length, there is one extra byte at the end (before next
203 * packet or at the end of the URB).
204 */
205static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
206{
207 int len;
208 struct sk_buff *skb2;
209
210 /*
211 * If the last skb ended with an incomplete packet, this skb contains
212 * end of that packet at the beginning.
213 */
214 if (dev->partial_rem) {
215 len = dev->partial_len + dev->partial_rem;
216 skb2 = alloc_skb(len, GFP_ATOMIC);
217 if (!skb2)
218 return 0;
219 skb_put(skb2, len);
220 memcpy(skb2->data, (void *)dev->partial_data,
221 dev->partial_len);
222 memcpy(skb2->data + dev->partial_len, skb->data,
223 dev->partial_rem);
224 usbnet_skb_return(dev, skb2);
225 skb_pull(skb, (dev->partial_rem + 1) & ~1);
226 dev->partial_rem = 0;
227 if (skb->len < 2)
228 return 1;
229 }
230
231 /* a skb can contain multiple packets */
232 while (skb->len > 1) {
233 /* first two bytes are packet length */
234 len = skb->data[0] | (skb->data[1] << 8);
235 skb_pull(skb, 2);
236
 237 /* if the last packet in the skb, let usbnet process it */
238 if (len == skb->len || len + 1 == skb->len) {
239 skb_trim(skb, len);
240 break;
241 }
242
243 if (len > CX82310_MTU) {
244 dev_err(&dev->udev->dev, "RX packet too long: %d B\n",
245 len);
246 return 0;
247 }
248
249 /* incomplete packet, save it for the next skb */
250 if (len > skb->len) {
251 dev->partial_len = skb->len;
252 dev->partial_rem = len - skb->len;
253 memcpy((void *)dev->partial_data, skb->data,
254 dev->partial_len);
255 skb_pull(skb, skb->len);
256 break;
257 }
258
259 skb2 = alloc_skb(len, GFP_ATOMIC);
260 if (!skb2)
261 return 0;
262 skb_put(skb2, len);
263 memcpy(skb2->data, skb->data, len);
264 /* process the packet */
265 usbnet_skb_return(dev, skb2);
266
267 skb_pull(skb, (len + 1) & ~1);
268 }
269
270 /* let usbnet process the last packet */
271 return 1;
272}
273
274/* TX is easy, just add 2 bytes of length at the beginning */
275static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
276 gfp_t flags)
277{
278 int len = skb->len;
279
280 if (skb_headroom(skb) < 2) {
281 struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
282 dev_kfree_skb_any(skb);
283 skb = skb2;
284 if (!skb)
285 return NULL;
286 }
287 skb_push(skb, 2);
288
289 skb->data[0] = len;
290 skb->data[1] = len >> 8;
291
292 return skb;
293}
294
295
296static const struct driver_info cx82310_info = {
297 .description = "Conexant CX82310 USB ethernet",
298 .flags = FLAG_ETHER,
299 .bind = cx82310_bind,
300 .unbind = cx82310_unbind,
301 .rx_fixup = cx82310_rx_fixup,
302 .tx_fixup = cx82310_tx_fixup,
303};
304
305#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \
306 .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
307 USB_DEVICE_ID_MATCH_DEV_INFO, \
308 .idVendor = (vend), \
309 .idProduct = (prod), \
310 .bDeviceClass = (cl), \
311 .bDeviceSubClass = (sc), \
312 .bDeviceProtocol = (pr)
313
314static const struct usb_device_id products[] = {
315 {
316 USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0),
317 .driver_info = (unsigned long) &cx82310_info
318 },
319 { },
320};
321MODULE_DEVICE_TABLE(usb, products);
322
323static struct usb_driver cx82310_driver = {
324 .name = "cx82310_eth",
325 .id_table = products,
326 .probe = usbnet_probe,
327 .disconnect = usbnet_disconnect,
328 .suspend = usbnet_suspend,
329 .resume = usbnet_resume,
330};
331
332static int __init cx82310_init(void)
333{
334 return usb_register(&cx82310_driver);
335}
336module_init(cx82310_init);
337
338static void __exit cx82310_exit(void)
339{
340 usb_deregister(&cx82310_driver);
341}
342module_exit(cx82310_exit);
343
344MODULE_AUTHOR("Ondrej Zary");
345MODULE_DESCRIPTION("Conexant CX82310-based ADSL router USB ethernet driver");
346MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1cd752f9a6e1..8110595fbbcc 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -843,16 +843,7 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
843 return NETDEV_TX_OK; 843 return NETDEV_TX_OK;
844} 844}
845 845
846static void hso_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
847{
848 struct hso_net *odev = netdev_priv(net);
849
850 strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN);
851 usb_make_path(odev->parent->usb, info->bus_info, sizeof info->bus_info);
852}
853
854static const struct ethtool_ops ops = { 846static const struct ethtool_ops ops = {
855 .get_drvinfo = hso_get_drvinfo,
856 .get_link = ethtool_op_get_link 847 .get_link = ethtool_op_get_link
857}; 848};
858 849
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 2b7b39cad1ce..5e98643a4a21 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -759,14 +759,6 @@ static int kaweth_close(struct net_device *net)
759 return 0; 759 return 0;
760} 760}
761 761
762static void kaweth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
763{
764 struct kaweth_device *kaweth = netdev_priv(dev);
765
766 strlcpy(info->driver, driver_name, sizeof(info->driver));
767 usb_make_path(kaweth->dev, info->bus_info, sizeof (info->bus_info));
768}
769
770static u32 kaweth_get_link(struct net_device *dev) 762static u32 kaweth_get_link(struct net_device *dev)
771{ 763{
772 struct kaweth_device *kaweth = netdev_priv(dev); 764 struct kaweth_device *kaweth = netdev_priv(dev);
@@ -775,7 +767,6 @@ static u32 kaweth_get_link(struct net_device *dev)
775} 767}
776 768
777static const struct ethtool_ops ops = { 769static const struct ethtool_ops ops = {
778 .get_drvinfo = kaweth_get_drvinfo,
779 .get_link = kaweth_get_link 770 .get_link = kaweth_get_link
780}; 771};
781 772
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index ee85c8b9a858..d1ac15c95faf 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -203,7 +203,7 @@ static inline void sierra_net_set_private(struct usbnet *dev,
203/* is packet IPv4 */ 203/* is packet IPv4 */
204static inline int is_ip(struct sk_buff *skb) 204static inline int is_ip(struct sk_buff *skb)
205{ 205{
206 return (skb->protocol == cpu_to_be16(ETH_P_IP)); 206 return skb->protocol == cpu_to_be16(ETH_P_IP);
207} 207}
208 208
209/* 209/*
@@ -354,7 +354,7 @@ static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
354 354
355static inline int sierra_net_is_valid_addrlen(u8 len) 355static inline int sierra_net_is_valid_addrlen(u8 len)
356{ 356{
357 return (len == sizeof(struct in_addr)); 357 return len == sizeof(struct in_addr);
358} 358}
359 359
360static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) 360static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5ec542dd5b50..0bbc0c323135 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -250,7 +250,7 @@ static int veth_close(struct net_device *dev)
250 250
251static int is_valid_veth_mtu(int new_mtu) 251static int is_valid_veth_mtu(int new_mtu)
252{ 252{
253 return (new_mtu >= MIN_MTU && new_mtu <= MAX_MTU); 253 return new_mtu >= MIN_MTU && new_mtu <= MAX_MTU;
254} 254}
255 255
256static int veth_change_mtu(struct net_device *dev, int new_mtu) 256static int veth_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index f53412368ce1..6884813b809c 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1954,7 +1954,7 @@ static int velocity_tx_srv(struct velocity_info *vptr)
1954 */ 1954 */
1955static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb) 1955static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1956{ 1956{
1957 skb->ip_summed = CHECKSUM_NONE; 1957 skb_checksum_none_assert(skb);
1958 1958
1959 if (rd->rdesc1.CSM & CSM_IPKT) { 1959 if (rd->rdesc1.CSM & CSM_IPKT) {
1960 if (rd->rdesc1.CSM & CSM_IPOK) { 1960 if (rd->rdesc1.CSM & CSM_IPOK) {
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index f7b33ae7a703..b5e120b0074b 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1504,22 +1504,25 @@ struct velocity_info {
1504 * addresses on this chain then we use the first - multi-IP WOL is not 1504 * addresses on this chain then we use the first - multi-IP WOL is not
1505 * supported. 1505 * supported.
1506 * 1506 *
1507 * CHECK ME: locking
1508 */ 1507 */
1509 1508
1510static inline int velocity_get_ip(struct velocity_info *vptr) 1509static inline int velocity_get_ip(struct velocity_info *vptr)
1511{ 1510{
1512 struct in_device *in_dev = (struct in_device *) vptr->dev->ip_ptr; 1511 struct in_device *in_dev;
1513 struct in_ifaddr *ifa; 1512 struct in_ifaddr *ifa;
1513 int res = -ENOENT;
1514 1514
1515 rcu_read_lock();
1516 in_dev = __in_dev_get_rcu(vptr->dev);
1515 if (in_dev != NULL) { 1517 if (in_dev != NULL) {
1516 ifa = (struct in_ifaddr *) in_dev->ifa_list; 1518 ifa = (struct in_ifaddr *) in_dev->ifa_list;
1517 if (ifa != NULL) { 1519 if (ifa != NULL) {
1518 memcpy(vptr->ip_addr, &ifa->ifa_address, 4); 1520 memcpy(vptr->ip_addr, &ifa->ifa_address, 4);
1519 return 0; 1521 res = 0;
1520 } 1522 }
1521 } 1523 }
1522 return -ENOENT; 1524 rcu_read_unlock();
1525 return res;
1523} 1526}
1524 1527
1525/** 1528/**
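
The velocity_get_ip() rewrite above (and the hdlc_cisco change later in this series) fix the same pattern: dev->ip_ptr is RCU-protected, so it must not be dereferenced bare. A reader takes rcu_read_lock(), obtains the in_device via __in_dev_get_rcu(), and finishes with everything it derived from that pointer before unlocking. The general reader shape, sketched (the function name is illustrative):

/* Fetch the first IPv4 address of a device under RCU; returns 0 on
 * success, -ENOENT if the device has no in_device or no address. */
static int foo_first_ipv4_addr(struct net_device *dev, __be32 *addr)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	int res = -ENOENT;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		ifa = in_dev->ifa_list;
		if (ifa) {
			*addr = ifa->ifa_address;
			res = 0;
		}
	}
	rcu_read_unlock();	/* the address was copied, not referenced */
	return res;
}
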
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4598e9d2608f..bb6b67f6b0cc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -705,19 +705,6 @@ static int virtnet_close(struct net_device *dev)
705 return 0; 705 return 0;
706} 706}
707 707
708static void virtnet_get_drvinfo(struct net_device *dev,
709 struct ethtool_drvinfo *drvinfo)
710{
711 struct virtnet_info *vi = netdev_priv(dev);
712 struct virtio_device *vdev = vi->vdev;
713
714 strncpy(drvinfo->driver, KBUILD_MODNAME, ARRAY_SIZE(drvinfo->driver));
715 strncpy(drvinfo->version, "N/A", ARRAY_SIZE(drvinfo->version));
716 strncpy(drvinfo->fw_version, "N/A", ARRAY_SIZE(drvinfo->fw_version));
717 strncpy(drvinfo->bus_info, dev_name(&vdev->dev),
718 ARRAY_SIZE(drvinfo->bus_info));
719}
720
721static int virtnet_set_tx_csum(struct net_device *dev, u32 data) 708static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
722{ 709{
723 struct virtnet_info *vi = netdev_priv(dev); 710 struct virtnet_info *vi = netdev_priv(dev);
@@ -830,7 +817,6 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
830} 817}
831 818
832static const struct ethtool_ops virtnet_ethtool_ops = { 819static const struct ethtool_ops virtnet_ethtool_ops = {
833 .get_drvinfo = virtnet_get_drvinfo,
834 .set_tx_csum = virtnet_set_tx_csum, 820 .set_tx_csum = virtnet_set_tx_csum,
835 .set_sg = ethtool_op_set_sg, 821 .set_sg = ethtool_op_set_sg,
836 .set_tso = ethtool_op_set_tso, 822 .set_tso = ethtool_op_set_tso,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index abe0ff53daf3..198ce92af0c3 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1042,11 +1042,11 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1042 skb->csum = htons(gdesc->rcd.csum); 1042 skb->csum = htons(gdesc->rcd.csum);
1043 skb->ip_summed = CHECKSUM_PARTIAL; 1043 skb->ip_summed = CHECKSUM_PARTIAL;
1044 } else { 1044 } else {
1045 skb->ip_summed = CHECKSUM_NONE; 1045 skb_checksum_none_assert(skb);
1046 } 1046 }
1047 } 1047 }
1048 } else { 1048 } else {
1049 skb->ip_summed = CHECKSUM_NONE; 1049 skb_checksum_none_assert(skb);
1050 } 1050 }
1051} 1051}
1052 1052
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c7c5605b3728..5378b849f54f 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -501,7 +501,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
501 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK) 501 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
502 skb->ip_summed = CHECKSUM_UNNECESSARY; 502 skb->ip_summed = CHECKSUM_UNNECESSARY;
503 else 503 else
504 skb->ip_summed = CHECKSUM_NONE; 504 skb_checksum_none_assert(skb);
505 505
506 vxge_rx_complete(ring, skb, ext_info.vlan, 506 vxge_rx_complete(ring, skb, ext_info.vlan,
507 pkt_length, &ext_info); 507 pkt_length, &ext_info);
@@ -2159,8 +2159,8 @@ start:
2159 /* Alarm MSIX Vectors count */ 2159 /* Alarm MSIX Vectors count */
2160 vdev->intr_cnt++; 2160 vdev->intr_cnt++;
2161 2161
2162 vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry), 2162 vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2163 GFP_KERNEL); 2163 GFP_KERNEL);
2164 if (!vdev->entries) { 2164 if (!vdev->entries) {
2165 vxge_debug_init(VXGE_ERR, 2165 vxge_debug_init(VXGE_ERR,
2166 "%s: memory allocation failed", 2166 "%s: memory allocation failed",
@@ -2169,9 +2169,9 @@ start:
2169 goto alloc_entries_failed; 2169 goto alloc_entries_failed;
2170 } 2170 }
2171 2171
2172 vdev->vxge_entries = 2172 vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2173 kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry), 2173 sizeof(struct vxge_msix_entry),
2174 GFP_KERNEL); 2174 GFP_KERNEL);
2175 if (!vdev->vxge_entries) { 2175 if (!vdev->vxge_entries) {
2176 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", 2176 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2177 VXGE_DRIVER_NAME); 2177 VXGE_DRIVER_NAME);
@@ -2914,26 +2914,18 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
2914} 2914}
2915 2915
2916/** 2916/**
2917 * vxge_get_stats 2917 * vxge_get_stats64
2918 * @dev: pointer to the device structure 2918 * @dev: pointer to the device structure
2919 * @stats: pointer to struct rtnl_link_stats64
2919 * 2920 *
2920 * Updates the device statistics structure. This function updates the device
2921 * statistics structure in the net_device structure and returns a pointer
2922 * to the same.
2923 */ 2921 */
2924static struct net_device_stats * 2922static struct rtnl_link_stats64 *
2925vxge_get_stats(struct net_device *dev) 2923vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2926{ 2924{
2927 struct vxgedev *vdev; 2925 struct vxgedev *vdev = netdev_priv(dev);
2928 struct net_device_stats *net_stats;
2929 int k; 2926 int k;
2930 2927
2931 vdev = netdev_priv(dev); 2928 /* net_stats already zeroed by caller */
2932
2933 net_stats = &vdev->stats.net_stats;
2934
2935 memset(net_stats, 0, sizeof(struct net_device_stats));
2936
2937 for (k = 0; k < vdev->no_of_vpath; k++) { 2929 for (k = 0; k < vdev->no_of_vpath; k++) {
2938 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms; 2930 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
2939 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; 2931 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
@@ -3102,7 +3094,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3102static const struct net_device_ops vxge_netdev_ops = { 3094static const struct net_device_ops vxge_netdev_ops = {
3103 .ndo_open = vxge_open, 3095 .ndo_open = vxge_open,
3104 .ndo_stop = vxge_close, 3096 .ndo_stop = vxge_close,
3105 .ndo_get_stats = vxge_get_stats, 3097 .ndo_get_stats64 = vxge_get_stats64,
3106 .ndo_start_xmit = vxge_xmit, 3098 .ndo_start_xmit = vxge_xmit,
3107 .ndo_validate_addr = eth_validate_addr, 3099 .ndo_validate_addr = eth_validate_addr,
3108 .ndo_set_multicast_list = vxge_set_multicast, 3100 .ndo_set_multicast_list = vxge_set_multicast,
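
Two independent cleanups in vxge-main.c: kcalloc(n, size, flags) replaces the open-coded kzalloc(n * size, flags) because kcalloc fails cleanly when n * size would overflow, and the statistics callback moves from ndo_get_stats to ndo_get_stats64, whose caller supplies an already-zeroed struct rtnl_link_stats64, so the driver drops its private net_device_stats copy and the memset. A sketch of the allocation idiom (the helper is illustrative, not a vxge function):

#include <linux/pci.h>
#include <linux/slab.h>

static struct msix_entry *foo_alloc_msix(unsigned int nvec)
{
	/* kcalloc checks nvec * sizeof(...) for overflow and returns
	 * NULL instead of silently wrapping; the memory is zeroed,
	 * matching what kzalloc provided. */
	return kcalloc(nvec, sizeof(struct msix_entry), GFP_KERNEL);
}
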
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 2e3b064b8e4b..d4be07eaacd7 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -172,7 +172,6 @@ struct vxge_msix_entry {
172 172
173struct vxge_sw_stats { 173struct vxge_sw_stats {
174 /* Network Stats (interface stats) */ 174 /* Network Stats (interface stats) */
175 struct net_device_stats net_stats;
176 175
177 /* Tx */ 176 /* Tx */
178 u64 tx_frms; 177 u64 tx_frms;
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 0bd898c94759..4ac85a09c5a6 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -264,7 +264,7 @@ static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
264 new_line.clock_type != CLOCK_TXFROMRX && 264 new_line.clock_type != CLOCK_TXFROMRX &&
265 new_line.clock_type != CLOCK_INT && 265 new_line.clock_type != CLOCK_INT &&
266 new_line.clock_type != CLOCK_TXINT) 266 new_line.clock_type != CLOCK_TXINT)
267 return -EINVAL; /* No such clock setting */ 267 return -EINVAL; /* No such clock setting */
268 268
269 if (new_line.loopback != 0 && new_line.loopback != 1) 269 if (new_line.loopback != 0 && new_line.loopback != 1)
270 return -EINVAL; 270 return -EINVAL;
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index a5ddc6c8963e..164c3624ba89 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -73,7 +73,7 @@ static int reset_cyc2x(void __iomem *addr);
73static int detect_cyc2x(void __iomem *addr); 73static int detect_cyc2x(void __iomem *addr);
74 74
75/* Miscellaneous functions */ 75/* Miscellaneous functions */
76static int get_option_index(long *optlist, long optval); 76static int get_option_index(const long *optlist, long optval);
77static u16 checksum(u8 *buf, u32 len); 77static u16 checksum(u8 *buf, u32 len);
78 78
79#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET) 79#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET)
@@ -81,23 +81,23 @@ static u16 checksum(u8 *buf, u32 len);
81/* Global Data */ 81/* Global Data */
82 82
83/* private data */ 83/* private data */
84static char modname[] = "cycx_drv"; 84static const char modname[] = "cycx_drv";
85static char fullname[] = "Cyclom 2X Support Module"; 85static const char fullname[] = "Cyclom 2X Support Module";
86static char copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo " 86static const char copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
87 "<acme@conectiva.com.br>"; 87 "<acme@conectiva.com.br>";
88 88
89/* Hardware configuration options. 89/* Hardware configuration options.
90 * These are arrays of configuration options used by verification routines. 90 * These are arrays of configuration options used by verification routines.
91 * The first element of each array is its size (i.e. number of options). 91 * The first element of each array is its size (i.e. number of options).
92 */ 92 */
93static long cyc2x_dpmbase_options[] = { 93static const long cyc2x_dpmbase_options[] = {
94 20, 94 20,
95 0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000, 95 0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000,
96 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000, 96 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000,
97 0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000 97 0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000
98}; 98};
99 99
100static long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 }; 100static const long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
101 101
102/* Kernel Loadable Module Entry Points */ 102/* Kernel Loadable Module Entry Points */
103/* Module 'insert' entry point. 103/* Module 'insert' entry point.
@@ -529,7 +529,7 @@ static int detect_cyc2x(void __iomem *addr)
529/* Miscellaneous */ 529/* Miscellaneous */
530/* Get option's index into the options list. 530/* Get option's index into the options list.
531 * Return option's index (1 .. N) or zero if option is invalid. */ 531 * Return option's index (1 .. N) or zero if option is invalid. */
532static int get_option_index(long *optlist, long optval) 532static int get_option_index(const long *optlist, long optval)
533{ 533{
534 int i = 1; 534 int i = 1;
535 535
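
The tables being const-qualified above use a convention worth spelling out: element 0 holds the number of options, so valid entries occupy indices 1..optlist[0], and get_option_index() returns a 1-based index, with 0 meaning "not a valid option". A stand-alone sketch against the IRQ table shown above:

#include <stdio.h>

/* Size-prefixed option list, as in cycx_drv: element 0 is the count. */
static const long irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };

/* Return the 1-based index of optval, or 0 if it is not listed. */
static int get_option_index(const long *optlist, long optval)
{
	int i;

	for (i = 1; i <= optlist[0]; i++)
		if (optlist[i] == optval)
			return i;
	return 0;
}

int main(void)
{
	printf("IRQ 10 -> index %d\n", get_option_index(irq_options, 10)); /* 4 */
	printf("IRQ 4  -> index %d\n", get_option_index(irq_options, 4));  /* 0 */
	return 0;
}
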
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index a0e8611ad8e8..859dba9b972e 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -81,9 +81,9 @@ static irqreturn_t cycx_isr(int irq, void *dev_id);
81 */ 81 */
82 82
83/* private data */ 83/* private data */
84static char cycx_drvname[] = "cyclomx"; 84static const char cycx_drvname[] = "cyclomx";
85static char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver"; 85static const char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
86static char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo " 86static const char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
87 "<acme@conectiva.com.br>"; 87 "<acme@conectiva.com.br>";
88static int cycx_ncards = CONFIG_CYCX_CARDS; 88static int cycx_ncards = CONFIG_CYCX_CARDS;
89static struct cycx_device *cycx_card_array; /* adapter data space */ 89static struct cycx_device *cycx_card_array; /* adapter data space */
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 421d0715310e..1481a446fefb 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -97,11 +97,11 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
97 97
98 dest = skb_push(skb, hlen); 98 dest = skb_push(skb, hlen);
99 if (!dest) 99 if (!dest)
100 return(0); 100 return 0;
101 101
102 memcpy(dest, &hdr, hlen); 102 memcpy(dest, &hdr, hlen);
103 103
104 return(hlen); 104 return hlen;
105} 105}
106 106
107static void dlci_receive(struct sk_buff *skb, struct net_device *dev) 107static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
@@ -211,14 +211,14 @@ static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, in
211 if (copy_from_user(&config, conf, sizeof(struct dlci_conf))) 211 if (copy_from_user(&config, conf, sizeof(struct dlci_conf)))
212 return -EFAULT; 212 return -EFAULT;
213 if (config.flags & ~DLCI_VALID_FLAGS) 213 if (config.flags & ~DLCI_VALID_FLAGS)
214 return(-EINVAL); 214 return -EINVAL;
215 memcpy(&dlp->config, &config, sizeof(struct dlci_conf)); 215 memcpy(&dlp->config, &config, sizeof(struct dlci_conf));
216 dlp->configured = 1; 216 dlp->configured = 1;
217 } 217 }
218 218
219 err = (*flp->dlci_conf)(dlp->slave, dev, get); 219 err = (*flp->dlci_conf)(dlp->slave, dev, get);
220 if (err) 220 if (err)
221 return(err); 221 return err;
222 222
223 if (get) 223 if (get)
224 { 224 {
@@ -226,7 +226,7 @@ static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, in
226 return -EFAULT; 226 return -EFAULT;
227 } 227 }
228 228
229 return(0); 229 return 0;
230} 230}
231 231
232static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 232static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -234,7 +234,7 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
234 struct dlci_local *dlp; 234 struct dlci_local *dlp;
235 235
236 if (!capable(CAP_NET_ADMIN)) 236 if (!capable(CAP_NET_ADMIN))
237 return(-EPERM); 237 return -EPERM;
238 238
239 dlp = netdev_priv(dev); 239 dlp = netdev_priv(dev);
240 240
@@ -242,7 +242,7 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
242 { 242 {
243 case DLCI_GET_SLAVE: 243 case DLCI_GET_SLAVE:
244 if (!*(short *)(dev->dev_addr)) 244 if (!*(short *)(dev->dev_addr))
245 return(-EINVAL); 245 return -EINVAL;
246 246
247 strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave)); 247 strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave));
248 break; 248 break;
@@ -250,15 +250,15 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
250 case DLCI_GET_CONF: 250 case DLCI_GET_CONF:
251 case DLCI_SET_CONF: 251 case DLCI_SET_CONF:
252 if (!*(short *)(dev->dev_addr)) 252 if (!*(short *)(dev->dev_addr))
253 return(-EINVAL); 253 return -EINVAL;
254 254
255 return(dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF)); 255 return dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF);
256 break; 256 break;
257 257
258 default: 258 default:
259 return(-EOPNOTSUPP); 259 return -EOPNOTSUPP;
260 } 260 }
261 return(0); 261 return 0;
262} 262}
263 263
264static int dlci_change_mtu(struct net_device *dev, int new_mtu) 264static int dlci_change_mtu(struct net_device *dev, int new_mtu)
@@ -277,15 +277,15 @@ static int dlci_open(struct net_device *dev)
277 dlp = netdev_priv(dev); 277 dlp = netdev_priv(dev);
278 278
279 if (!*(short *)(dev->dev_addr)) 279 if (!*(short *)(dev->dev_addr))
280 return(-EINVAL); 280 return -EINVAL;
281 281
282 if (!netif_running(dlp->slave)) 282 if (!netif_running(dlp->slave))
283 return(-ENOTCONN); 283 return -ENOTCONN;
284 284
285 flp = netdev_priv(dlp->slave); 285 flp = netdev_priv(dlp->slave);
286 err = (*flp->activate)(dlp->slave, dev); 286 err = (*flp->activate)(dlp->slave, dev);
287 if (err) 287 if (err)
288 return(err); 288 return err;
289 289
290 netif_start_queue(dev); 290 netif_start_queue(dev);
291 291
@@ -365,14 +365,14 @@ static int dlci_add(struct dlci_add *dlci)
365 list_add(&dlp->list, &dlci_devs); 365 list_add(&dlp->list, &dlci_devs);
366 rtnl_unlock(); 366 rtnl_unlock();
367 367
368 return(0); 368 return 0;
369 369
370 err2: 370 err2:
371 rtnl_unlock(); 371 rtnl_unlock();
372 free_netdev(master); 372 free_netdev(master);
373 err1: 373 err1:
374 dev_put(slave); 374 dev_put(slave);
375 return(err); 375 return err;
376} 376}
377 377
378static int dlci_del(struct dlci_add *dlci) 378static int dlci_del(struct dlci_add *dlci)
@@ -385,10 +385,10 @@ static int dlci_del(struct dlci_add *dlci)
385 /* validate slave device */ 385 /* validate slave device */
386 master = __dev_get_by_name(&init_net, dlci->devname); 386 master = __dev_get_by_name(&init_net, dlci->devname);
387 if (!master) 387 if (!master)
388 return(-ENODEV); 388 return -ENODEV;
389 389
390 if (netif_running(master)) { 390 if (netif_running(master)) {
391 return(-EBUSY); 391 return -EBUSY;
392 } 392 }
393 393
394 dlp = netdev_priv(master); 394 dlp = netdev_priv(master);
@@ -406,7 +406,7 @@ static int dlci_del(struct dlci_add *dlci)
406 } 406 }
407 rtnl_unlock(); 407 rtnl_unlock();
408 408
409 return(err); 409 return err;
410} 410}
411 411
412static int dlci_ioctl(unsigned int cmd, void __user *arg) 412static int dlci_ioctl(unsigned int cmd, void __user *arg)
@@ -415,7 +415,7 @@ static int dlci_ioctl(unsigned int cmd, void __user *arg)
415 int err; 415 int err;
416 416
417 if (!capable(CAP_NET_ADMIN)) 417 if (!capable(CAP_NET_ADMIN))
418 return(-EPERM); 418 return -EPERM;
419 419
420 if (copy_from_user(&add, arg, sizeof(struct dlci_add))) 420 if (copy_from_user(&add, arg, sizeof(struct dlci_add)))
421 return -EFAULT; 421 return -EFAULT;
@@ -438,7 +438,7 @@ static int dlci_ioctl(unsigned int cmd, void __user *arg)
438 err = -EINVAL; 438 err = -EINVAL;
439 } 439 }
440 440
441 return(err); 441 return err;
442} 442}
443 443
444static const struct header_ops dlci_header_ops = { 444static const struct header_ops dlci_header_ops = {
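The dlci.c hunks above are all the same mechanical cleanup: return is a statement, not a function call, so the kernel coding style drops the parentheses around its value. A minimal before/after sketch (the function names are illustrative, not from the patch):

#include <linux/errno.h>

/* checkpatch.pl flags the first form with "return is not a function,
 * parentheses are not required". */
static int style_before(int err)
{
	return(err ? -EINVAL : 0);	/* old style */
}

static int style_after(int err)
{
	return err ? -EINVAL : 0;	/* preferred kernel style */
}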
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index b38ffa149aba..b1e5e5b69c2a 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -191,7 +191,8 @@ static int cisco_rx(struct sk_buff *skb)
191 191
192 switch (ntohl (cisco_data->type)) { 192 switch (ntohl (cisco_data->type)) {
193 case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */ 193 case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
194 in_dev = dev->ip_ptr; 194 rcu_read_lock();
195 in_dev = __in_dev_get_rcu(dev);
195 addr = 0; 196 addr = 0;
196 mask = ~cpu_to_be32(0); /* is the mask correct? */ 197 mask = ~cpu_to_be32(0); /* is the mask correct? */
197 198
@@ -211,6 +212,7 @@ static int cisco_rx(struct sk_buff *skb)
211 cisco_keepalive_send(dev, CISCO_ADDR_REPLY, 212 cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
212 addr, mask); 213 addr, mask);
213 } 214 }
215 rcu_read_unlock();
214 dev_kfree_skb_any(skb); 216 dev_kfree_skb_any(skb);
215 return NET_RX_SUCCESS; 217 return NET_RX_SUCCESS;
216 218
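The hdlc_cisco.c change above is more than style: dereferencing dev->ip_ptr directly races with the in_device being freed, so the pointer must be fetched through the RCU accessor inside a read-side critical section. A minimal sketch of the pattern, using a hypothetical helper (first_ifa_address is not from the patch):

#include <linux/inetdevice.h>
#include <linux/rcupdate.h>

/* Return the first local IPv4 address of dev, or 0 if it has none. */
static __be32 first_ifa_address(struct net_device *dev)
{
	struct in_device *in_dev;
	__be32 addr = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev && in_dev->ifa_list)
		addr = in_dev->ifa_list->ifa_local;
	rcu_read_unlock();	/* in_dev must not be used past this point */

	return addr;
}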
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 4d4dc38c7290..7f5bb913c8b9 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -46,7 +46,7 @@
46 46
47#include <net/x25device.h> 47#include <net/x25device.h>
48 48
49static char bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 49static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
50 50
51/* If this number is made larger, check that the temporary string buffer 51/* If this number is made larger, check that the temporary string buffer
52 * in lapbeth_new_device is large enough to store the probe device name.*/ 52 * in lapbeth_new_device is large enough to store the probe device name.*/
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index e2c6f7f4f51c..70feb84df670 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1022,7 +1022,7 @@ static int lmc_open(struct net_device *dev)
1022 1022
1023 if (sc->lmc_ok){ 1023 if (sc->lmc_ok){
1024 lmc_trace(dev, "lmc_open lmc_ok out"); 1024 lmc_trace(dev, "lmc_open lmc_ok out");
1025 return (0); 1025 return 0;
1026 } 1026 }
1027 1027
1028 lmc_softreset (sc); 1028 lmc_softreset (sc);
@@ -1105,12 +1105,12 @@ static int lmc_open(struct net_device *dev)
1105 init_timer (&sc->timer); 1105 init_timer (&sc->timer);
1106 sc->timer.expires = jiffies + HZ; 1106 sc->timer.expires = jiffies + HZ;
1107 sc->timer.data = (unsigned long) dev; 1107 sc->timer.data = (unsigned long) dev;
1108 sc->timer.function = &lmc_watchdog; 1108 sc->timer.function = lmc_watchdog;
1109 add_timer (&sc->timer); 1109 add_timer (&sc->timer);
1110 1110
1111 lmc_trace(dev, "lmc_open out"); 1111 lmc_trace(dev, "lmc_open out");
1112 1112
1113 return (0); 1113 return 0;
1114} 1114}
1115 1115
1116/* Total reset to compensate for the AdTran DSU doing bad things 1116/* Total reset to compensate for the AdTran DSU doing bad things
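Dropping the '&' in sc->timer.function = lmc_watchdog is purely cosmetic: a function designator already decays to a pointer, so the address-of operator is redundant. A sketch of the surrounding timer idiom as it looked in this era, before timer_setup() existed (helper names are illustrative):

#include <linux/timer.h>
#include <linux/jiffies.h>

static void watchdog_example(unsigned long data)
{
	/* cast data back to the device, check the hardware, re-arm... */
}

static void arm_watchdog(struct timer_list *t, unsigned long data)
{
	init_timer(t);
	t->expires = jiffies + HZ;	/* fire in one second */
	t->data = data;
	t->function = watchdog_example;	/* no '&' needed */
	add_timer(t);
}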
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 5394b51bdb2f..17d408fe693f 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -282,7 +282,7 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
282 new_line.clock_type != CLOCK_TXFROMRX && 282 new_line.clock_type != CLOCK_TXFROMRX &&
283 new_line.clock_type != CLOCK_INT && 283 new_line.clock_type != CLOCK_INT &&
284 new_line.clock_type != CLOCK_TXINT) 284 new_line.clock_type != CLOCK_TXINT)
285 return -EINVAL; /* No such clock setting */ 285 return -EINVAL; /* No such clock setting */
286 286
287 if (new_line.loopback != 0 && new_line.loopback != 1) 287 if (new_line.loopback != 0 && new_line.loopback != 1)
288 return -EINVAL; 288 return -EINVAL;
@@ -379,14 +379,14 @@ static int __init n2_run(unsigned long io, unsigned long irq,
379 if (request_irq(irq, sca_intr, 0, devname, card)) { 379 if (request_irq(irq, sca_intr, 0, devname, card)) {
380 printk(KERN_ERR "n2: could not allocate IRQ\n"); 380 printk(KERN_ERR "n2: could not allocate IRQ\n");
381 n2_destroy_card(card); 381 n2_destroy_card(card);
382 return(-EBUSY); 382 return -EBUSY;
383 } 383 }
384 card->irq = irq; 384 card->irq = irq;
385 385
386 if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) { 386 if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) {
387 printk(KERN_ERR "n2: could not request RAM window\n"); 387 printk(KERN_ERR "n2: could not request RAM window\n");
388 n2_destroy_card(card); 388 n2_destroy_card(card);
389 return(-EBUSY); 389 return -EBUSY;
390 } 390 }
391 card->phy_winbase = winbase; 391 card->phy_winbase = winbase;
392 card->winbase = ioremap(winbase, USE_WINDOWSIZE); 392 card->winbase = ioremap(winbase, USE_WINDOWSIZE);
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index c6aa66e5b52f..f875cfae3093 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -1,5 +1,5 @@
1#define USE_PCI_CLOCK 1#define USE_PCI_CLOCK
2static char rcsid[] = 2static const char rcsid[] =
3"Revision: 3.4.5 Date: 2002/03/07 "; 3"Revision: 3.4.5 Date: 2002/03/07 ";
4 4
5/* 5/*
@@ -451,11 +451,11 @@ static int dma_get_rx_frame_size(pc300_t * card, int ch)
451 if ((status & DST_EOM) || (first_bd == card->chan[ch].rx_last_bd)) { 451 if ((status & DST_EOM) || (first_bd == card->chan[ch].rx_last_bd)) {
452 /* Return the size of a good frame or incomplete bad frame 452 /* Return the size of a good frame or incomplete bad frame
453 * (dma_buf_read will clean the buffer descriptors in this case). */ 453 * (dma_buf_read will clean the buffer descriptors in this case). */
454 return (rcvd); 454 return rcvd;
455 } 455 }
456 ptdescr = (card->hw.rambase + cpc_readl(&ptdescr->next)); 456 ptdescr = (card->hw.rambase + cpc_readl(&ptdescr->next));
457 } 457 }
458 return (-1); 458 return -1;
459} 459}
460 460
461/* 461/*
@@ -557,7 +557,7 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
557 cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), 557 cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
558 RX_BD_ADDR(ch, chan->rx_last_bd)); 558 RX_BD_ADDR(ch, chan->rx_last_bd));
559 } 559 }
560 return (rcvd); 560 return rcvd;
561} 561}
562 562
563static void tx_dma_stop(pc300_t * card, int ch) 563static void tx_dma_stop(pc300_t * card, int ch)
@@ -1733,7 +1733,7 @@ static u16 falc_pattern_test_error(pc300_t * card, int ch)
1733 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1733 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1734 falc_t *pfalc = (falc_t *) & chan->falc; 1734 falc_t *pfalc = (falc_t *) & chan->falc;
1735 1735
1736 return (pfalc->bec); 1736 return pfalc->bec;
1737} 1737}
1738 1738
1739/**********************************/ 1739/**********************************/
@@ -2819,7 +2819,7 @@ static int clock_rate_calc(u32 rate, u32 clock, int *br_io)
2819 *br_io = 0; 2819 *br_io = 0;
2820 2820
2821 if (rate == 0) 2821 if (rate == 0)
2822 return (0); 2822 return 0;
2823 2823
2824 for (br = 0, br_pwr = 1; br <= 9; br++, br_pwr <<= 1) { 2824 for (br = 0, br_pwr = 1; br <= 9; br++, br_pwr <<= 1) {
2825 if ((tc = clock / br_pwr / rate) <= 0xff) { 2825 if ((tc = clock / br_pwr / rate) <= 0xff) {
@@ -2832,11 +2832,11 @@ static int clock_rate_calc(u32 rate, u32 clock, int *br_io)
2832 error = ((rate - (clock / br_pwr / rate)) / rate) * 1000; 2832 error = ((rate - (clock / br_pwr / rate)) / rate) * 1000;
2833 /* Errors bigger than +/- 1% won't be tolerated */ 2833 /* Errors bigger than +/- 1% won't be tolerated */
2834 if (error < -10 || error > 10) 2834 if (error < -10 || error > 10)
2835 return (-1); 2835 return -1;
2836 else 2836 else
2837 return (tc); 2837 return tc;
2838 } else { 2838 } else {
2839 return (-1); 2839 return -1;
2840 } 2840 }
2841} 2841}
2842 2842
@@ -3207,7 +3207,7 @@ static u32 detect_ram(pc300_t * card)
3207 break; 3207 break;
3208 } 3208 }
3209 } 3209 }
3210 return (i); 3210 return i;
3211} 3211}
3212 3212
3213static void plx_init(pc300_t * card) 3213static void plx_init(pc300_t * card)
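clock_rate_calc() above searches for a baud-rate generator setting: a power-of-two prescaler 2^br (br = 0..9) and an 8-bit time constant tc such that clock / 2^br / tc approximates the requested rate within +/- 1%. A userspace sketch of the same search (the 24.576 MHz clock is an illustrative value, not taken from the driver):

#include <stdio.h>

static int divisor_search(unsigned int clock, unsigned int rate)
{
	unsigned int br, br_pwr;

	for (br = 0, br_pwr = 1; br <= 9; br++, br_pwr <<= 1) {
		unsigned int tc = clock / br_pwr / rate;

		if (tc >= 1 && tc <= 0xff) {
			printf("br=%u tc=%u -> %u bps\n",
			       br, tc, clock / br_pwr / tc);
			return 0;
		}
	}
	return -1;	/* no usable divisor */
}

int main(void)
{
	return divisor_search(24576000, 128000);	/* finds br=0, tc=192 */
}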
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4293889e287e..515d9b8af01e 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -540,7 +540,7 @@ static int cpc_tty_chars_in_buffer(struct tty_struct *tty)
540 return -ENODEV; 540 return -ENODEV;
541 } 541 }
542 542
543 return(0); 543 return 0;
544} 544}
545 545
546static int pc300_tiocmset(struct tty_struct *tty, struct file *file, 546static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index e2cff64a446a..fd7375955e41 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -220,7 +220,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
220 new_line.clock_type != CLOCK_TXFROMRX && 220 new_line.clock_type != CLOCK_TXFROMRX &&
221 new_line.clock_type != CLOCK_INT && 221 new_line.clock_type != CLOCK_INT &&
222 new_line.clock_type != CLOCK_TXINT) 222 new_line.clock_type != CLOCK_TXINT)
223 return -EINVAL; /* No such clock setting */ 223 return -EINVAL; /* No such clock setting */
224 224
225 if (new_line.loopback != 0 && new_line.loopback != 1) 225 if (new_line.loopback != 0 && new_line.loopback != 1)
226 return -EINVAL; 226 return -EINVAL;
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index f4125da2762f..3f4e2b5684db 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -178,7 +178,7 @@ static char sdla_byte(struct net_device *dev, int addr)
178 byte = *temp; 178 byte = *temp;
179 spin_unlock_irqrestore(&sdla_lock, flags); 179 spin_unlock_irqrestore(&sdla_lock, flags);
180 180
181 return(byte); 181 return byte;
182} 182}
183 183
184static void sdla_stop(struct net_device *dev) 184static void sdla_stop(struct net_device *dev)
@@ -267,7 +267,7 @@ static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char r
267 resp = *temp; 267 resp = *temp;
268 } 268 }
269 } 269 }
270 return(time_before(jiffies, done) ? jiffies - start : -1); 270 return time_before(jiffies, done) ? jiffies - start : -1;
271} 271}
272 272
273/* constants for Z80 CPU speed */ 273/* constants for Z80 CPU speed */
@@ -283,13 +283,13 @@ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
283 283
284 sdla_start(dev); 284 sdla_start(dev);
285 if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0) 285 if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0)
286 return(-EIO); 286 return -EIO;
287 287
288 data = LOADER_READY; 288 data = LOADER_READY;
289 sdla_write(dev, 0, &data, 1); 289 sdla_write(dev, 0, &data, 1);
290 290
291 if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0) 291 if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0)
292 return(-EIO); 292 return -EIO;
293 293
294 sdla_stop(dev); 294 sdla_stop(dev);
295 sdla_read(dev, 0, &data, 1); 295 sdla_read(dev, 0, &data, 1);
@@ -297,11 +297,11 @@ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
297 if (data == Z80_SCC_BAD) 297 if (data == Z80_SCC_BAD)
298 { 298 {
299 printk("%s: SCC bad\n", dev->name); 299 printk("%s: SCC bad\n", dev->name);
300 return(-EIO); 300 return -EIO;
301 } 301 }
302 302
303 if (data != Z80_SCC_OK) 303 if (data != Z80_SCC_OK)
304 return(-EINVAL); 304 return -EINVAL;
305 305
306 if (jiffs < 165) 306 if (jiffs < 165)
307 ifr->ifr_mtu = SDLA_CPU_16M; 307 ifr->ifr_mtu = SDLA_CPU_16M;
@@ -316,7 +316,7 @@ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
316 else 316 else
317 ifr->ifr_mtu = SDLA_CPU_3M; 317 ifr->ifr_mtu = SDLA_CPU_3M;
318 318
319 return(0); 319 return 0;
320} 320}
321 321
322/************************************************ 322/************************************************
@@ -493,7 +493,7 @@ static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
493 if (ret != SDLA_RET_OK) 493 if (ret != SDLA_RET_OK)
494 sdla_errors(dev, cmd, dlci, ret, len, &status); 494 sdla_errors(dev, cmd, dlci, ret, len, &status);
495 495
496 return(ret); 496 return ret;
497} 497}
498 498
499/*********************************************** 499/***********************************************
@@ -516,14 +516,14 @@ static int sdla_activate(struct net_device *slave, struct net_device *master)
516 break; 516 break;
517 517
518 if (i == CONFIG_DLCI_MAX) 518 if (i == CONFIG_DLCI_MAX)
519 return(-ENODEV); 519 return -ENODEV;
520 520
521 flp->dlci[i] = abs(flp->dlci[i]); 521 flp->dlci[i] = abs(flp->dlci[i]);
522 522
523 if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE)) 523 if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
524 sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL); 524 sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
525 525
526 return(0); 526 return 0;
527} 527}
528 528
529static int sdla_deactivate(struct net_device *slave, struct net_device *master) 529static int sdla_deactivate(struct net_device *slave, struct net_device *master)
@@ -538,14 +538,14 @@ static int sdla_deactivate(struct net_device *slave, struct net_device *master)
538 break; 538 break;
539 539
540 if (i == CONFIG_DLCI_MAX) 540 if (i == CONFIG_DLCI_MAX)
541 return(-ENODEV); 541 return -ENODEV;
542 542
543 flp->dlci[i] = -abs(flp->dlci[i]); 543 flp->dlci[i] = -abs(flp->dlci[i]);
544 544
545 if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE)) 545 if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
546 sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL); 546 sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
547 547
548 return(0); 548 return 0;
549} 549}
550 550
551static int sdla_assoc(struct net_device *slave, struct net_device *master) 551static int sdla_assoc(struct net_device *slave, struct net_device *master)
@@ -554,7 +554,7 @@ static int sdla_assoc(struct net_device *slave, struct net_device *master)
554 int i; 554 int i;
555 555
556 if (master->type != ARPHRD_DLCI) 556 if (master->type != ARPHRD_DLCI)
557 return(-EINVAL); 557 return -EINVAL;
558 558
559 flp = netdev_priv(slave); 559 flp = netdev_priv(slave);
560 560
@@ -563,11 +563,11 @@ static int sdla_assoc(struct net_device *slave, struct net_device *master)
563 if (!flp->master[i]) 563 if (!flp->master[i])
564 break; 564 break;
565 if (abs(flp->dlci[i]) == *(short *)(master->dev_addr)) 565 if (abs(flp->dlci[i]) == *(short *)(master->dev_addr))
566 return(-EADDRINUSE); 566 return -EADDRINUSE;
567 } 567 }
568 568
569 if (i == CONFIG_DLCI_MAX) 569 if (i == CONFIG_DLCI_MAX)
570 return(-EMLINK); /* #### Alan: Comments on this ?? */ 570 return -EMLINK; /* #### Alan: Comments on this ?? */
571 571
572 572
573 flp->master[i] = master; 573 flp->master[i] = master;
@@ -581,7 +581,7 @@ static int sdla_assoc(struct net_device *slave, struct net_device *master)
581 sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL); 581 sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
582 } 582 }
583 583
584 return(0); 584 return 0;
585} 585}
586 586
587static int sdla_deassoc(struct net_device *slave, struct net_device *master) 587static int sdla_deassoc(struct net_device *slave, struct net_device *master)
@@ -596,7 +596,7 @@ static int sdla_deassoc(struct net_device *slave, struct net_device *master)
596 break; 596 break;
597 597
598 if (i == CONFIG_DLCI_MAX) 598 if (i == CONFIG_DLCI_MAX)
599 return(-ENODEV); 599 return -ENODEV;
600 600
601 flp->master[i] = NULL; 601 flp->master[i] = NULL;
602 flp->dlci[i] = 0; 602 flp->dlci[i] = 0;
@@ -609,7 +609,7 @@ static int sdla_deassoc(struct net_device *slave, struct net_device *master)
609 sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL); 609 sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
610 } 610 }
611 611
612 return(0); 612 return 0;
613} 613}
614 614
615static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get) 615static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
@@ -626,7 +626,7 @@ static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, i
626 break; 626 break;
627 627
628 if (i == CONFIG_DLCI_MAX) 628 if (i == CONFIG_DLCI_MAX)
629 return(-ENODEV); 629 return -ENODEV;
630 630
631 dlp = netdev_priv(master); 631 dlp = netdev_priv(master);
632 632
@@ -641,7 +641,7 @@ static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, i
641 &dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL); 641 &dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL);
642 } 642 }
643 643
644 return(ret == SDLA_RET_OK ? 0 : -EIO); 644 return ret == SDLA_RET_OK ? 0 : -EIO;
645} 645}
646 646
647/************************** 647/**************************
@@ -986,7 +986,7 @@ static int sdla_close(struct net_device *dev)
986 986
987 netif_stop_queue(dev); 987 netif_stop_queue(dev);
988 988
989 return(0); 989 return 0;
990} 990}
991 991
992struct conf_data { 992struct conf_data {
@@ -1006,10 +1006,10 @@ static int sdla_open(struct net_device *dev)
1006 flp = netdev_priv(dev); 1006 flp = netdev_priv(dev);
1007 1007
1008 if (!flp->initialized) 1008 if (!flp->initialized)
1009 return(-EPERM); 1009 return -EPERM;
1010 1010
1011 if (!flp->configured) 1011 if (!flp->configured)
1012 return(-EPERM); 1012 return -EPERM;
1013 1013
1014 /* time to send in the configuration */ 1014 /* time to send in the configuration */
1015 len = 0; 1015 len = 0;
@@ -1087,7 +1087,7 @@ static int sdla_open(struct net_device *dev)
1087 1087
1088 netif_start_queue(dev); 1088 netif_start_queue(dev);
1089 1089
1090 return(0); 1090 return 0;
1091} 1091}
1092 1092
1093static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get) 1093static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get)
@@ -1098,48 +1098,48 @@ static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, in
1098 short size; 1098 short size;
1099 1099
1100 if (dev->type == 0xFFFF) 1100 if (dev->type == 0xFFFF)
1101 return(-EUNATCH); 1101 return -EUNATCH;
1102 1102
1103 flp = netdev_priv(dev); 1103 flp = netdev_priv(dev);
1104 1104
1105 if (!get) 1105 if (!get)
1106 { 1106 {
1107 if (netif_running(dev)) 1107 if (netif_running(dev))
1108 return(-EBUSY); 1108 return -EBUSY;
1109 1109
1110 if(copy_from_user(&data.config, conf, sizeof(struct frad_conf))) 1110 if(copy_from_user(&data.config, conf, sizeof(struct frad_conf)))
1111 return -EFAULT; 1111 return -EFAULT;
1112 1112
1113 if (data.config.station & ~FRAD_STATION_NODE) 1113 if (data.config.station & ~FRAD_STATION_NODE)
1114 return(-EINVAL); 1114 return -EINVAL;
1115 1115
1116 if (data.config.flags & ~FRAD_VALID_FLAGS) 1116 if (data.config.flags & ~FRAD_VALID_FLAGS)
1117 return(-EINVAL); 1117 return -EINVAL;
1118 1118
1119 if ((data.config.kbaud < 0) || 1119 if ((data.config.kbaud < 0) ||
1120 ((data.config.kbaud > 128) && (flp->type != SDLA_S508))) 1120 ((data.config.kbaud > 128) && (flp->type != SDLA_S508)))
1121 return(-EINVAL); 1121 return -EINVAL;
1122 1122
1123 if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232)) 1123 if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232))
1124 return(-EINVAL); 1124 return -EINVAL;
1125 1125
1126 if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU)) 1126 if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU))
1127 return(-EINVAL); 1127 return -EINVAL;
1128 1128
1129 if ((data.config.T391 < 5) || (data.config.T391 > 30)) 1129 if ((data.config.T391 < 5) || (data.config.T391 > 30))
1130 return(-EINVAL); 1130 return -EINVAL;
1131 1131
1132 if ((data.config.T392 < 5) || (data.config.T392 > 30)) 1132 if ((data.config.T392 < 5) || (data.config.T392 > 30))
1133 return(-EINVAL); 1133 return -EINVAL;
1134 1134
1135 if ((data.config.N391 < 1) || (data.config.N391 > 255)) 1135 if ((data.config.N391 < 1) || (data.config.N391 > 255))
1136 return(-EINVAL); 1136 return -EINVAL;
1137 1137
1138 if ((data.config.N392 < 1) || (data.config.N392 > 10)) 1138 if ((data.config.N392 < 1) || (data.config.N392 > 10))
1139 return(-EINVAL); 1139 return -EINVAL;
1140 1140
1141 if ((data.config.N393 < 1) || (data.config.N393 > 10)) 1141 if ((data.config.N393 < 1) || (data.config.N393 > 10))
1142 return(-EINVAL); 1142 return -EINVAL;
1143 1143
1144 memcpy(&flp->config, &data.config, sizeof(struct frad_conf)); 1144 memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
1145 flp->config.flags |= SDLA_DIRECT_RECV; 1145 flp->config.flags |= SDLA_DIRECT_RECV;
@@ -1171,7 +1171,7 @@ static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, in
1171 { 1171 {
1172 size = sizeof(data); 1172 size = sizeof(data);
1173 if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK) 1173 if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK)
1174 return(-EIO); 1174 return -EIO;
1175 } 1175 }
1176 else 1176 else
1177 if (flp->configured) 1177 if (flp->configured)
@@ -1185,7 +1185,7 @@ static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, in
1185 return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0; 1185 return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0;
1186 } 1186 }
1187 1187
1188 return(0); 1188 return 0;
1189} 1189}
1190 1190
1191static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read) 1191static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read)
@@ -1200,7 +1200,7 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r
1200 { 1200 {
1201 temp = kzalloc(mem.len, GFP_KERNEL); 1201 temp = kzalloc(mem.len, GFP_KERNEL);
1202 if (!temp) 1202 if (!temp)
1203 return(-ENOMEM); 1203 return -ENOMEM;
1204 sdla_read(dev, mem.addr, temp, mem.len); 1204 sdla_read(dev, mem.addr, temp, mem.len);
1205 if(copy_to_user(mem.data, temp, mem.len)) 1205 if(copy_to_user(mem.data, temp, mem.len))
1206 { 1206 {
@@ -1217,7 +1217,7 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r
1217 sdla_write(dev, mem.addr, temp, mem.len); 1217 sdla_write(dev, mem.addr, temp, mem.len);
1218 kfree(temp); 1218 kfree(temp);
1219 } 1219 }
1220 return(0); 1220 return 0;
1221} 1221}
1222 1222
1223static int sdla_reconfig(struct net_device *dev) 1223static int sdla_reconfig(struct net_device *dev)
@@ -1241,7 +1241,7 @@ static int sdla_reconfig(struct net_device *dev)
1241 sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL); 1241 sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
1242 sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); 1242 sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
1243 1243
1244 return(0); 1244 return 0;
1245} 1245}
1246 1246
1247static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1247static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1254,20 +1254,20 @@ static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1254 flp = netdev_priv(dev); 1254 flp = netdev_priv(dev);
1255 1255
1256 if (!flp->initialized) 1256 if (!flp->initialized)
1257 return(-EINVAL); 1257 return -EINVAL;
1258 1258
1259 switch (cmd) 1259 switch (cmd)
1260 { 1260 {
1261 case FRAD_GET_CONF: 1261 case FRAD_GET_CONF:
1262 case FRAD_SET_CONF: 1262 case FRAD_SET_CONF:
1263 return(sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF)); 1263 return sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF);
1264 1264
1265 case SDLA_IDENTIFY: 1265 case SDLA_IDENTIFY:
1266 ifr->ifr_flags = flp->type; 1266 ifr->ifr_flags = flp->type;
1267 break; 1267 break;
1268 1268
1269 case SDLA_CPUSPEED: 1269 case SDLA_CPUSPEED:
1270 return(sdla_cpuspeed(dev, ifr)); 1270 return sdla_cpuspeed(dev, ifr);
1271 1271
1272/* ========================================================== 1272/* ==========================================================
1273NOTE: This is rather a useless action right now, as the 1273NOTE: This is rather a useless action right now, as the
@@ -1277,7 +1277,7 @@ NOTE: This is rather a useless action right now, as the
1277============================================================*/ 1277============================================================*/
1278 case SDLA_PROTOCOL: 1278 case SDLA_PROTOCOL:
1279 if (flp->configured) 1279 if (flp->configured)
1280 return(-EALREADY); 1280 return -EALREADY;
1281 1281
1282 switch (ifr->ifr_flags) 1282 switch (ifr->ifr_flags)
1283 { 1283 {
@@ -1285,7 +1285,7 @@ NOTE: This is rather a useless action right now, as the
1285 dev->type = ifr->ifr_flags; 1285 dev->type = ifr->ifr_flags;
1286 break; 1286 break;
1287 default: 1287 default:
1288 return(-ENOPROTOOPT); 1288 return -ENOPROTOOPT;
1289 } 1289 }
1290 break; 1290 break;
1291 1291
@@ -1297,7 +1297,7 @@ NOTE: This is rather a useless action right now, as the
1297 case SDLA_READMEM: 1297 case SDLA_READMEM:
1298 if(!capable(CAP_SYS_RAWIO)) 1298 if(!capable(CAP_SYS_RAWIO))
1299 return -EPERM; 1299 return -EPERM;
1300 return(sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM)); 1300 return sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM);
1301 1301
1302 case SDLA_START: 1302 case SDLA_START:
1303 sdla_start(dev); 1303 sdla_start(dev);
@@ -1308,9 +1308,9 @@ NOTE: This is rather a useless action right now, as the
1308 break; 1308 break;
1309 1309
1310 default: 1310 default:
1311 return(-EOPNOTSUPP); 1311 return -EOPNOTSUPP;
1312 } 1312 }
1313 return(0); 1313 return 0;
1314} 1314}
1315 1315
1316static int sdla_change_mtu(struct net_device *dev, int new_mtu) 1316static int sdla_change_mtu(struct net_device *dev, int new_mtu)
@@ -1320,10 +1320,10 @@ static int sdla_change_mtu(struct net_device *dev, int new_mtu)
1320 flp = netdev_priv(dev); 1320 flp = netdev_priv(dev);
1321 1321
1322 if (netif_running(dev)) 1322 if (netif_running(dev))
1323 return(-EBUSY); 1323 return -EBUSY;
1324 1324
1325 /* for now, you can't change the MTU! */ 1325 /* for now, you can't change the MTU! */
1326 return(-EOPNOTSUPP); 1326 return -EOPNOTSUPP;
1327} 1327}
1328 1328
1329static int sdla_set_config(struct net_device *dev, struct ifmap *map) 1329static int sdla_set_config(struct net_device *dev, struct ifmap *map)
@@ -1337,18 +1337,18 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
1337 flp = netdev_priv(dev); 1337 flp = netdev_priv(dev);
1338 1338
1339 if (flp->initialized) 1339 if (flp->initialized)
1340 return(-EINVAL); 1340 return -EINVAL;
1341 1341
1342 for(i=0; i < ARRAY_SIZE(valid_port); i++) 1342 for(i=0; i < ARRAY_SIZE(valid_port); i++)
1343 if (valid_port[i] == map->base_addr) 1343 if (valid_port[i] == map->base_addr)
1344 break; 1344 break;
1345 1345
1346 if (i == ARRAY_SIZE(valid_port)) 1346 if (i == ARRAY_SIZE(valid_port))
1347 return(-EINVAL); 1347 return -EINVAL;
1348 1348
1349 if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){ 1349 if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
1350 printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr); 1350 printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr);
1351 return(-EINVAL); 1351 return -EINVAL;
1352 } 1352 }
1353 base = map->base_addr; 1353 base = map->base_addr;
1354 1354
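The long validation chain in sdla_config() checks the standard frame relay LMI parameters: T391 and T392 are timers in seconds, N391/N392/N393 are event counters, with the ranges exactly as in the hunk above. A table-driven userspace sketch of the same checks (names and layout illustrative):

#include <errno.h>
#include <stddef.h>

struct range { int lo, hi; };

static const struct range lmi_limits[] = {
	{ 5, 30 },	/* T391: link integrity verification timer */
	{ 5, 30 },	/* T392: polling verification timer */
	{ 1, 255 },	/* N391: full status polling counter */
	{ 1, 10 },	/* N392: error threshold */
	{ 1, 10 },	/* N393: monitored events count */
};

/* vals[] holds T391, T392, N391, N392, N393 in that order. */
static int check_lmi(const int *vals)
{
	size_t i;

	for (i = 0; i < sizeof(lmi_limits) / sizeof(lmi_limits[0]); i++)
		if (vals[i] < lmi_limits[i].lo || vals[i] > lmi_limits[i].hi)
			return -EINVAL;
	return 0;
}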
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index e47f5a986b1c..d81ad8397885 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -648,7 +648,7 @@ static int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
648 } 648 }
649 } 649 }
650 *ptr++ = X25_END; 650 *ptr++ = X25_END;
651 return (ptr - d); 651 return ptr - d;
652} 652}
653 653
654static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) 654static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index fbf5e843d48c..93956861ea21 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -766,7 +766,7 @@ irqreturn_t z8530_interrupt(int irq, void *dev_id)
766 766
767EXPORT_SYMBOL(z8530_interrupt); 767EXPORT_SYMBOL(z8530_interrupt);
768 768
769static char reg_init[16]= 769static const u8 reg_init[16]=
770{ 770{
771 0,0,0,0, 771 0,0,0,0,
772 0,0,0,0, 772 0,0,0,0,
@@ -1206,7 +1206,7 @@ EXPORT_SYMBOL(z8530_sync_txdma_close);
1206 * it exists... 1206 * it exists...
1207 */ 1207 */
1208 1208
1209static char *z8530_type_name[]={ 1209static const char *z8530_type_name[]={
1210 "Z8530", 1210 "Z8530",
1211 "Z85C30", 1211 "Z85C30",
1212 "Z85230" 1212 "Z85230"
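z85230.c gets the same const-ification as lapbether.c above: initialization tables and name tables the driver never writes can live in .rodata, and a const-qualified pointee lets the compiler catch accidental stores. A sketch of both forms (the extra "* const" on the array itself goes one step further than the patch does):

#include <linux/types.h>

static const u8 example_reg_init[4] = { 0x00, 0x10, 0x20, 0x30 };
static const char * const example_type_names[] = {
	"Z8530", "Z85C30", "Z85230"
};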
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index eb72c67699ab..f1549fff0edc 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -342,10 +342,10 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
342 printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n", 342 printk(" %s, IRQ %d, shared memory at %#lx-%#lx.\n",
343 model_name, dev->irq, dev->mem_start, dev->mem_end-1); 343 model_name, dev->irq, dev->mem_start, dev->mem_end-1);
344 344
345 ei_status.reset_8390 = &wd_reset_8390; 345 ei_status.reset_8390 = wd_reset_8390;
346 ei_status.block_input = &wd_block_input; 346 ei_status.block_input = wd_block_input;
347 ei_status.block_output = &wd_block_output; 347 ei_status.block_output = wd_block_output;
348 ei_status.get_8390_hdr = &wd_get_8390_hdr; 348 ei_status.get_8390_hdr = wd_get_8390_hdr;
349 349
350 dev->netdev_ops = &wd_netdev_ops; 350 dev->netdev_ops = &wd_netdev_ops;
351 NS8390_init(dev, 0); 351 NS8390_init(dev, 0);
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 5d4ce4d2b32b..85af697574a6 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -50,5 +50,7 @@ obj-$(CONFIG_ATH_COMMON) += ath/
50obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o 50obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
51 51
52obj-$(CONFIG_WL12XX) += wl12xx/ 52obj-$(CONFIG_WL12XX) += wl12xx/
53# small builtin driver bit
54obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/wl12xx_platform_data.o
53 55
54obj-$(CONFIG_IWM) += iwmc3200wifi/ 56obj-$(CONFIG_IWM) += iwmc3200wifi/
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 1d05445d4ba3..924ed095dd99 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -105,7 +105,7 @@ static struct pci_driver airo_driver = {
105 of statistics in the /proc filesystem */ 105 of statistics in the /proc filesystem */
106 106
107#define IGNLABEL(comment) NULL 107#define IGNLABEL(comment) NULL
108static char *statsLabels[] = { 108static const char *statsLabels[] = {
109 "RxOverrun", 109 "RxOverrun",
110 IGNLABEL("RxPlcpCrcErr"), 110 IGNLABEL("RxPlcpCrcErr"),
111 IGNLABEL("RxPlcpFormatErr"), 111 IGNLABEL("RxPlcpFormatErr"),
@@ -932,7 +932,7 @@ typedef struct aironet_ioctl {
932 unsigned char __user *data; // d-data 932 unsigned char __user *data; // d-data
933} aironet_ioctl; 933} aironet_ioctl;
934 934
935static char swversion[] = "2.1"; 935static const char swversion[] = "2.1";
936#endif /* CISCO_EXT */ 936#endif /* CISCO_EXT */
937 937
938#define NUM_MODULES 2 938#define NUM_MODULES 2
@@ -1374,7 +1374,7 @@ static int micsetup(struct airo_info *ai) {
1374 return SUCCESS; 1374 return SUCCESS;
1375} 1375}
1376 1376
1377static char micsnap[] = {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02}; 1377static const u8 micsnap[] = {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02};
1378 1378
1379/*=========================================================================== 1379/*===========================================================================
1380 * Description: Mic a packet 1380 * Description: Mic a packet
@@ -2723,9 +2723,8 @@ static int airo_networks_allocate(struct airo_info *ai)
2723 if (ai->networks) 2723 if (ai->networks)
2724 return 0; 2724 return 0;
2725 2725
2726 ai->networks = 2726 ai->networks = kcalloc(AIRO_MAX_NETWORK_COUNT, sizeof(BSSListElement),
2727 kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement), 2727 GFP_KERNEL);
2728 GFP_KERNEL);
2729 if (!ai->networks) { 2728 if (!ai->networks) {
2730 airo_print_warn("", "Out of memory allocating beacons"); 2729 airo_print_warn("", "Out of memory allocating beacons");
2731 return -ENOMEM; 2730 return -ENOMEM;
@@ -5024,7 +5023,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
5024 airo_config_commit(dev, NULL, NULL, NULL); 5023 airo_config_commit(dev, NULL, NULL, NULL);
5025} 5024}
5026 5025
5027static char *get_rmode(__le16 mode) 5026static const char *get_rmode(__le16 mode)
5028{ 5027{
5029 switch(mode & RXMODE_MASK) { 5028 switch(mode & RXMODE_MASK) {
5030 case RXMODE_RFMON: return "rfmon"; 5029 case RXMODE_RFMON: return "rfmon";
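The airo.c allocation hunk is a small hardening fix as well as a cleanup: kcalloc(n, size, flags) returns zeroed memory just like kzalloc(n * size, flags), but it also fails with NULL if the multiplication would overflow, instead of silently allocating a short buffer. A minimal sketch (the helper name is illustrative):

#include <linux/slab.h>

/* Zeroed, overflow-checked array allocation. */
static void *alloc_zeroed_array(size_t n, size_t elem_size)
{
	return kcalloc(n, elem_size, GFP_KERNEL);
}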
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 1128fa8c9ed5..91c5f73b5ba3 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2061,11 +2061,12 @@ static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2061 2061
2062 int i; 2062 int i;
2063 2063
2064 at76_dbg(DBG_MAC80211, "%s(): cmd %d key->alg %d key->keyidx %d " 2064 at76_dbg(DBG_MAC80211, "%s(): cmd %d key->cipher %d key->keyidx %d "
2065 "key->keylen %d", 2065 "key->keylen %d",
2066 __func__, cmd, key->alg, key->keyidx, key->keylen); 2066 __func__, cmd, key->cipher, key->keyidx, key->keylen);
2067 2067
2068 if (key->alg != ALG_WEP) 2068 if ((key->cipher != WLAN_CIPHER_SUITE_WEP40) &&
2069 (key->cipher != WLAN_CIPHER_SUITE_WEP104))
2069 return -EOPNOTSUPP; 2070 return -EOPNOTSUPP;
2070 2071
2071 key->hw_key_idx = key->keyidx; 2072 key->hw_key_idx = key->keyidx;
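The at76c50x hunk tracks a mac80211 API change: the coarse key->alg field (ALG_WEP and friends) was replaced by key->cipher, which carries the IEEE 802.11 cipher-suite selector, so 40-bit and 104-bit WEP become distinct values and drivers match on WLAN_CIPHER_SUITE_* constants. A sketch of the resulting capability check, assuming the post-conversion mac80211 definitions:

#include <linux/ieee80211.h>
#include <net/mac80211.h>

static bool hw_supports_cipher(const struct ieee80211_key_conf *key)
{
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
		return true;
	default:
		return false;	/* e.g. WLAN_CIPHER_SUITE_AES_CMAC */
	}
}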
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 0a75be027afa..92c216263ee9 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -25,5 +25,6 @@ config ATH_DEBUG
25source "drivers/net/wireless/ath/ath5k/Kconfig" 25source "drivers/net/wireless/ath/ath5k/Kconfig"
26source "drivers/net/wireless/ath/ath9k/Kconfig" 26source "drivers/net/wireless/ath/ath9k/Kconfig"
27source "drivers/net/wireless/ath/ar9170/Kconfig" 27source "drivers/net/wireless/ath/ar9170/Kconfig"
28source "drivers/net/wireless/ath/carl9170/Kconfig"
28 29
29endif 30endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 8113a5042afa..6d711ec97ec2 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,11 +1,13 @@
1obj-$(CONFIG_ATH5K) += ath5k/ 1obj-$(CONFIG_ATH5K) += ath5k/
2obj-$(CONFIG_ATH9K_HW) += ath9k/ 2obj-$(CONFIG_ATH9K_HW) += ath9k/
3obj-$(CONFIG_AR9170_USB) += ar9170/ 3obj-$(CONFIG_AR9170_USB) += ar9170/
4obj-$(CONFIG_CARL9170) += carl9170/
4 5
5obj-$(CONFIG_ATH_COMMON) += ath.o 6obj-$(CONFIG_ATH_COMMON) += ath.o
6 7
7ath-objs := main.o \ 8ath-objs := main.o \
8 regd.o \ 9 regd.o \
9 hw.o 10 hw.o \
11 key.o
10 12
11ath-$(CONFIG_ATH_DEBUG) += debug.o 13ath-$(CONFIG_ATH_DEBUG) += debug.o
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index debfb0fbc7c5..32bf79e6a320 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -1190,14 +1190,13 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1190 if (info->control.hw_key) { 1190 if (info->control.hw_key) {
1191 icv = info->control.hw_key->icv_len; 1191 icv = info->control.hw_key->icv_len;
1192 1192
1193 switch (info->control.hw_key->alg) { 1193 switch (info->control.hw_key->cipher) {
1194 case ALG_WEP: 1194 case WLAN_CIPHER_SUITE_WEP40:
1195 case WLAN_CIPHER_SUITE_WEP104:
1196 case WLAN_CIPHER_SUITE_TKIP:
1195 keytype = AR9170_TX_MAC_ENCR_RC4; 1197 keytype = AR9170_TX_MAC_ENCR_RC4;
1196 break; 1198 break;
1197 case ALG_TKIP: 1199 case WLAN_CIPHER_SUITE_CCMP:
1198 keytype = AR9170_TX_MAC_ENCR_RC4;
1199 break;
1200 case ALG_CCMP:
1201 keytype = AR9170_TX_MAC_ENCR_AES; 1200 keytype = AR9170_TX_MAC_ENCR_AES;
1202 break; 1201 break;
1203 default: 1202 default:
@@ -1778,17 +1777,17 @@ static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1778 if ((!ar->vif) || (ar->disable_offload)) 1777 if ((!ar->vif) || (ar->disable_offload))
1779 return -EOPNOTSUPP; 1778 return -EOPNOTSUPP;
1780 1779
1781 switch (key->alg) { 1780 switch (key->cipher) {
1782 case ALG_WEP: 1781 case WLAN_CIPHER_SUITE_WEP40:
1783 if (key->keylen == WLAN_KEY_LEN_WEP40) 1782 ktype = AR9170_ENC_ALG_WEP64;
1784 ktype = AR9170_ENC_ALG_WEP64; 1783 break;
1785 else 1784 case WLAN_CIPHER_SUITE_WEP104:
1786 ktype = AR9170_ENC_ALG_WEP128; 1785 ktype = AR9170_ENC_ALG_WEP128;
1787 break; 1786 break;
1788 case ALG_TKIP: 1787 case WLAN_CIPHER_SUITE_TKIP:
1789 ktype = AR9170_ENC_ALG_TKIP; 1788 ktype = AR9170_ENC_ALG_TKIP;
1790 break; 1789 break;
1791 case ALG_CCMP: 1790 case WLAN_CIPHER_SUITE_CCMP:
1792 ktype = AR9170_ENC_ALG_AESCCMP; 1791 ktype = AR9170_ENC_ALG_AESCCMP;
1793 break; 1792 break;
1794 default: 1793 default:
@@ -1827,7 +1826,7 @@ static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1827 if (err) 1826 if (err)
1828 goto out; 1827 goto out;
1829 1828
1830 if (key->alg == ALG_TKIP) { 1829 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1831 err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, 1830 err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
1832 ktype, 1, key->key + 16, 16); 1831 ktype, 1, key->key + 16, 16);
1833 if (err) 1832 if (err)
@@ -1864,7 +1863,7 @@ static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1864 if (err) 1863 if (err)
1865 goto out; 1864 goto out;
1866 1865
1867 if (key->alg == ALG_TKIP) { 1866 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1868 err = ar9170_upload_key(ar, key->hw_key_idx, 1867 err = ar9170_upload_key(ar, key->hw_key_idx,
1869 NULL, 1868 NULL,
1870 AR9170_ENC_ALG_NONE, 1, 1869 AR9170_ENC_ALG_NONE, 1,
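The TKIP branches above upload key->key + 16 as a second key-cache entry because mac80211 hands TKIP material over as one 32-byte blob: the temporal key first, then the Michael MIC keys at fixed nl80211 offsets. A sketch of the layout (the accessor name is illustrative):

#include <linux/nl80211.h>
#include <linux/types.h>

/* Bytes 0..15: TK, 16..23: RX Michael MIC key, 24..31: TX Michael MIC key. */
static const u8 *tkip_rx_mic(const u8 *key_material)
{
	return key_material + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;	/* 16 */
}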
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index d32f2828b098..dd236c3b52f6 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -71,6 +71,32 @@ struct ath_regulatory {
71 struct reg_dmn_pair_mapping *regpair; 71 struct reg_dmn_pair_mapping *regpair;
72}; 72};
73 73
74enum ath_crypt_caps {
75 ATH_CRYPT_CAP_CIPHER_AESCCM = BIT(0),
76 ATH_CRYPT_CAP_MIC_COMBINED = BIT(1),
77};
78
79struct ath_keyval {
80 u8 kv_type;
81 u8 kv_pad;
82 u16 kv_len;
83 u8 kv_val[16]; /* TK */
84 u8 kv_mic[8]; /* Michael MIC key */
85 u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware
86 * supports both MIC keys in the same key cache entry;
87 * in that case, kv_mic is the RX key) */
88};
89
90enum ath_cipher {
91 ATH_CIPHER_WEP = 0,
92 ATH_CIPHER_AES_OCB = 1,
93 ATH_CIPHER_AES_CCM = 2,
94 ATH_CIPHER_CKIP = 3,
95 ATH_CIPHER_TKIP = 4,
96 ATH_CIPHER_CLR = 5,
97 ATH_CIPHER_MIC = 127
98};
99
74/** 100/**
75 * struct ath_ops - Register read/write operations 101 * struct ath_ops - Register read/write operations
76 * 102 *
@@ -119,7 +145,8 @@ struct ath_common {
119 145
120 u32 keymax; 146 u32 keymax;
121 DECLARE_BITMAP(keymap, ATH_KEYMAX); 147 DECLARE_BITMAP(keymap, ATH_KEYMAX);
122 u8 splitmic; 148 DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
149 enum ath_crypt_caps crypt_caps;
123 150
124 struct ath_regulatory regulatory; 151 struct ath_regulatory regulatory;
125 const struct ath_ops *ops; 152 const struct ath_ops *ops;
@@ -131,5 +158,11 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
131 gfp_t gfp_mask); 158 gfp_t gfp_mask);
132 159
133void ath_hw_setbssidmask(struct ath_common *common); 160void ath_hw_setbssidmask(struct ath_common *common);
161void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
162int ath_key_config(struct ath_common *common,
163 struct ieee80211_vif *vif,
164 struct ieee80211_sta *sta,
165 struct ieee80211_key_conf *key);
166bool ath_hw_keyreset(struct ath_common *common, u16 entry);
134 167
135#endif /* ATH_H */ 168#endif /* ATH_H */
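The new fields in struct ath_common replace the per-driver splitmic flag with capability bits and pair the existing keymap with a tkip_keymap, so the shared key code can tell which occupied key-cache entries also own Michael MIC slots. A sketch of the bitmap bookkeeping, assuming that intent (names and sizes illustrative):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EX_KEYMAX 128

static DECLARE_BITMAP(ex_keymap, EX_KEYMAX);
static DECLARE_BITMAP(ex_tkip_keymap, EX_KEYMAX);

static void ex_mark_tkip_entry(unsigned int entry)
{
	set_bit(entry, ex_keymap);
	set_bit(entry, ex_tkip_keymap);	/* entry also consumes MIC slots */
}

static void ex_clear_entry(unsigned int entry)
{
	clear_bit(entry, ex_keymap);
	clear_bit(entry, ex_tkip_keymap);
}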
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 26dbe65fedb0..e4a5f046bba4 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -552,9 +552,9 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
552 if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO) 552 if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
553 return; 553 return;
554 554
555 /* if one of the errors triggered, we can get a superfluous second 555 /* If one of the errors triggered, we can get a superfluous second
556 * interrupt, even though we have already reset the register. the 556 * interrupt, even though we have already reset the register. The
557 * function detects that so we can return early */ 557 * function detects that so we can return early. */
558 if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0) 558 if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
559 return; 559 return;
560 560
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ea6362a8988d..b96bb985b56d 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -175,7 +175,7 @@
175#define AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF 0 175#define AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF 0
176#define AR5K_TUNE_RADAR_ALERT false 176#define AR5K_TUNE_RADAR_ALERT false
177#define AR5K_TUNE_MIN_TX_FIFO_THRES 1 177#define AR5K_TUNE_MIN_TX_FIFO_THRES 1
178#define AR5K_TUNE_MAX_TX_FIFO_THRES ((IEEE80211_MAX_LEN / 64) + 1) 178#define AR5K_TUNE_MAX_TX_FIFO_THRES ((IEEE80211_MAX_FRAME_LEN / 64) + 1)
179#define AR5K_TUNE_REGISTER_TIMEOUT 20000 179#define AR5K_TUNE_REGISTER_TIMEOUT 20000
180/* Register for RSSI threshold has a mask of 0xff, so 255 seems to 180/* Register for RSSI threshold has a mask of 0xff, so 255 seems to
181 * be the max value. */ 181 * be the max value. */
@@ -206,6 +206,8 @@
206#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */ 206#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
207#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */ 207#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */
208 208
209#define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */
210
209#define AR5K_INIT_CARR_SENSE_EN 1 211#define AR5K_INIT_CARR_SENSE_EN 1
210 212
211/*Swap RX/TX Descriptor for big endian archs*/ 213/*Swap RX/TX Descriptor for big endian archs*/
@@ -256,8 +258,6 @@
256 (AR5K_INIT_PROG_IFS_TURBO) \ 258 (AR5K_INIT_PROG_IFS_TURBO) \
257) 259)
258 260
259/* token to use for aifs, cwmin, cwmax in MadWiFi */
260#define AR5K_TXQ_USEDEFAULT ((u32) -1)
261 261
262/* GENERIC CHIPSET DEFINITIONS */ 262/* GENERIC CHIPSET DEFINITIONS */
263 263
@@ -343,9 +343,6 @@ struct ath5k_srev_name {
343#define AR5K_SREV_PHY_5413 0x61 343#define AR5K_SREV_PHY_5413 0x61
344#define AR5K_SREV_PHY_2425 0x70 344#define AR5K_SREV_PHY_2425 0x70
345 345
346/* IEEE defs */
347#define IEEE80211_MAX_LEN 2500
348
349/* TODO add support to mac80211 for vendor-specific rates and modes */ 346/* TODO add support to mac80211 for vendor-specific rates and modes */
350 347
351/* 348/*
@@ -531,9 +528,9 @@ struct ath5k_txq_info {
531 enum ath5k_tx_queue tqi_type; 528 enum ath5k_tx_queue tqi_type;
532 enum ath5k_tx_queue_subtype tqi_subtype; 529 enum ath5k_tx_queue_subtype tqi_subtype;
533 u16 tqi_flags; /* Tx queue flags (see above) */ 530 u16 tqi_flags; /* Tx queue flags (see above) */
534 u32 tqi_aifs; /* Arbitrated Interframe Space */ 531 u8 tqi_aifs; /* Arbitrated Interframe Space */
535 s32 tqi_cw_min; /* Minimum Contention Window */ 532 u16 tqi_cw_min; /* Minimum Contention Window */
536 s32 tqi_cw_max; /* Maximum Contention Window */ 533 u16 tqi_cw_max; /* Maximum Contention Window */
537 u32 tqi_cbr_period; /* Constant bit rate period */ 534 u32 tqi_cbr_period; /* Constant bit rate period */
538 u32 tqi_cbr_overflow_limit; 535 u32 tqi_cbr_overflow_limit;
539 u32 tqi_burst_time; 536 u32 tqi_burst_time;
@@ -1031,8 +1028,6 @@ struct ath5k_hw {
1031 bool ah_turbo; 1028 bool ah_turbo;
1032 bool ah_calibration; 1029 bool ah_calibration;
1033 bool ah_single_chip; 1030 bool ah_single_chip;
1034 bool ah_aes_support;
1035 bool ah_combined_mic;
1036 1031
1037 enum ath5k_version ah_version; 1032 enum ath5k_version ah_version;
1038 enum ath5k_radio ah_radio; 1033 enum ath5k_radio ah_radio;
@@ -1047,9 +1042,6 @@ struct ath5k_hw {
1047#define ah_ee_version ah_capabilities.cap_eeprom.ee_version 1042#define ah_ee_version ah_capabilities.cap_eeprom.ee_version
1048 1043
1049 u32 ah_atim_window; 1044 u32 ah_atim_window;
1050 u32 ah_aifs;
1051 u32 ah_cw_min;
1052 u32 ah_cw_max;
1053 u32 ah_limit_tx_retries; 1045 u32 ah_limit_tx_retries;
1054 u8 ah_coverage_class; 1046 u8 ah_coverage_class;
1055 1047
@@ -1190,7 +1182,7 @@ extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
1190void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class); 1182void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
1191/* BSSID Functions */ 1183/* BSSID Functions */
1192int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac); 1184int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
1193void ath5k_hw_set_associd(struct ath5k_hw *ah); 1185void ath5k_hw_set_bssid(struct ath5k_hw *ah);
1194void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask); 1186void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
1195/* Receive start/stop functions */ 1187/* Receive start/stop functions */
1196void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah); 1188void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
@@ -1210,11 +1202,6 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high);
1210unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec); 1202unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
1211unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock); 1203unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
1212unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah); 1204unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
1213/* Key table (WEP) functions */
1214int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
1215int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
1216 const struct ieee80211_key_conf *key, const u8 *mac);
1217int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
1218 1205
1219/* Queue Control Unit, DFS Control Unit Functions */ 1206/* Queue Control Unit, DFS Control Unit Functions */
1220int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, 1207int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index b32e28caeee2..6e02de311cdd 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -119,8 +119,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; 119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
120 ah->ah_imr = 0; 120 ah->ah_imr = 0;
121 ah->ah_atim_window = 0; 121 ah->ah_atim_window = 0;
122 ah->ah_aifs = AR5K_TUNE_AIFS;
123 ah->ah_cw_min = AR5K_TUNE_CWMIN;
124 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY; 122 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
125 ah->ah_software_retry = false; 123 ah->ah_software_retry = false;
126 ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT; 124 ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
@@ -139,12 +137,12 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
139 else 137 else
140 ah->ah_version = AR5K_AR5212; 138 ah->ah_version = AR5K_AR5212;
141 139
142 /*Fill the ath5k_hw struct with the needed functions*/ 140 /* Fill the ath5k_hw struct with the needed functions */
143 ret = ath5k_hw_init_desc_functions(ah); 141 ret = ath5k_hw_init_desc_functions(ah);
144 if (ret) 142 if (ret)
145 goto err_free; 143 goto err_free;
146 144
147 /* Bring device out of sleep and reset it's units */ 145 /* Bring device out of sleep and reset its units */
148 ret = ath5k_hw_nic_wakeup(ah, 0, true); 146 ret = ath5k_hw_nic_wakeup(ah, 0, true);
149 if (ret) 147 if (ret)
150 goto err_free; 148 goto err_free;
@@ -158,7 +156,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
158 CHANNEL_5GHZ); 156 CHANNEL_5GHZ);
159 ah->ah_phy = AR5K_PHY(0); 157 ah->ah_phy = AR5K_PHY(0);
160 158
161 /* Try to identify radio chip based on it's srev */ 159 /* Try to identify radio chip based on its srev */
162 switch (ah->ah_radio_5ghz_revision & 0xf0) { 160 switch (ah->ah_radio_5ghz_revision & 0xf0) {
163 case AR5K_SREV_RAD_5111: 161 case AR5K_SREV_RAD_5111:
164 ah->ah_radio = AR5K_RF5111; 162 ah->ah_radio = AR5K_RF5111;
@@ -314,12 +312,16 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
314 } 312 }
315 313
316 /* Crypto settings */ 314 /* Crypto settings */
317 ah->ah_aes_support = srev >= AR5K_SREV_AR5212_V4 && 315 common->keymax = (sc->ah->ah_version == AR5K_AR5210 ?
318 (ee->ee_version >= AR5K_EEPROM_VERSION_5_0 && 316 AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
319 !AR5K_EEPROM_AES_DIS(ee->ee_misc5)); 317
318 if (srev >= AR5K_SREV_AR5212_V4 &&
319 (ee->ee_version >= AR5K_EEPROM_VERSION_5_0 &&
320 !AR5K_EEPROM_AES_DIS(ee->ee_misc5)))
321 common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
320 322
321 if (srev >= AR5K_SREV_AR2414) { 323 if (srev >= AR5K_SREV_AR2414) {
322 ah->ah_combined_mic = true; 324 common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
323 AR5K_REG_ENABLE_BITS(ah, AR5K_MISC_MODE, 325 AR5K_REG_ENABLE_BITS(ah, AR5K_MISC_MODE,
324 AR5K_MISC_MODE_COMBINED_MIC); 326 AR5K_MISC_MODE_COMBINED_MIC);
325 } 327 }
@@ -329,7 +331,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
329 331
330 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */ 332 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
331 memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN); 333 memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
332 ath5k_hw_set_associd(ah); 334 ath5k_hw_set_bssid(ah);
333 ath5k_hw_set_opmode(ah, sc->opmode); 335 ath5k_hw_set_opmode(ah, sc->opmode);
334 336
335 ath5k_hw_rfgain_opt_init(ah); 337 ath5k_hw_rfgain_opt_init(ah);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index d77ce9906b6c..95072db0ec21 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -70,11 +70,6 @@ static int modparam_all_channels;
70module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO); 70module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
71MODULE_PARM_DESC(all_channels, "Expose all channels the device can use."); 71MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
72 72
73
74/******************\
75* Internal defines *
76\******************/
77
78/* Module info */ 73/* Module info */
79MODULE_AUTHOR("Jiri Slaby"); 74MODULE_AUTHOR("Jiri Slaby");
80MODULE_AUTHOR("Nick Kossifidis"); 75MODULE_AUTHOR("Nick Kossifidis");
@@ -83,6 +78,10 @@ MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
83MODULE_LICENSE("Dual BSD/GPL"); 78MODULE_LICENSE("Dual BSD/GPL");
84MODULE_VERSION("0.6.0 (EXPERIMENTAL)"); 79MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
85 80
81static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
82static int ath5k_beacon_update(struct ieee80211_hw *hw,
83 struct ieee80211_vif *vif);
84static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
86 85
87/* Known PCI ids */ 86/* Known PCI ids */
88static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = { 87static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
@@ -190,129 +189,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
190 /* XR missing */ 189 /* XR missing */
191}; 190};
192 191
193/*
194 * Prototypes - PCI stack related functions
195 */
196static int __devinit ath5k_pci_probe(struct pci_dev *pdev,
197 const struct pci_device_id *id);
198static void __devexit ath5k_pci_remove(struct pci_dev *pdev);
199#ifdef CONFIG_PM_SLEEP
200static int ath5k_pci_suspend(struct device *dev);
201static int ath5k_pci_resume(struct device *dev);
202
203static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
204#define ATH5K_PM_OPS (&ath5k_pm_ops)
205#else
206#define ATH5K_PM_OPS NULL
207#endif /* CONFIG_PM_SLEEP */
208
209static struct pci_driver ath5k_pci_driver = {
210 .name = KBUILD_MODNAME,
211 .id_table = ath5k_pci_id_table,
212 .probe = ath5k_pci_probe,
213 .remove = __devexit_p(ath5k_pci_remove),
214 .driver.pm = ATH5K_PM_OPS,
215};
216
217
218
219/*
220 * Prototypes - MAC 802.11 stack related functions
221 */
222static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
223static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
224 struct ath5k_txq *txq);
225static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
226static int ath5k_start(struct ieee80211_hw *hw);
227static void ath5k_stop(struct ieee80211_hw *hw);
228static int ath5k_add_interface(struct ieee80211_hw *hw,
229 struct ieee80211_vif *vif);
230static void ath5k_remove_interface(struct ieee80211_hw *hw,
231 struct ieee80211_vif *vif);
232static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
233static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
234 struct netdev_hw_addr_list *mc_list);
235static void ath5k_configure_filter(struct ieee80211_hw *hw,
236 unsigned int changed_flags,
237 unsigned int *new_flags,
238 u64 multicast);
239static int ath5k_set_key(struct ieee80211_hw *hw,
240 enum set_key_cmd cmd,
241 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
242 struct ieee80211_key_conf *key);
243static int ath5k_get_stats(struct ieee80211_hw *hw,
244 struct ieee80211_low_level_stats *stats);
245static int ath5k_get_survey(struct ieee80211_hw *hw,
246 int idx, struct survey_info *survey);
247static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
248static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
249static void ath5k_reset_tsf(struct ieee80211_hw *hw);
250static int ath5k_beacon_update(struct ieee80211_hw *hw,
251 struct ieee80211_vif *vif);
252static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
253 struct ieee80211_vif *vif,
254 struct ieee80211_bss_conf *bss_conf,
255 u32 changes);
256static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
257static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
258static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
259 u8 coverage_class);
260
261static const struct ieee80211_ops ath5k_hw_ops = {
262 .tx = ath5k_tx,
263 .start = ath5k_start,
264 .stop = ath5k_stop,
265 .add_interface = ath5k_add_interface,
266 .remove_interface = ath5k_remove_interface,
267 .config = ath5k_config,
268 .prepare_multicast = ath5k_prepare_multicast,
269 .configure_filter = ath5k_configure_filter,
270 .set_key = ath5k_set_key,
271 .get_stats = ath5k_get_stats,
272 .get_survey = ath5k_get_survey,
273 .conf_tx = NULL,
274 .get_tsf = ath5k_get_tsf,
275 .set_tsf = ath5k_set_tsf,
276 .reset_tsf = ath5k_reset_tsf,
277 .bss_info_changed = ath5k_bss_info_changed,
278 .sw_scan_start = ath5k_sw_scan_start,
279 .sw_scan_complete = ath5k_sw_scan_complete,
280 .set_coverage_class = ath5k_set_coverage_class,
281};
282
283/*
284 * Prototypes - Internal functions
285 */
286/* Attach detach */
287static int ath5k_attach(struct pci_dev *pdev,
288 struct ieee80211_hw *hw);
289static void ath5k_detach(struct pci_dev *pdev,
290 struct ieee80211_hw *hw);
291/* Channel/mode setup */
292static inline short ath5k_ieee2mhz(short chan);
293static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
294 struct ieee80211_channel *channels,
295 unsigned int mode,
296 unsigned int max);
297static int ath5k_setup_bands(struct ieee80211_hw *hw);
298static int ath5k_chan_set(struct ath5k_softc *sc,
299 struct ieee80211_channel *chan);
300static void ath5k_setcurmode(struct ath5k_softc *sc,
301 unsigned int mode);
302static void ath5k_mode_setup(struct ath5k_softc *sc);
303
304/* Descriptor setup */
305static int ath5k_desc_alloc(struct ath5k_softc *sc,
306 struct pci_dev *pdev);
307static void ath5k_desc_free(struct ath5k_softc *sc,
308 struct pci_dev *pdev);
309/* Buffers setup */
310static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
311 struct ath5k_buf *bf);
312static int ath5k_txbuf_setup(struct ath5k_softc *sc,
313 struct ath5k_buf *bf,
314 struct ath5k_txq *txq, int padsize);
315
316static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc, 192static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
317 struct ath5k_buf *bf) 193 struct ath5k_buf *bf)
318{ 194{
@@ -345,35 +221,6 @@ static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
345} 221}
346 222
347 223
348/* Queues setup */
349static struct ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc,
350 int qtype, int subtype);
351static int ath5k_beaconq_setup(struct ath5k_hw *ah);
352static int ath5k_beaconq_config(struct ath5k_softc *sc);
353static void ath5k_txq_drainq(struct ath5k_softc *sc,
354 struct ath5k_txq *txq);
355static void ath5k_txq_cleanup(struct ath5k_softc *sc);
356static void ath5k_txq_release(struct ath5k_softc *sc);
357/* Rx handling */
358static int ath5k_rx_start(struct ath5k_softc *sc);
359static void ath5k_rx_stop(struct ath5k_softc *sc);
360static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc,
361 struct sk_buff *skb,
362 struct ath5k_rx_status *rs);
363static void ath5k_tasklet_rx(unsigned long data);
364/* Tx handling */
365static void ath5k_tx_processq(struct ath5k_softc *sc,
366 struct ath5k_txq *txq);
367static void ath5k_tasklet_tx(unsigned long data);
368/* Beacon handling */
369static int ath5k_beacon_setup(struct ath5k_softc *sc,
370 struct ath5k_buf *bf);
371static void ath5k_beacon_send(struct ath5k_softc *sc);
372static void ath5k_beacon_config(struct ath5k_softc *sc);
373static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
374static void ath5k_tasklet_beacon(unsigned long data);
375static void ath5k_tasklet_ani(unsigned long data);
376
377static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) 224static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
378{ 225{
379 u64 tsf = ath5k_hw_get_tsf64(ah); 226 u64 tsf = ath5k_hw_get_tsf64(ah);
@@ -384,50 +231,6 @@ static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
384 return (tsf & ~0x7fff) | rstamp; 231 return (tsf & ~0x7fff) | rstamp;
385} 232}
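The hunk above keeps only the first and last lines of ath5k_extend_tsf, but the idea is worth spelling out: the hardware stamps each rx descriptor with only the low 15 bits of the 64-bit TSF. A self-contained sketch of the technique follows; the wraparound step is an assumption about the elided lines, not quoted from the patch.

	#include <stdint.h>

	/* Splice a 15-bit rx timestamp back into the full 64-bit TSF. */
	static uint64_t extend_tsf15(uint64_t tsf64, uint32_t rstamp)
	{
		/* If the low bits already wrapped past the frame's stamp,
		 * the frame belongs to the previous 0x8000-tick period. */
		if ((tsf64 & 0x7fff) < rstamp)
			tsf64 -= 0x8000;
		return (tsf64 & ~0x7fffULL) | rstamp;
	}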
386 233
387/* Interrupt handling */
388static int ath5k_init(struct ath5k_softc *sc);
389static int ath5k_stop_locked(struct ath5k_softc *sc);
390static int ath5k_stop_hw(struct ath5k_softc *sc);
391static irqreturn_t ath5k_intr(int irq, void *dev_id);
392static void ath5k_reset_work(struct work_struct *work);
393
394static void ath5k_tasklet_calibrate(unsigned long data);
395
396/*
397 * Module init/exit functions
398 */
399static int __init
400init_ath5k_pci(void)
401{
402 int ret;
403
404 ath5k_debug_init();
405
406 ret = pci_register_driver(&ath5k_pci_driver);
407 if (ret) {
408 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
409 return ret;
410 }
411
412 return 0;
413}
414
415static void __exit
416exit_ath5k_pci(void)
417{
418 pci_unregister_driver(&ath5k_pci_driver);
419
420 ath5k_debug_finish();
421}
422
423module_init(init_ath5k_pci);
424module_exit(exit_ath5k_pci);
425
426
427/********************\
428* PCI Initialization *
429\********************/
430
431static const char * 234static const char *
432ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) 235ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
433{ 236{
@@ -466,299 +269,6 @@ static const struct ath_ops ath5k_common_ops = {
466 .write = ath5k_iowrite32, 269 .write = ath5k_iowrite32,
467}; 270};
468 271
469static int __devinit
470ath5k_pci_probe(struct pci_dev *pdev,
471 const struct pci_device_id *id)
472{
473 void __iomem *mem;
474 struct ath5k_softc *sc;
475 struct ath_common *common;
476 struct ieee80211_hw *hw;
477 int ret;
478 u8 csz;
479
480 /*
481 * L0s needs to be disabled on all ath5k cards.
482 *
483	 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
484	 * by default in 2.6.36) this will also mean both L1 and
485	 * L0s will be disabled when a pre-1.1 PCIe device is detected. We
486	 * know L1 works correctly even for all ath5k pre-1.1 PCIe devices,
487	 * but we cannot currently undo the effect of a blacklist; for
488	 * details you can read pcie_aspm_sanity_check() and see how it adjusts
489 * the device link capability.
490 *
491 * It may be possible in the future to implement some PCI API to allow
492 * drivers to override blacklists for pre 1.1 PCIe but for now it is
493 * best to accept that both L0s and L1 will be disabled completely for
494 * distributions shipping with CONFIG_PCIEASPM rather than having this
495 * issue present. Motivation for adding this new API will be to help
496 * with power consumption for some of these devices.
497 */
498 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
499
500 ret = pci_enable_device(pdev);
501 if (ret) {
502 dev_err(&pdev->dev, "can't enable device\n");
503 goto err;
504 }
505
506 /* XXX 32-bit addressing only */
507 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
508 if (ret) {
509 dev_err(&pdev->dev, "32-bit DMA not available\n");
510 goto err_dis;
511 }
512
513 /*
514 * Cache line size is used to size and align various
515 * structures used to communicate with the hardware.
516 */
517 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
518 if (csz == 0) {
519 /*
520 * Linux 2.4.18 (at least) writes the cache line size
521		 * register as a 16-bit wide register, which is wrong.
522		 * We must have this set up properly for rx buffer
523		 * DMA to work, so force a reasonable value here if it
524 * comes up zero.
525 */
526 csz = L1_CACHE_BYTES >> 2;
527 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
528 }
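A note on units, since the shifts are easy to misread: PCI_CACHE_LINE_SIZE counts 32-bit words, while the driver works in bytes. An illustrative fragment of the two conversions used in this probe path (not a literal quote of the patch):

	/* Illustration only: the PCI register holds dwords, the driver wants bytes. */
	u8 csz = L1_CACHE_BYTES >> 2;		/* bytes -> dwords for the register */
	unsigned int cachelsz = csz << 2;	/* dwords -> bytes for rx buffer sizing */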
529 /*
530 * The default setting of latency timer yields poor results,
531 * set it to the value used by other systems. It may be worth
532 * tweaking this setting more.
533 */
534 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
535
536 /* Enable bus mastering */
537 pci_set_master(pdev);
538
539 /*
540 * Disable the RETRY_TIMEOUT register (0x41) to keep
541 * PCI Tx retries from interfering with C3 CPU state.
542 */
543 pci_write_config_byte(pdev, 0x41, 0);
544
545 ret = pci_request_region(pdev, 0, "ath5k");
546 if (ret) {
547 dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
548 goto err_dis;
549 }
550
551 mem = pci_iomap(pdev, 0, 0);
552 if (!mem) {
553		dev_err(&pdev->dev, "cannot remap PCI memory region\n");
554 ret = -EIO;
555 goto err_reg;
556 }
557
558 /*
559 * Allocate hw (mac80211 main struct)
560 * and hw->priv (driver private data)
561 */
562 hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
563 if (hw == NULL) {
564 dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
565 ret = -ENOMEM;
566 goto err_map;
567 }
568
569 dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
570
571 /* Initialize driver private data */
572 SET_IEEE80211_DEV(hw, &pdev->dev);
573 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
574 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
575 IEEE80211_HW_SIGNAL_DBM;
576
577 hw->wiphy->interface_modes =
578 BIT(NL80211_IFTYPE_AP) |
579 BIT(NL80211_IFTYPE_STATION) |
580 BIT(NL80211_IFTYPE_ADHOC) |
581 BIT(NL80211_IFTYPE_MESH_POINT);
582
583 hw->extra_tx_headroom = 2;
584 hw->channel_change_time = 5000;
585 sc = hw->priv;
586 sc->hw = hw;
587 sc->pdev = pdev;
588
589 ath5k_debug_init_device(sc);
590
591 /*
592 * Mark the device as detached to avoid processing
593 * interrupts until setup is complete.
594 */
595 __set_bit(ATH_STAT_INVALID, sc->status);
596
597 sc->iobase = mem; /* So we can unmap it on detach */
598 sc->opmode = NL80211_IFTYPE_STATION;
599 sc->bintval = 1000;
600 mutex_init(&sc->lock);
601 spin_lock_init(&sc->rxbuflock);
602 spin_lock_init(&sc->txbuflock);
603 spin_lock_init(&sc->block);
604
605 /* Set private data */
606 pci_set_drvdata(pdev, sc);
607
608 /* Setup interrupt handler */
609 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
610 if (ret) {
611 ATH5K_ERR(sc, "request_irq failed\n");
612 goto err_free;
613 }
614
615	/* If we passed the test, allocate an ath5k_hw struct */
616 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
617 if (!sc->ah) {
618 ret = -ENOMEM;
619 ATH5K_ERR(sc, "out of memory\n");
620 goto err_irq;
621 }
622
623 sc->ah->ah_sc = sc;
624 sc->ah->ah_iobase = sc->iobase;
625 common = ath5k_hw_common(sc->ah);
626 common->ops = &ath5k_common_ops;
627 common->ah = sc->ah;
628 common->hw = hw;
629 common->cachelsz = csz << 2; /* convert to bytes */
630
631 /* Initialize device */
632 ret = ath5k_hw_attach(sc);
633 if (ret) {
634 goto err_free_ah;
635 }
636
637 /* set up multi-rate retry capabilities */
638 if (sc->ah->ah_version == AR5K_AR5212) {
639 hw->max_rates = 4;
640 hw->max_rate_tries = 11;
641 }
642
643 /* Finish private driver data initialization */
644 ret = ath5k_attach(pdev, hw);
645 if (ret)
646 goto err_ah;
647
648 ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
649 ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
650 sc->ah->ah_mac_srev,
651 sc->ah->ah_phy_revision);
652
653 if (!sc->ah->ah_single_chip) {
654 /* Single chip radio (!RF5111) */
655 if (sc->ah->ah_radio_5ghz_revision &&
656 !sc->ah->ah_radio_2ghz_revision) {
657 /* No 5GHz support -> report 2GHz radio */
658 if (!test_bit(AR5K_MODE_11A,
659 sc->ah->ah_capabilities.cap_mode)) {
660 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
661 ath5k_chip_name(AR5K_VERSION_RAD,
662 sc->ah->ah_radio_5ghz_revision),
663 sc->ah->ah_radio_5ghz_revision);
664 /* No 2GHz support (5110 and some
665			 * 5GHz-only cards) -> report 5GHz radio */
666 } else if (!test_bit(AR5K_MODE_11B,
667 sc->ah->ah_capabilities.cap_mode)) {
668 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
669 ath5k_chip_name(AR5K_VERSION_RAD,
670 sc->ah->ah_radio_5ghz_revision),
671 sc->ah->ah_radio_5ghz_revision);
672 /* Multiband radio */
673 } else {
674 ATH5K_INFO(sc, "RF%s multiband radio found"
675 " (0x%x)\n",
676 ath5k_chip_name(AR5K_VERSION_RAD,
677 sc->ah->ah_radio_5ghz_revision),
678 sc->ah->ah_radio_5ghz_revision);
679 }
680 }
681 /* Multi chip radio (RF5111 - RF2111) ->
682 * report both 2GHz/5GHz radios */
683 else if (sc->ah->ah_radio_5ghz_revision &&
684			sc->ah->ah_radio_2ghz_revision) {
685 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
686 ath5k_chip_name(AR5K_VERSION_RAD,
687 sc->ah->ah_radio_5ghz_revision),
688 sc->ah->ah_radio_5ghz_revision);
689 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
690 ath5k_chip_name(AR5K_VERSION_RAD,
691 sc->ah->ah_radio_2ghz_revision),
692 sc->ah->ah_radio_2ghz_revision);
693 }
694 }
695
696
697 /* ready to process interrupts */
698 __clear_bit(ATH_STAT_INVALID, sc->status);
699
700 return 0;
701err_ah:
702 ath5k_hw_detach(sc->ah);
703err_irq:
704 free_irq(pdev->irq, sc);
705err_free_ah:
706 kfree(sc->ah);
707err_free:
708 ieee80211_free_hw(hw);
709err_map:
710 pci_iounmap(pdev, mem);
711err_reg:
712 pci_release_region(pdev, 0);
713err_dis:
714 pci_disable_device(pdev);
715err:
716 return ret;
717}
718
719static void __devexit
720ath5k_pci_remove(struct pci_dev *pdev)
721{
722 struct ath5k_softc *sc = pci_get_drvdata(pdev);
723
724 ath5k_debug_finish_device(sc);
725 ath5k_detach(pdev, sc->hw);
726 ath5k_hw_detach(sc->ah);
727 kfree(sc->ah);
728 free_irq(pdev->irq, sc);
729 pci_iounmap(pdev, sc->iobase);
730 pci_release_region(pdev, 0);
731 pci_disable_device(pdev);
732 ieee80211_free_hw(sc->hw);
733}
734
735#ifdef CONFIG_PM_SLEEP
736static int ath5k_pci_suspend(struct device *dev)
737{
738 struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
739
740 ath5k_led_off(sc);
741 return 0;
742}
743
744static int ath5k_pci_resume(struct device *dev)
745{
746 struct pci_dev *pdev = to_pci_dev(dev);
747 struct ath5k_softc *sc = pci_get_drvdata(pdev);
748
749 /*
750 * Suspend/Resume resets the PCI configuration space, so we have to
751 * re-disable the RETRY_TIMEOUT register (0x41) to keep
752 * PCI Tx retries from interfering with C3 CPU state
753 */
754 pci_write_config_byte(pdev, 0x41, 0);
755
756 ath5k_led_enable(sc);
757 return 0;
758}
759#endif /* CONFIG_PM_SLEEP */
760
761
762/***********************\ 272/***********************\
763* Driver Initialization * 273* Driver Initialization *
764\***********************/ 274\***********************/
@@ -772,170 +282,6 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
772 return ath_reg_notifier_apply(wiphy, request, regulatory); 282 return ath_reg_notifier_apply(wiphy, request, regulatory);
773} 283}
774 284
775static int
776ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
777{
778 struct ath5k_softc *sc = hw->priv;
779 struct ath5k_hw *ah = sc->ah;
780 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
781 u8 mac[ETH_ALEN] = {};
782 int ret;
783
784 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
785
786 /*
787 * Check if the MAC has multi-rate retry support.
788	 * We do this by trying to set up a fake extended
789 * descriptor. MAC's that don't have support will
790 * return false w/o doing anything. MAC's that do
791 * support it will return true w/o doing anything.
792 */
793 ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
794
795 if (ret < 0)
796 goto err;
797 if (ret > 0)
798 __set_bit(ATH_STAT_MRRETRY, sc->status);
799
800 /*
801 * Collect the channel list. The 802.11 layer
802	 * is responsible for filtering this list based
803 * on settings like the phy mode and regulatory
804 * domain restrictions.
805 */
806 ret = ath5k_setup_bands(hw);
807 if (ret) {
808 ATH5K_ERR(sc, "can't get channels\n");
809 goto err;
810 }
811
812 /* NB: setup here so ath5k_rate_update is happy */
813 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
814 ath5k_setcurmode(sc, AR5K_MODE_11A);
815 else
816 ath5k_setcurmode(sc, AR5K_MODE_11B);
817
818 /*
819 * Allocate tx+rx descriptors and populate the lists.
820 */
821 ret = ath5k_desc_alloc(sc, pdev);
822 if (ret) {
823 ATH5K_ERR(sc, "can't allocate descriptors\n");
824 goto err;
825 }
826
827 /*
828 * Allocate hardware transmit queues: one queue for
829 * beacon frames and one data queue for each QoS
830 * priority. Note that hw functions handle reseting
831 * these queues at the needed time.
832 */
833 ret = ath5k_beaconq_setup(ah);
834 if (ret < 0) {
835 ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
836 goto err_desc;
837 }
838 sc->bhalq = ret;
839 sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
840 if (IS_ERR(sc->cabq)) {
841 ATH5K_ERR(sc, "can't setup cab queue\n");
842 ret = PTR_ERR(sc->cabq);
843 goto err_bhal;
844 }
845
846 sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
847 if (IS_ERR(sc->txq)) {
848 ATH5K_ERR(sc, "can't setup xmit queue\n");
849 ret = PTR_ERR(sc->txq);
850 goto err_queues;
851 }
852
853 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
854 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
855 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
856 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
857 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
858
859 INIT_WORK(&sc->reset_work, ath5k_reset_work);
860
861 ret = ath5k_eeprom_read_mac(ah, mac);
862 if (ret) {
863 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
864 sc->pdev->device);
865 goto err_queues;
866 }
867
868 SET_IEEE80211_PERM_ADDR(hw, mac);
869 /* All MAC address bits matter for ACKs */
870 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
871 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
872
873 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
874 ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
875 if (ret) {
876 ATH5K_ERR(sc, "can't initialize regulatory system\n");
877 goto err_queues;
878 }
879
880 ret = ieee80211_register_hw(hw);
881 if (ret) {
882 ATH5K_ERR(sc, "can't register ieee80211 hw\n");
883 goto err_queues;
884 }
885
886 if (!ath_is_world_regd(regulatory))
887 regulatory_hint(hw->wiphy, regulatory->alpha2);
888
889 ath5k_init_leds(sc);
890
891 ath5k_sysfs_register(sc);
892
893 return 0;
894err_queues:
895 ath5k_txq_release(sc);
896err_bhal:
897 ath5k_hw_release_tx_queue(ah, sc->bhalq);
898err_desc:
899 ath5k_desc_free(sc, pdev);
900err:
901 return ret;
902}
903
904static void
905ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
906{
907 struct ath5k_softc *sc = hw->priv;
908
909 /*
910 * NB: the order of these is important:
911 * o call the 802.11 layer before detaching ath5k_hw to
912 * insure callbacks into the driver to delete global
913 * key cache entries can be handled
914 * o reclaim the tx queue data structures after calling
915 * the 802.11 layer as we'll get called back to reclaim
916 * node state and potentially want to use them
917 * o to cleanup the tx queues the hal is called, so detach
918 * it last
919 * XXX: ??? detach ath5k_hw ???
920 * Other than that, it's straightforward...
921 */
922 ieee80211_unregister_hw(hw);
923 ath5k_desc_free(sc, pdev);
924 ath5k_txq_release(sc);
925 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
926 ath5k_unregister_leds(sc);
927
928 ath5k_sysfs_unregister(sc);
929 /*
930 * NB: can't reclaim these until after ieee80211_ifdetach
931 * returns because we'll get called back to reclaim node
932 * state and potentially want to use them.
933 */
934}
935
936
937
938
939/********************\ 285/********************\
940* Channel/mode setup * 286* Channel/mode setup *
941\********************/ 287\********************/
@@ -1391,6 +737,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1391 737
1392 spin_lock_bh(&txq->lock); 738 spin_lock_bh(&txq->lock);
1393 list_add_tail(&bf->list, &txq->q); 739 list_add_tail(&bf->list, &txq->q);
740 txq->txq_len++;
1394 if (txq->link == NULL) /* is this first packet? */ 741 if (txq->link == NULL) /* is this first packet? */
1395 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); 742 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
1396 else /* no, so only link it */ 743 else /* no, so only link it */
@@ -1494,9 +841,6 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
1494} 841}
1495 842
1496 843
1497
1498
1499
1500/**************\ 844/**************\
1501* Queues setup * 845* Queues setup *
1502\**************/ 846\**************/
@@ -1509,16 +853,18 @@ ath5k_txq_setup(struct ath5k_softc *sc,
1509 struct ath5k_txq *txq; 853 struct ath5k_txq *txq;
1510 struct ath5k_txq_info qi = { 854 struct ath5k_txq_info qi = {
1511 .tqi_subtype = subtype, 855 .tqi_subtype = subtype,
1512 .tqi_aifs = AR5K_TXQ_USEDEFAULT, 856 /* XXX: default values not correct for B and XR channels,
1513 .tqi_cw_min = AR5K_TXQ_USEDEFAULT, 857 * but who cares? */
1514 .tqi_cw_max = AR5K_TXQ_USEDEFAULT 858 .tqi_aifs = AR5K_TUNE_AIFS,
859 .tqi_cw_min = AR5K_TUNE_CWMIN,
860 .tqi_cw_max = AR5K_TUNE_CWMAX
1515 }; 861 };
1516 int qnum; 862 int qnum;
1517 863
1518 /* 864 /*
1519 * Enable interrupts only for EOL and DESC conditions. 865 * Enable interrupts only for EOL and DESC conditions.
1520 * We mark tx descriptors to receive a DESC interrupt 866 * We mark tx descriptors to receive a DESC interrupt
1521 * when a tx queue gets deep; otherwise waiting for the 867 * when a tx queue gets deep; otherwise we wait for the
1522 * EOL to reap descriptors. Note that this is done to 868 * EOL to reap descriptors. Note that this is done to
1523 * reduce interrupt load and this only defers reaping 869 * reduce interrupt load and this only defers reaping
1524 * descriptors, never transmitting frames. Aside from 870 * descriptors, never transmitting frames. Aside from
@@ -1550,6 +896,9 @@ ath5k_txq_setup(struct ath5k_softc *sc,
1550 INIT_LIST_HEAD(&txq->q); 896 INIT_LIST_HEAD(&txq->q);
1551 spin_lock_init(&txq->lock); 897 spin_lock_init(&txq->lock);
1552 txq->setup = true; 898 txq->setup = true;
899 txq->txq_len = 0;
900 txq->txq_poll_mark = false;
901 txq->txq_stuck = 0;
1553 } 902 }
1554 return &sc->txqs[qnum]; 903 return &sc->txqs[qnum];
1555} 904}
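The flag assignment that implements the interrupt policy described in the comment above falls in the elided lines of this hunk; a hedged sketch of what it looks like (AR5K_TXQ_FLAG_TXDESCINT_ENABLE appears verbatim in the beacon queue setup below, the TXEOLINT name is an assumption):

	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
		       AR5K_TXQ_FLAG_TXDESCINT_ENABLE;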
@@ -1558,9 +907,11 @@ static int
1558ath5k_beaconq_setup(struct ath5k_hw *ah) 907ath5k_beaconq_setup(struct ath5k_hw *ah)
1559{ 908{
1560 struct ath5k_txq_info qi = { 909 struct ath5k_txq_info qi = {
1561 .tqi_aifs = AR5K_TXQ_USEDEFAULT, 910 /* XXX: default values not correct for B and XR channels,
1562 .tqi_cw_min = AR5K_TXQ_USEDEFAULT, 911 * but who cares? */
1563 .tqi_cw_max = AR5K_TXQ_USEDEFAULT, 912 .tqi_aifs = AR5K_TUNE_AIFS,
913 .tqi_cw_min = AR5K_TUNE_CWMIN,
914 .tqi_cw_max = AR5K_TUNE_CWMAX,
1564 /* NB: for dynamic turbo, don't enable any other interrupts */ 915 /* NB: for dynamic turbo, don't enable any other interrupts */
1565 .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE 916 .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
1566 }; 917 };
@@ -1594,7 +945,7 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1594 */ 945 */
1595 qi.tqi_aifs = 0; 946 qi.tqi_aifs = 0;
1596 qi.tqi_cw_min = 0; 947 qi.tqi_cw_min = 0;
1597 qi.tqi_cw_max = 2 * ah->ah_cw_min; 948 qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
1598 } 949 }
1599 950
1600 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, 951 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
@@ -1644,9 +995,11 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1644 spin_lock_bh(&sc->txbuflock); 995 spin_lock_bh(&sc->txbuflock);
1645 list_move_tail(&bf->list, &sc->txbuf); 996 list_move_tail(&bf->list, &sc->txbuf);
1646 sc->txbuf_len++; 997 sc->txbuf_len++;
998 txq->txq_len--;
1647 spin_unlock_bh(&sc->txbuflock); 999 spin_unlock_bh(&sc->txbuflock);
1648 } 1000 }
1649 txq->link = NULL; 1001 txq->link = NULL;
1002 txq->txq_poll_mark = false;
1650 spin_unlock_bh(&txq->lock); 1003 spin_unlock_bh(&txq->lock);
1651} 1004}
1652 1005
@@ -1696,8 +1049,6 @@ ath5k_txq_release(struct ath5k_softc *sc)
1696} 1049}
1697 1050
1698 1051
1699
1700
1701/*************\ 1052/*************\
1702* RX Handling * 1053* RX Handling *
1703\*************/ 1054\*************/
@@ -1713,7 +1064,7 @@ ath5k_rx_start(struct ath5k_softc *sc)
1713 struct ath5k_buf *bf; 1064 struct ath5k_buf *bf;
1714 int ret; 1065 int ret;
1715 1066
1716 common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz); 1067 common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);
1717 1068
1718 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n", 1069 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
1719 common->cachelsz, common->rx_bufsize); 1070 common->cachelsz, common->rx_bufsize);
@@ -1863,7 +1214,7 @@ ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
1863} 1214}
1864 1215
1865/* 1216/*
1866 * Compute padding position. skb must contains an IEEE 802.11 frame 1217 * Compute padding position. skb must contain an IEEE 802.11 frame
1867 */ 1218 */
1868static int ath5k_common_padpos(struct sk_buff *skb) 1219static int ath5k_common_padpos(struct sk_buff *skb)
1869{ 1220{
@@ -1882,10 +1233,9 @@ static int ath5k_common_padpos(struct sk_buff *skb)
1882} 1233}
1883 1234
1884/* 1235/*
1885 * This function expects a 802.11 frame and returns the number of 1236 * This function expects an 802.11 frame and returns the number of
1886 * bytes added, or -1 if we don't have enought header room. 1237 * bytes added, or -1 if we don't have enough header room.
1887 */ 1238 */
1888
1889static int ath5k_add_padding(struct sk_buff *skb) 1239static int ath5k_add_padding(struct sk_buff *skb)
1890{ 1240{
1891 int padpos = ath5k_common_padpos(skb); 1241 int padpos = ath5k_common_padpos(skb);
@@ -1905,10 +1255,18 @@ static int ath5k_add_padding(struct sk_buff *skb)
1905} 1255}
1906 1256
1907/* 1257/*
1908 * This function expects a 802.11 frame and returns the number of 1258 * The MAC header is padded to have 32-bit boundary if the
1909 * bytes removed 1259 * packet payload is non-zero. The general calculation for
1260 * padsize would take into account odd header lengths:
1261 * padsize = 4 - (hdrlen & 3); however, since only
1262 * even-length headers are used, padding can only be 0 or 2
1263 * bytes and we can optimize this a bit. We must not try to
1264 * remove padding from short control frames that do not have a
1265 * payload.
1266 *
1267 * This function expects an 802.11 frame and returns the number of
1268 * bytes removed.
1910 */ 1269 */
1911
1912static int ath5k_remove_padding(struct sk_buff *skb) 1270static int ath5k_remove_padding(struct sk_buff *skb)
1913{ 1271{
1914 int padpos = ath5k_common_padpos(skb); 1272 int padpos = ath5k_common_padpos(skb);
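A worked example of the padding rule from the comment above (an illustrative helper, not driver code): with 802.11 header lengths always even, hdrlen & 3 is either 0 or 2, so at most two pad bytes are ever inserted.

	/* Pad needed after an (even) 802.11 header to reach a 4-byte boundary. */
	static int pad_after_hdr(int hdrlen)
	{
		return hdrlen & 3;	/* 24 -> 0, 26 -> 2, 30 -> 2, 32 -> 0 */
	}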
@@ -1929,14 +1287,6 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
1929{ 1287{
1930 struct ieee80211_rx_status *rxs; 1288 struct ieee80211_rx_status *rxs;
1931 1289
1932 /* The MAC header is padded to have 32-bit boundary if the
1933 * packet payload is non-zero. The general calculation for
1934 * padsize would take into account odd header lengths:
1935 * padsize = (4 - hdrlen % 4) % 4; However, since only
1936 * even-length headers are used, padding can only be 0 or 2
1937 * bytes and we can optimize this a bit. In addition, we must
1938 * not try to remove padding from short control frames that do
1939 * not have payload. */
1940 ath5k_remove_padding(skb); 1290 ath5k_remove_padding(skb);
1941 1291
1942 rxs = IEEE80211_SKB_RXCB(skb); 1292 rxs = IEEE80211_SKB_RXCB(skb);
@@ -2040,9 +1390,8 @@ ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
2040 return true; 1390 return true;
2041 } 1391 }
2042 1392
2043 /* let crypto-error packets fall through in MNTR */ 1393 /* reject any frames with non-crypto errors */
2044 if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || 1394 if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
2045 sc->opmode != NL80211_IFTYPE_MONITOR)
2046 return false; 1395 return false;
2047 } 1396 }
2048 1397
@@ -2123,6 +1472,117 @@ unlock:
2123* TX Handling * 1472* TX Handling *
2124\*************/ 1473\*************/
2125 1474
1475static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1476 struct ath5k_txq *txq)
1477{
1478 struct ath5k_softc *sc = hw->priv;
1479 struct ath5k_buf *bf;
1480 unsigned long flags;
1481 int padsize;
1482
1483 ath5k_debug_dump_skb(sc, skb, "TX ", 1);
1484
1485 /*
1486	 * The hardware expects the header padded to 4-byte boundaries.
1487 * If this is not the case, we add the padding after the header.
1488 */
1489 padsize = ath5k_add_padding(skb);
1490 if (padsize < 0) {
1491 ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
1492 " headroom to pad");
1493 goto drop_packet;
1494 }
1495
1496 if (txq->txq_len >= ATH5K_TXQ_LEN_MAX)
1497 ieee80211_stop_queue(hw, txq->qnum);
1498
1499 spin_lock_irqsave(&sc->txbuflock, flags);
1500 if (list_empty(&sc->txbuf)) {
1501 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
1502 spin_unlock_irqrestore(&sc->txbuflock, flags);
1503 ieee80211_stop_queues(hw);
1504 goto drop_packet;
1505 }
1506 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
1507 list_del(&bf->list);
1508 sc->txbuf_len--;
1509 if (list_empty(&sc->txbuf))
1510 ieee80211_stop_queues(hw);
1511 spin_unlock_irqrestore(&sc->txbuflock, flags);
1512
1513 bf->skb = skb;
1514
1515 if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
1516 bf->skb = NULL;
1517 spin_lock_irqsave(&sc->txbuflock, flags);
1518 list_add_tail(&bf->list, &sc->txbuf);
1519 sc->txbuf_len++;
1520 spin_unlock_irqrestore(&sc->txbuflock, flags);
1521 goto drop_packet;
1522 }
1523 return NETDEV_TX_OK;
1524
1525drop_packet:
1526 dev_kfree_skb_any(skb);
1527 return NETDEV_TX_OK;
1528}
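The stop threshold above pairs with a lower wake threshold in ath5k_tx_processq further down, giving simple hysteresis so a queue does not thrash between stopped and started. A sketch of the pattern; the threshold values here are assumptions, the real constants live in the patch's headers, which are not part of this hunk:

	#define TXQ_LEN_MAX	50	/* assumed example value */
	#define TXQ_LEN_LOW	25	/* assumed example value */

	static void txq_flow_control(struct ath5k_softc *sc, struct ath5k_txq *txq)
	{
		if (txq->txq_len >= TXQ_LEN_MAX)
			ieee80211_stop_queue(sc->hw, txq->qnum);	/* filling up */
		else if (txq->txq_len < TXQ_LEN_LOW)
			ieee80211_wake_queue(sc->hw, txq->qnum);	/* drained enough */
	}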
1529
1530static void
1531ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
1532 struct ath5k_tx_status *ts)
1533{
1534 struct ieee80211_tx_info *info;
1535 int i;
1536
1537 sc->stats.tx_all_count++;
1538 info = IEEE80211_SKB_CB(skb);
1539
1540 ieee80211_tx_info_clear_status(info);
1541 for (i = 0; i < 4; i++) {
1542 struct ieee80211_tx_rate *r =
1543 &info->status.rates[i];
1544
1545 if (ts->ts_rate[i]) {
1546 r->idx = ath5k_hw_to_driver_rix(sc, ts->ts_rate[i]);
1547 r->count = ts->ts_retry[i];
1548 } else {
1549 r->idx = -1;
1550 r->count = 0;
1551 }
1552 }
1553
1554 /* count the successful attempt as well */
1555 info->status.rates[ts->ts_final_idx].count++;
1556
1557 if (unlikely(ts->ts_status)) {
1558 sc->stats.ack_fail++;
1559 if (ts->ts_status & AR5K_TXERR_FILT) {
1560 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1561 sc->stats.txerr_filt++;
1562 }
1563 if (ts->ts_status & AR5K_TXERR_XRETRY)
1564 sc->stats.txerr_retry++;
1565 if (ts->ts_status & AR5K_TXERR_FIFO)
1566 sc->stats.txerr_fifo++;
1567 } else {
1568 info->flags |= IEEE80211_TX_STAT_ACK;
1569 info->status.ack_signal = ts->ts_rssi;
1570 }
1571
1572 /*
1573 * Remove MAC header padding before giving the frame
1574 * back to mac80211.
1575 */
1576 ath5k_remove_padding(skb);
1577
1578 if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
1579 sc->stats.antenna_tx[ts->ts_antenna]++;
1580 else
1581 sc->stats.antenna_tx[0]++; /* invalid */
1582
1583 ieee80211_tx_status(sc->hw, skb);
1584}
1585
2126static void 1586static void
2127ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq) 1587ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
2128{ 1588{
@@ -2130,96 +1590,51 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
2130 struct ath5k_buf *bf, *bf0; 1590 struct ath5k_buf *bf, *bf0;
2131 struct ath5k_desc *ds; 1591 struct ath5k_desc *ds;
2132 struct sk_buff *skb; 1592 struct sk_buff *skb;
2133 struct ieee80211_tx_info *info; 1593 int ret;
2134 int i, ret;
2135 1594
2136 spin_lock(&txq->lock); 1595 spin_lock(&txq->lock);
2137 list_for_each_entry_safe(bf, bf0, &txq->q, list) { 1596 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
2138 ds = bf->desc;
2139
2140 /*
2141 * It's possible that the hardware can say the buffer is
2142 * completed when it hasn't yet loaded the ds_link from
2143 * host memory and moved on. If there are more TX
2144 * descriptors in the queue, wait for TXDP to change
2145 * before processing this one.
2146 */
2147 if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
2148 !list_is_last(&bf->list, &txq->q))
2149 break;
2150
2151 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
2152 if (unlikely(ret == -EINPROGRESS))
2153 break;
2154 else if (unlikely(ret)) {
2155 ATH5K_ERR(sc, "error %d while processing queue %u\n",
2156 ret, txq->qnum);
2157 break;
2158 }
2159 1597
2160 sc->stats.tx_all_count++; 1598 txq->txq_poll_mark = false;
2161 skb = bf->skb;
2162 info = IEEE80211_SKB_CB(skb);
2163 bf->skb = NULL;
2164 1599
2165 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, 1600 /* skb might already have been processed last time. */
2166 PCI_DMA_TODEVICE); 1601 if (bf->skb != NULL) {
1602 ds = bf->desc;
2167 1603
2168 ieee80211_tx_info_clear_status(info); 1604 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
2169 for (i = 0; i < 4; i++) { 1605 if (unlikely(ret == -EINPROGRESS))
2170 struct ieee80211_tx_rate *r = 1606 break;
2171 &info->status.rates[i]; 1607 else if (unlikely(ret)) {
2172 1608 ATH5K_ERR(sc,
2173 if (ts.ts_rate[i]) { 1609 "error %d while processing "
2174 r->idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]); 1610 "queue %u\n", ret, txq->qnum);
2175 r->count = ts.ts_retry[i]; 1611 break;
2176 } else {
2177 r->idx = -1;
2178 r->count = 0;
2179 } 1612 }
2180 }
2181 1613
2182 /* count the successful attempt as well */ 1614 skb = bf->skb;
2183 info->status.rates[ts.ts_final_idx].count++; 1615 bf->skb = NULL;
2184 1616 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
2185 if (unlikely(ts.ts_status)) { 1617 PCI_DMA_TODEVICE);
2186 sc->stats.ack_fail++; 1618 ath5k_tx_frame_completed(sc, skb, &ts);
2187 if (ts.ts_status & AR5K_TXERR_FILT) {
2188 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2189 sc->stats.txerr_filt++;
2190 }
2191 if (ts.ts_status & AR5K_TXERR_XRETRY)
2192 sc->stats.txerr_retry++;
2193 if (ts.ts_status & AR5K_TXERR_FIFO)
2194 sc->stats.txerr_fifo++;
2195 } else {
2196 info->flags |= IEEE80211_TX_STAT_ACK;
2197 info->status.ack_signal = ts.ts_rssi;
2198 } 1619 }
2199 1620
2200 /* 1621 /*
2201 * Remove MAC header padding before giving the frame 1622 * It's possible that the hardware can say the buffer is
2202 * back to mac80211. 1623 * completed when it hasn't yet loaded the ds_link from
1624 * host memory and moved on.
1625 * Always keep the last descriptor to avoid HW races...
2203 */ 1626 */
2204 ath5k_remove_padding(skb); 1627 if (ath5k_hw_get_txdp(sc->ah, txq->qnum) != bf->daddr) {
2205 1628 spin_lock(&sc->txbuflock);
2206 if (ts.ts_antenna > 0 && ts.ts_antenna < 5) 1629 list_move_tail(&bf->list, &sc->txbuf);
2207 sc->stats.antenna_tx[ts.ts_antenna]++; 1630 sc->txbuf_len++;
2208 else 1631 txq->txq_len--;
2209 sc->stats.antenna_tx[0]++; /* invalid */ 1632 spin_unlock(&sc->txbuflock);
2210 1633 }
2211 ieee80211_tx_status(sc->hw, skb);
2212
2213 spin_lock(&sc->txbuflock);
2214 list_move_tail(&bf->list, &sc->txbuf);
2215 sc->txbuf_len++;
2216 spin_unlock(&sc->txbuflock);
2217 } 1634 }
2218 if (likely(list_empty(&txq->q)))
2219 txq->link = NULL;
2220 spin_unlock(&txq->lock); 1635 spin_unlock(&txq->lock);
2221 if (sc->txbuf_len > ATH_TXBUF / 5) 1636 if (txq->txq_len < ATH5K_TXQ_LEN_LOW)
2222 ieee80211_wake_queues(sc->hw); 1637 ieee80211_wake_queue(sc->hw, txq->qnum);
2223} 1638}
2224 1639
2225static void 1640static void
@@ -2285,10 +1700,11 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2285 * default antenna which is supposed to be an omni. 1700 * default antenna which is supposed to be an omni.
2286 * 1701 *
2287 * Note2: On sectored scenarios it's possible to have 1702 * Note2: On sectored scenarios it's possible to have
2288 * multiple antennas (1omni -the default- and 14 sectors) 1703 * multiple antennas (1 omni -- the default -- and 14
2289 * so if we choose to actually support this mode we need 1704 * sectors), so if we choose to actually support this
2290 * to allow user to set how many antennas we have and tweak 1705 * mode, we need to allow the user to set how many antennas
2291 * the code below to send beacons on all of them. 1706 * we have and tweak the code below to send beacons
1707 * on all of them.
2292 */ 1708 */
2293 if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP) 1709 if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
2294 antenna = sc->bsent & 4 ? 2 : 1; 1710 antenna = sc->bsent & 4 ? 2 : 1;
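In sectored-AP mode the assignment above alternates the beacon antenna in groups of four, since it tests bit 2 of the sent counter. A quick standalone check of the pattern (illustration only):

	#include <stdio.h>

	int main(void)
	{
		/* beacon antenna chosen by `bsent & 4 ? 2 : 1` */
		for (int bsent = 0; bsent < 12; bsent++)
			printf("%d ", (bsent & 4) ? 2 : 1);
		printf("\n");	/* prints: 1 1 1 1 2 2 2 2 1 1 1 1 */
		return 0;
	}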
@@ -2314,6 +1730,43 @@ err_unmap:
2314} 1730}
2315 1731
2316/* 1732/*
1733 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
1734 * this is called only once at config_bss time; for AP we do it at every
1735 * SWBA interrupt so that the TIM will reflect buffered frames.
1736 *
1737 * Called with the beacon lock.
1738 */
1739static int
1740ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1741{
1742 int ret;
1743 struct ath5k_softc *sc = hw->priv;
1744 struct sk_buff *skb;
1745
1746 if (WARN_ON(!vif)) {
1747 ret = -EINVAL;
1748 goto out;
1749 }
1750
1751 skb = ieee80211_beacon_get(hw, vif);
1752
1753 if (!skb) {
1754 ret = -ENOMEM;
1755 goto out;
1756 }
1757
1758 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
1759
1760 ath5k_txbuf_free_skb(sc, sc->bbuf);
1761 sc->bbuf->skb = skb;
1762 ret = ath5k_beacon_setup(sc, sc->bbuf);
1763 if (ret)
1764 sc->bbuf->skb = NULL;
1765out:
1766 return ret;
1767}
1768
1769/*
2317 * Transmit a beacon frame at SWBA. Dynamic updates to the 1770 * Transmit a beacon frame at SWBA. Dynamic updates to the
2318 * frame contents are done as needed and the slot time is 1771 * frame contents are done as needed and the slot time is
2319 * also adjusted based on current state. 1772 * also adjusted based on current state.
@@ -2330,14 +1783,13 @@ ath5k_beacon_send(struct ath5k_softc *sc)
2330 1783
2331 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n"); 1784 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");
2332 1785
2333 if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION || 1786 if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION)) {
2334 sc->opmode == NL80211_IFTYPE_MONITOR)) {
2335 ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL); 1787 ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
2336 return; 1788 return;
2337 } 1789 }
2338 /* 1790 /*
2339 * Check if the previous beacon has gone out. If 1791 * Check if the previous beacon has gone out. If
2340	 * not don't try to post another, skip this	1792	 * not, don't try to post another: skip this
2341 * period and wait for the next. Missed beacons 1793 * period and wait for the next. Missed beacons
2342 * indicate a problem and should not occur. If we 1794 * indicate a problem and should not occur. If we
2343 * miss too many consecutive beacons reset the device. 1795 * miss too many consecutive beacons reset the device.
@@ -2391,7 +1843,6 @@ ath5k_beacon_send(struct ath5k_softc *sc)
2391 sc->bsent++; 1843 sc->bsent++;
2392} 1844}
2393 1845
2394
2395/** 1846/**
2396 * ath5k_beacon_update_timers - update beacon timers 1847 * ath5k_beacon_update_timers - update beacon timers
2397 * 1848 *
@@ -2493,7 +1944,6 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
2493 intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : ""); 1944 intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
2494} 1945}
2495 1946
2496
2497/** 1947/**
2498 * ath5k_beacon_config - Configure the beacon queues and interrupts 1948 * ath5k_beacon_config - Configure the beacon queues and interrupts
2499 * 1949 *
@@ -2572,155 +2022,6 @@ static void ath5k_tasklet_beacon(unsigned long data)
2572* Interrupt handling * 2022* Interrupt handling *
2573\********************/ 2023\********************/
2574 2024
2575static int
2576ath5k_init(struct ath5k_softc *sc)
2577{
2578 struct ath5k_hw *ah = sc->ah;
2579 int ret, i;
2580
2581 mutex_lock(&sc->lock);
2582
2583 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
2584
2585 /*
2586 * Stop anything previously setup. This is safe
2587 * no matter this is the first time through or not.
2588 */
2589 ath5k_stop_locked(sc);
2590
2591 /*
2592 * The basic interface to setting the hardware in a good
2593 * state is ``reset''. On return the hardware is known to
2594 * be powered up and with interrupts disabled. This must
2595 * be followed by initialization of the appropriate bits
2596 * and then setup of the interrupt mask.
2597 */
2598 sc->curchan = sc->hw->conf.channel;
2599 sc->curband = &sc->sbands[sc->curchan->band];
2600 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2601 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2602 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2603
2604 ret = ath5k_reset(sc, NULL);
2605 if (ret)
2606 goto done;
2607
2608 ath5k_rfkill_hw_start(ah);
2609
2610 /*
2611 * Reset the key cache since some parts do not reset the
2612 * contents on initial power up or resume from suspend.
2613 */
2614 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
2615 ath5k_hw_reset_key(ah, i);
2616
2617 ath5k_hw_set_ack_bitrate_high(ah, true);
2618 ret = 0;
2619done:
2620 mmiowb();
2621 mutex_unlock(&sc->lock);
2622 return ret;
2623}
2624
2625static int
2626ath5k_stop_locked(struct ath5k_softc *sc)
2627{
2628 struct ath5k_hw *ah = sc->ah;
2629
2630 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
2631 test_bit(ATH_STAT_INVALID, sc->status));
2632
2633 /*
2634 * Shutdown the hardware and driver:
2635 * stop output from above
2636 * disable interrupts
2637 * turn off timers
2638 * turn off the radio
2639 * clear transmit machinery
2640 * clear receive machinery
2641 * drain and release tx queues
2642 * reclaim beacon resources
2643 * power down hardware
2644 *
2645 * Note that some of this work is not possible if the
2646 * hardware is gone (invalid).
2647 */
2648 ieee80211_stop_queues(sc->hw);
2649
2650 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2651 ath5k_led_off(sc);
2652 ath5k_hw_set_imr(ah, 0);
2653 synchronize_irq(sc->pdev->irq);
2654 }
2655 ath5k_txq_cleanup(sc);
2656 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2657 ath5k_rx_stop(sc);
2658 ath5k_hw_phy_disable(ah);
2659 }
2660
2661 return 0;
2662}
2663
2664static void stop_tasklets(struct ath5k_softc *sc)
2665{
2666 tasklet_kill(&sc->rxtq);
2667 tasklet_kill(&sc->txtq);
2668 tasklet_kill(&sc->calib);
2669 tasklet_kill(&sc->beacontq);
2670 tasklet_kill(&sc->ani_tasklet);
2671}
2672
2673/*
2674 * Stop the device, grabbing the top-level lock to protect
2675 * against concurrent entry through ath5k_init (which can happen
2676 * if another thread does a system call and the thread doing the
2677 * stop is preempted).
2678 */
2679static int
2680ath5k_stop_hw(struct ath5k_softc *sc)
2681{
2682 int ret;
2683
2684 mutex_lock(&sc->lock);
2685 ret = ath5k_stop_locked(sc);
2686 if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
2687 /*
2688 * Don't set the card in full sleep mode!
2689 *
2690 * a) When the device is in this state it must be carefully
2691 * woken up or references to registers in the PCI clock
2692 * domain may freeze the bus (and system). This varies
2693 * by chip and is mostly an issue with newer parts
2694 * (madwifi sources mentioned srev >= 0x78) that go to
2695 * sleep more quickly.
2696 *
2697		 * b) On older chips full sleep results in weird behaviour
2698 * during wakeup. I tested various cards with srev < 0x78
2699 * and they don't wake up after module reload, a second
2700 * module reload is needed to bring the card up again.
2701 *
2702		 * Until we figure out what's going on, don't enable
2703		 * full chip reset on any chip (this is what Legacy HAL
2704		 * and Sam's HAL do anyway). Instead, perform a full reset
2705 * on the device (same as initial state after attach) and
2706 * leave it idle (keep MAC/BB on warm reset) */
2707 ret = ath5k_hw_on_hold(sc->ah);
2708
2709 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2710 "putting device to sleep\n");
2711 }
2712 ath5k_txbuf_free_skb(sc, sc->bbuf);
2713
2714 mmiowb();
2715 mutex_unlock(&sc->lock);
2716
2717 stop_tasklets(sc);
2718
2719 ath5k_rfkill_hw_stop(sc->ah);
2720
2721 return ret;
2722}
2723
2724static void 2025static void
2725ath5k_intr_calibration_poll(struct ath5k_hw *ah) 2026ath5k_intr_calibration_poll(struct ath5k_hw *ah)
2726{ 2027{
@@ -2857,14 +2158,13 @@ ath5k_tasklet_calibrate(unsigned long data)
2857 sc->curchan->center_freq)); 2158 sc->curchan->center_freq));
2858 2159
2859 /* Noise floor calibration interrupts rx/tx path while I/Q calibration 2160 /* Noise floor calibration interrupts rx/tx path while I/Q calibration
2860 * doesn't. We stop the queues so that calibration doesn't interfere 2161 * doesn't.
2861 * with TX and don't run it as often */ 2162 * TODO: We should stop TX here, so that it doesn't interfere.
2163 * Note that stopping the queues is not enough to stop TX! */
2862 if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) { 2164 if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
2863 ah->ah_cal_next_nf = jiffies + 2165 ah->ah_cal_next_nf = jiffies +
2864 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF); 2166 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
2865 ieee80211_stop_queues(sc->hw);
2866 ath5k_hw_update_noise_floor(ah); 2167 ath5k_hw_update_noise_floor(ah);
2867 ieee80211_wake_queues(sc->hw);
2868 } 2168 }
2869 2169
2870 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL; 2170 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
@@ -2883,71 +2183,205 @@ ath5k_tasklet_ani(unsigned long data)
2883} 2183}
2884 2184
2885 2185
2886/********************\ 2186static void
2887* Mac80211 functions * 2187ath5k_tx_complete_poll_work(struct work_struct *work)
2888\********************/ 2188{
2189 struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
2190 tx_complete_work.work);
2191 struct ath5k_txq *txq;
2192 int i;
2193 bool needreset = false;
2194
2195 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
2196 if (sc->txqs[i].setup) {
2197 txq = &sc->txqs[i];
2198 spin_lock_bh(&txq->lock);
2199 if (txq->txq_len > 1) {
2200 if (txq->txq_poll_mark) {
2201 ATH5K_DBG(sc, ATH5K_DEBUG_XMIT,
2202 "TX queue stuck %d\n",
2203 txq->qnum);
2204 needreset = true;
2205 txq->txq_stuck++;
2206 spin_unlock_bh(&txq->lock);
2207 break;
2208 } else {
2209 txq->txq_poll_mark = true;
2210 }
2211 }
2212 spin_unlock_bh(&txq->lock);
2213 }
2214 }
2215
2216 if (needreset) {
2217 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2218 "TX queues stuck, resetting\n");
2219 ath5k_reset(sc, sc->curchan);
2220 }
2221
2222 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2223 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2224}
2225
2226
2227/*************************\
2228* Initialization routines *
2229\*************************/
2889 2230
2890static int 2231static int
2891ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 2232ath5k_stop_locked(struct ath5k_softc *sc)
2892{ 2233{
2893 struct ath5k_softc *sc = hw->priv; 2234 struct ath5k_hw *ah = sc->ah;
2894 2235
2895 return ath5k_tx_queue(hw, skb, sc->txq); 2236 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
2237 test_bit(ATH_STAT_INVALID, sc->status));
2238
2239 /*
2240 * Shutdown the hardware and driver:
2241 * stop output from above
2242 * disable interrupts
2243 * turn off timers
2244 * turn off the radio
2245 * clear transmit machinery
2246 * clear receive machinery
2247 * drain and release tx queues
2248 * reclaim beacon resources
2249 * power down hardware
2250 *
2251 * Note that some of this work is not possible if the
2252 * hardware is gone (invalid).
2253 */
2254 ieee80211_stop_queues(sc->hw);
2255
2256 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2257 ath5k_led_off(sc);
2258 ath5k_hw_set_imr(ah, 0);
2259 synchronize_irq(sc->pdev->irq);
2260 }
2261 ath5k_txq_cleanup(sc);
2262 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2263 ath5k_rx_stop(sc);
2264 ath5k_hw_phy_disable(ah);
2265 }
2266
2267 return 0;
2896} 2268}
2897 2269
2898static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 2270static int
2899 struct ath5k_txq *txq) 2271ath5k_init(struct ath5k_softc *sc)
2900{ 2272{
2901 struct ath5k_softc *sc = hw->priv; 2273 struct ath5k_hw *ah = sc->ah;
2902 struct ath5k_buf *bf; 2274 struct ath_common *common = ath5k_hw_common(ah);
2903 unsigned long flags; 2275 int ret, i;
2904 int padsize;
2905 2276
2906 ath5k_debug_dump_skb(sc, skb, "TX ", 1); 2277 mutex_lock(&sc->lock);
2907 2278
2908 if (sc->opmode == NL80211_IFTYPE_MONITOR) 2279 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
2909 ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n");
2910 2280
2911 /* 2281 /*
2912	 * the hardware expects the header padded to 4 byte boundaries	2282	 * Stop anything previously set up. This is safe
2913	 * if this is not the case we add the padding after the header	2283	 * no matter whether this is the first time through or not.
2914 */ 2284 */
2915 padsize = ath5k_add_padding(skb); 2285 ath5k_stop_locked(sc);
2916 if (padsize < 0) {
2917 ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
2918 " headroom to pad");
2919 goto drop_packet;
2920 }
2921 2286
2922 spin_lock_irqsave(&sc->txbuflock, flags); 2287 /*
2923 if (list_empty(&sc->txbuf)) { 2288 * The basic interface to setting the hardware in a good
2924 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); 2289 * state is ``reset''. On return the hardware is known to
2925 spin_unlock_irqrestore(&sc->txbuflock, flags); 2290 * be powered up and with interrupts disabled. This must
2926 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); 2291 * be followed by initialization of the appropriate bits
2927 goto drop_packet; 2292 * and then setup of the interrupt mask.
2928 } 2293 */
2929 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); 2294 sc->curchan = sc->hw->conf.channel;
2930 list_del(&bf->list); 2295 sc->curband = &sc->sbands[sc->curchan->band];
2931 sc->txbuf_len--; 2296 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2932 if (list_empty(&sc->txbuf)) 2297 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2933 ieee80211_stop_queues(hw); 2298 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2934 spin_unlock_irqrestore(&sc->txbuflock, flags);
2935 2299
2936 bf->skb = skb; 2300 ret = ath5k_reset(sc, NULL);
2301 if (ret)
2302 goto done;
2937 2303
2938 if (ath5k_txbuf_setup(sc, bf, txq, padsize)) { 2304 ath5k_rfkill_hw_start(ah);
2939 bf->skb = NULL; 2305
2940 spin_lock_irqsave(&sc->txbuflock, flags); 2306 /*
2941 list_add_tail(&bf->list, &sc->txbuf); 2307 * Reset the key cache since some parts do not reset the
2942 sc->txbuf_len++; 2308 * contents on initial power up or resume from suspend.
2943 spin_unlock_irqrestore(&sc->txbuflock, flags); 2309 */
2944 goto drop_packet; 2310 for (i = 0; i < common->keymax; i++)
2311 ath_hw_keyreset(common, (u16) i);
2312
2313 ath5k_hw_set_ack_bitrate_high(ah, true);
2314 ret = 0;
2315done:
2316 mmiowb();
2317 mutex_unlock(&sc->lock);
2318
2319 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2320 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2321
2322 return ret;
2323}
2324
2325static void stop_tasklets(struct ath5k_softc *sc)
2326{
2327 tasklet_kill(&sc->rxtq);
2328 tasklet_kill(&sc->txtq);
2329 tasklet_kill(&sc->calib);
2330 tasklet_kill(&sc->beacontq);
2331 tasklet_kill(&sc->ani_tasklet);
2332}
2333
2334/*
2335 * Stop the device, grabbing the top-level lock to protect
2336 * against concurrent entry through ath5k_init (which can happen
2337 * if another thread does a system call and the thread doing the
2338 * stop is preempted).
2339 */
2340static int
2341ath5k_stop_hw(struct ath5k_softc *sc)
2342{
2343 int ret;
2344
2345 mutex_lock(&sc->lock);
2346 ret = ath5k_stop_locked(sc);
2347 if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
2348 /*
2349 * Don't set the card in full sleep mode!
2350 *
2351 * a) When the device is in this state it must be carefully
2352 * woken up or references to registers in the PCI clock
2353 * domain may freeze the bus (and system). This varies
2354 * by chip and is mostly an issue with newer parts
2355 * (madwifi sources mentioned srev >= 0x78) that go to
2356 * sleep more quickly.
2357 *
2358		 * b) On older chips full sleep results in weird behaviour
2359 * during wakeup. I tested various cards with srev < 0x78
2360 * and they don't wake up after module reload, a second
2361 * module reload is needed to bring the card up again.
2362 *
2363		 * Until we figure out what's going on, don't enable
2364		 * full chip reset on any chip (this is what Legacy HAL
2365		 * and Sam's HAL do anyway). Instead, perform a full reset
2366 * on the device (same as initial state after attach) and
2367 * leave it idle (keep MAC/BB on warm reset) */
2368 ret = ath5k_hw_on_hold(sc->ah);
2369
2370 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2371 "putting device to sleep\n");
2945 } 2372 }
2946 return NETDEV_TX_OK; 2373 ath5k_txbuf_free_skb(sc, sc->bbuf);
2947 2374
2948drop_packet: 2375 mmiowb();
2949 dev_kfree_skb_any(skb); 2376 mutex_unlock(&sc->lock);
2950 return NETDEV_TX_OK; 2377
2378 stop_tasklets(sc);
2379
2380 cancel_delayed_work_sync(&sc->tx_complete_work);
2381
2382 ath5k_rfkill_hw_stop(sc->ah);
2383
2384 return ret;
2951} 2385}
2952 2386
2953/* 2387/*
@@ -3024,6 +2458,208 @@ static void ath5k_reset_work(struct work_struct *work)
3024 mutex_unlock(&sc->lock); 2458 mutex_unlock(&sc->lock);
3025} 2459}
3026 2460
2461static int
2462ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2463{
2464 struct ath5k_softc *sc = hw->priv;
2465 struct ath5k_hw *ah = sc->ah;
2466 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
2467 struct ath5k_txq *txq;
2468 u8 mac[ETH_ALEN] = {};
2469 int ret;
2470
2471 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
2472
2473 /*
2474 * Check if the MAC has multi-rate retry support.
2475	 * We do this by trying to set up a fake extended
2476 * descriptor. MACs that don't have support will
2477 * return false w/o doing anything. MACs that do
2478 * support it will return true w/o doing anything.
2479 */
2480 ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
2481
2482 if (ret < 0)
2483 goto err;
2484 if (ret > 0)
2485 __set_bit(ATH_STAT_MRRETRY, sc->status);
2486
2487 /*
2488 * Collect the channel list. The 802.11 layer
2489	 * is responsible for filtering this list based
2490 * on settings like the phy mode and regulatory
2491 * domain restrictions.
2492 */
2493 ret = ath5k_setup_bands(hw);
2494 if (ret) {
2495 ATH5K_ERR(sc, "can't get channels\n");
+		goto err;
+	}
+
+	/* NB: setup here so ath5k_rate_update is happy */
+	if (test_bit(AR5K_MODE_11A, ah->ah_modes))
+		ath5k_setcurmode(sc, AR5K_MODE_11A);
+	else
+		ath5k_setcurmode(sc, AR5K_MODE_11B);
+
+	/*
+	 * Allocate tx+rx descriptors and populate the lists.
+	 */
+	ret = ath5k_desc_alloc(sc, pdev);
+	if (ret) {
+		ATH5K_ERR(sc, "can't allocate descriptors\n");
+		goto err;
+	}
+
+	/*
+	 * Allocate hardware transmit queues: one queue for
+	 * beacon frames and one data queue for each QoS
+	 * priority. Note that hw functions handle resetting
+	 * these queues at the needed time.
+	 */
+	ret = ath5k_beaconq_setup(ah);
+	if (ret < 0) {
+		ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
+		goto err_desc;
+	}
+	sc->bhalq = ret;
+	sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
+	if (IS_ERR(sc->cabq)) {
+		ATH5K_ERR(sc, "can't setup cab queue\n");
+		ret = PTR_ERR(sc->cabq);
+		goto err_bhal;
+	}
+
+	/* This order matches mac80211's queue priority, so we can
+	 * directly use the mac80211 queue number without any mapping */
+	txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
+	if (IS_ERR(txq)) {
+		ATH5K_ERR(sc, "can't setup xmit queue\n");
+		ret = PTR_ERR(txq);
+		goto err_queues;
+	}
+	txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
+	if (IS_ERR(txq)) {
+		ATH5K_ERR(sc, "can't setup xmit queue\n");
+		ret = PTR_ERR(txq);
+		goto err_queues;
+	}
+	txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
+	if (IS_ERR(txq)) {
+		ATH5K_ERR(sc, "can't setup xmit queue\n");
+		ret = PTR_ERR(txq);
+		goto err_queues;
+	}
+	txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
+	if (IS_ERR(txq)) {
+		ATH5K_ERR(sc, "can't setup xmit queue\n");
+		ret = PTR_ERR(txq);
+		goto err_queues;
+	}
+	hw->queues = 4;
+
+	tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
+	tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
+	tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
+	tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
+	tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
+
+	INIT_WORK(&sc->reset_work, ath5k_reset_work);
+	INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);
+
+	ret = ath5k_eeprom_read_mac(ah, mac);
+	if (ret) {
+		ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
+			sc->pdev->device);
+		goto err_queues;
+	}
+
+	SET_IEEE80211_PERM_ADDR(hw, mac);
+	/* All MAC address bits matter for ACKs */
+	memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
+	ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
+
+	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
+	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
+	if (ret) {
+		ATH5K_ERR(sc, "can't initialize regulatory system\n");
+		goto err_queues;
+	}
+
+	ret = ieee80211_register_hw(hw);
+	if (ret) {
+		ATH5K_ERR(sc, "can't register ieee80211 hw\n");
+		goto err_queues;
+	}
+
+	if (!ath_is_world_regd(regulatory))
+		regulatory_hint(hw->wiphy, regulatory->alpha2);
+
+	ath5k_init_leds(sc);
+
+	ath5k_sysfs_register(sc);
+
+	return 0;
+err_queues:
+	ath5k_txq_release(sc);
+err_bhal:
+	ath5k_hw_release_tx_queue(ah, sc->bhalq);
+err_desc:
+	ath5k_desc_free(sc, pdev);
+err:
+	return ret;
+}
+
+static void
+ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
+{
+	struct ath5k_softc *sc = hw->priv;
+
+	/*
+	 * NB: the order of these is important:
+	 * o call the 802.11 layer before detaching ath5k_hw to
+	 *   ensure callbacks into the driver to delete global
+	 *   key cache entries can be handled
+	 * o reclaim the tx queue data structures after calling
+	 *   the 802.11 layer as we'll get called back to reclaim
+	 *   node state and potentially want to use them
+	 * o to cleanup the tx queues the hal is called, so detach
+	 *   it last
+	 * XXX: ??? detach ath5k_hw ???
+	 * Other than that, it's straightforward...
+	 */
+	ieee80211_unregister_hw(hw);
+	ath5k_desc_free(sc, pdev);
+	ath5k_txq_release(sc);
+	ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
+	ath5k_unregister_leds(sc);
+
+	ath5k_sysfs_unregister(sc);
+	/*
+	 * NB: can't reclaim these until after ieee80211_ifdetach
+	 * returns because we'll get called back to reclaim node
+	 * state and potentially want to use them.
+	 */
+}
+
+/********************\
+* Mac80211 functions *
+\********************/
+
+static int
+ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct ath5k_softc *sc = hw->priv;
+	u16 qnum = skb_get_queue_mapping(skb);
+
+	if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
+		dev_kfree_skb_any(skb);
+		return 0;
+	}
+
+	return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
+}
+
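
For orientation: the direct sc->txqs[qnum] lookup in ath5k_tx() only works
because the data queues above were created in mac80211's priority order. A
minimal sketch of the assumed correspondence (illustrative only, not part of
the patch):

    /* Assumed mapping, given the VO-first setup order above:
     *   mac80211 queue 0 -> AR5K_WME_AC_VO (voice, highest priority)
     *   mac80211 queue 1 -> AR5K_WME_AC_VI (video)
     *   mac80211 queue 2 -> AR5K_WME_AC_BE (best effort)
     *   mac80211 queue 3 -> AR5K_WME_AC_BK (background, lowest)
     */
    struct ath5k_txq *txq = &sc->txqs[skb_get_queue_mapping(skb)];
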
 static int ath5k_start(struct ieee80211_hw *hw)
 {
 	return ath5k_init(hw->priv);
@@ -3053,7 +2689,6 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_ADHOC:
 	case NL80211_IFTYPE_MESH_POINT:
-	case NL80211_IFTYPE_MONITOR:
 		sc->opmode = vif->type;
 		break;
 	default:
@@ -3237,9 +2872,9 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
 		rfilt |= AR5K_RX_FILTER_PHYERR;
 
 	/* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
-	 * and probes for any BSSID, this needs testing */
+	 * and probes for any BSSID */
 	if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
-		rfilt |= AR5K_RX_FILTER_BEACON | AR5K_RX_FILTER_PROBEREQ;
+		rfilt |= AR5K_RX_FILTER_BEACON;
 
 	/* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
 	 * set we should only pass on control frames for this
@@ -3255,7 +2890,6 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
 
 	switch (sc->opmode) {
 	case NL80211_IFTYPE_MESH_POINT:
-	case NL80211_IFTYPE_MONITOR:
 		rfilt |= AR5K_RX_FILTER_CONTROL |
 			AR5K_RX_FILTER_BEACON |
 			AR5K_RX_FILTER_PROBEREQ |
@@ -3278,7 +2912,7 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
 
 	/* Set multicast bits */
 	ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
-	/* Set the cached hw filter flags, this will alter actually
+	/* Set the cached hw filter flags, this will later actually
 	 * be set in HW */
 	sc->filter_flags = rfilt;
 
@@ -3298,17 +2932,14 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 	if (modparam_nohwcrypt)
 		return -EOPNOTSUPP;
 
-	if (sc->opmode == NL80211_IFTYPE_AP)
-		return -EOPNOTSUPP;
-
-	switch (key->alg) {
-	case ALG_WEP:
-	case ALG_TKIP:
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+	case WLAN_CIPHER_SUITE_TKIP:
 		break;
-	case ALG_CCMP:
-		if (sc->ah->ah_aes_support)
+	case WLAN_CIPHER_SUITE_CCMP:
+		if (common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)
 			break;
-
 		return -EOPNOTSUPP;
 	default:
 		WARN_ON(1);
@@ -3319,27 +2950,25 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
 	switch (cmd) {
 	case SET_KEY:
-		ret = ath5k_hw_set_key(sc->ah, key->keyidx, key,
-				sta ? sta->addr : NULL);
-		if (ret) {
-			ATH5K_ERR(sc, "can't set the key\n");
-			goto unlock;
+		ret = ath_key_config(common, vif, sta, key);
+		if (ret >= 0) {
+			key->hw_key_idx = ret;
+			/* push IV and Michael MIC generation to stack */
+			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+			if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+				key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+			if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+				key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
+			ret = 0;
 		}
-		__set_bit(key->keyidx, common->keymap);
-		key->hw_key_idx = key->keyidx;
-		key->flags |= (IEEE80211_KEY_FLAG_GENERATE_IV |
-				IEEE80211_KEY_FLAG_GENERATE_MMIC);
 		break;
 	case DISABLE_KEY:
-		ath5k_hw_reset_key(sc->ah, key->keyidx);
-		__clear_bit(key->keyidx, common->keymap);
+		ath_key_delete(common, key);
 		break;
 	default:
 		ret = -EINVAL;
-		goto unlock;
 	}
 
-unlock:
 	mmiowb();
 	mutex_unlock(&sc->lock);
 	return ret;
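
This hunk follows a mac80211 API change: the per-key algorithm enum
(key->alg, ALG_*) gave way to cipher-suite selectors (key->cipher,
WLAN_CIPHER_SUITE_*), and the driver-local key cache handling is replaced
by the shared ath helpers. A rough map of the old names onto the new ones
(annotation only, not part of the patch):

    /* ALG_WEP  -> WLAN_CIPHER_SUITE_WEP40 or WLAN_CIPHER_SUITE_WEP104
     *             (the key length now selects the variant)
     * ALG_TKIP -> WLAN_CIPHER_SUITE_TKIP
     * ALG_CCMP -> WLAN_CIPHER_SUITE_CCMP
     */
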
@@ -3409,43 +3038,6 @@ ath5k_reset_tsf(struct ieee80211_hw *hw)
 	ath5k_hw_reset_tsf(sc->ah);
 }
 
-/*
- * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
- * this is called only once at config_bss time, for AP we do it every
- * SWBA interrupt so that the TIM will reflect buffered frames.
- *
- * Called with the beacon lock.
- */
-static int
-ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
-	int ret;
-	struct ath5k_softc *sc = hw->priv;
-	struct sk_buff *skb;
-
-	if (WARN_ON(!vif)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	skb = ieee80211_beacon_get(hw, vif);
-
-	if (!skb) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ath5k_debug_dump_skb(sc, skb, "BC ", 1);
-
-	ath5k_txbuf_free_skb(sc, sc->bbuf);
-	sc->bbuf->skb = skb;
-	ret = ath5k_beacon_setup(sc, sc->bbuf);
-	if (ret)
-		sc->bbuf->skb = NULL;
-out:
-	return ret;
-}
-
 static void
 set_beacon_filter(struct ieee80211_hw *hw, bool enable)
 {
@@ -3479,7 +3071,7 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
 		/* Cache for later use during resets */
 		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
 		common->curaid = 0;
-		ath5k_hw_set_associd(ah);
+		ath5k_hw_set_bssid(ah);
 		mmiowb();
 	}
 
@@ -3497,7 +3089,7 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3497 "Bss Info ASSOC %d, bssid: %pM\n", 3089 "Bss Info ASSOC %d, bssid: %pM\n",
3498 bss_conf->aid, common->curbssid); 3090 bss_conf->aid, common->curbssid);
3499 common->curaid = bss_conf->aid; 3091 common->curaid = bss_conf->aid;
3500 ath5k_hw_set_associd(ah); 3092 ath5k_hw_set_bssid(ah);
3501 /* Once ANI is available you would start it here */ 3093 /* Once ANI is available you would start it here */
3502 } 3094 }
3503 } 3095 }
@@ -3551,3 +3143,402 @@ static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
 	ath5k_hw_set_coverage_class(sc->ah, coverage_class);
 	mutex_unlock(&sc->lock);
 }
+
+static int ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
+		const struct ieee80211_tx_queue_params *params)
+{
+	struct ath5k_softc *sc = hw->priv;
+	struct ath5k_hw *ah = sc->ah;
+	struct ath5k_txq_info qi;
+	int ret = 0;
+
+	if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
+		return 0;
+
+	mutex_lock(&sc->lock);
+
+	ath5k_hw_get_tx_queueprops(ah, queue, &qi);
+
+	qi.tqi_aifs = params->aifs;
+	qi.tqi_cw_min = params->cw_min;
+	qi.tqi_cw_max = params->cw_max;
+	qi.tqi_burst_time = params->txop;
+
+	ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
+		  "Configure tx [queue %d], "
+		  "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
+		  queue, params->aifs, params->cw_min,
+		  params->cw_max, params->txop);
+
+	if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
+		ATH5K_ERR(sc,
+			  "Unable to update hardware queue %u!\n", queue);
+		ret = -EIO;
+	} else
+		ath5k_hw_reset_tx_queue(ah, queue);
+
+	mutex_unlock(&sc->lock);
+
+	return ret;
+}
+
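
The qi fields above are filled straight from mac80211's WMM parameters. As a
hypothetical example (the values are illustrative, not from this patch), the
stack might configure the voice queue roughly like this:

    struct ieee80211_tx_queue_params params = {
            .aifs   = 2,
            .cw_min = 3,    /* contention windows are 2^n - 1 values */
            .cw_max = 7,
            .txop   = 47,   /* in units of 32 microseconds */
    };
    /* ath5k_conf_tx() copies these into qi.tqi_aifs, qi.tqi_cw_min,
     * qi.tqi_cw_max and qi.tqi_burst_time before pushing them to hw. */
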
+static const struct ieee80211_ops ath5k_hw_ops = {
+	.tx			= ath5k_tx,
+	.start			= ath5k_start,
+	.stop			= ath5k_stop,
+	.add_interface		= ath5k_add_interface,
+	.remove_interface	= ath5k_remove_interface,
+	.config			= ath5k_config,
+	.prepare_multicast	= ath5k_prepare_multicast,
+	.configure_filter	= ath5k_configure_filter,
+	.set_key		= ath5k_set_key,
+	.get_stats		= ath5k_get_stats,
+	.get_survey		= ath5k_get_survey,
+	.conf_tx		= ath5k_conf_tx,
+	.get_tsf		= ath5k_get_tsf,
+	.set_tsf		= ath5k_set_tsf,
+	.reset_tsf		= ath5k_reset_tsf,
+	.bss_info_changed	= ath5k_bss_info_changed,
+	.sw_scan_start		= ath5k_sw_scan_start,
+	.sw_scan_complete	= ath5k_sw_scan_complete,
+	.set_coverage_class	= ath5k_set_coverage_class,
+};
+
+/********************\
+* PCI Initialization *
+\********************/
+
+static int __devinit
+ath5k_pci_probe(struct pci_dev *pdev,
+		const struct pci_device_id *id)
+{
+	void __iomem *mem;
+	struct ath5k_softc *sc;
+	struct ath_common *common;
+	struct ieee80211_hw *hw;
+	int ret;
+	u8 csz;
+
+	/*
+	 * L0s needs to be disabled on all ath5k cards.
+	 *
+	 * For distributions shipping with CONFIG_PCIEASPM (enabled by
+	 * default from 2.6.36 on) this also means both L1 and L0s will be
+	 * disabled when a pre 1.1 PCIe device is detected. We do know L1
+	 * works correctly even for all ath5k pre 1.1 PCIe devices, but we
+	 * cannot currently undo the effect of a blacklist; for details you
+	 * can read pcie_aspm_sanity_check() and see how it adjusts the
+	 * device link capability.
+	 *
+	 * It may be possible in the future to implement some PCI API to allow
+	 * drivers to override blacklists for pre 1.1 PCIe but for now it is
+	 * best to accept that both L0s and L1 will be disabled completely for
+	 * distributions shipping with CONFIG_PCIEASPM rather than having this
+	 * issue present. Motivation for adding this new API will be to help
+	 * with power consumption for some of these devices.
+	 */
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "can't enable device\n");
+		goto err;
+	}
+
+	/* XXX 32-bit addressing only */
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(&pdev->dev, "32-bit DMA not available\n");
+		goto err_dis;
+	}
+
+	/*
+	 * Cache line size is used to size and align various
+	 * structures used to communicate with the hardware.
+	 */
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
+	if (csz == 0) {
+		/*
+		 * Linux 2.4.18 (at least) writes the cache line size
+		 * register as a 16-bit wide register which is wrong.
+		 * We must have this setup properly for rx buffer
+		 * DMA to work so force a reasonable value here if it
+		 * comes up zero.
+		 */
+		csz = L1_CACHE_BYTES >> 2;
+		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
+	}
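
A units note on the fallback above: PCI_CACHE_LINE_SIZE holds the cache line
size in 32-bit words rather than bytes, which is why the code shifts by two
in both directions. A quick worked example, assuming a 32-byte L1 cache line:

    /* csz = L1_CACHE_BYTES >> 2   ->  32 / 4 = 8 dwords
     * common->cachelsz = csz << 2 ->  8 * 4 = 32 bytes again
     */
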
+	/*
+	 * The default setting of latency timer yields poor results,
+	 * set it to the value used by other systems. It may be worth
+	 * tweaking this setting more.
+	 */
+	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
+
+	/* Enable bus mastering */
+	pci_set_master(pdev);
+
+	/*
+	 * Disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state.
+	 */
+	pci_write_config_byte(pdev, 0x41, 0);
+
+	ret = pci_request_region(pdev, 0, "ath5k");
+	if (ret) {
+		dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
+		goto err_dis;
+	}
+
+	mem = pci_iomap(pdev, 0, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "cannot remap PCI memory region\n");
+		ret = -EIO;
+		goto err_reg;
+	}
+
+	/*
+	 * Allocate hw (mac80211 main struct)
+	 * and hw->priv (driver private data)
+	 */
+	hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
+	if (hw == NULL) {
+		dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
+		ret = -ENOMEM;
+		goto err_map;
+	}
+
+	dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
+
+	/* Initialize driver private data */
+	SET_IEEE80211_DEV(hw, &pdev->dev);
+	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+		    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+		    IEEE80211_HW_SIGNAL_DBM;
+
+	hw->wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_AP) |
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_ADHOC) |
+		BIT(NL80211_IFTYPE_MESH_POINT);
+
+	hw->extra_tx_headroom = 2;
+	hw->channel_change_time = 5000;
+	sc = hw->priv;
+	sc->hw = hw;
+	sc->pdev = pdev;
+
+	ath5k_debug_init_device(sc);
+
+	/*
+	 * Mark the device as detached to avoid processing
+	 * interrupts until setup is complete.
+	 */
+	__set_bit(ATH_STAT_INVALID, sc->status);
+
+	sc->iobase = mem; /* So we can unmap it on detach */
+	sc->opmode = NL80211_IFTYPE_STATION;
+	sc->bintval = 1000;
+	mutex_init(&sc->lock);
+	spin_lock_init(&sc->rxbuflock);
+	spin_lock_init(&sc->txbuflock);
+	spin_lock_init(&sc->block);
+
+	/* Set private data */
+	pci_set_drvdata(pdev, sc);
+
+	/* Setup interrupt handler */
+	ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
+	if (ret) {
+		ATH5K_ERR(sc, "request_irq failed\n");
+		goto err_free;
+	}
+
+	/* If we passed the test, malloc an ath5k_hw struct */
+	sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
+	if (!sc->ah) {
+		ret = -ENOMEM;
+		ATH5K_ERR(sc, "out of memory\n");
+		goto err_irq;
+	}
+
+	sc->ah->ah_sc = sc;
+	sc->ah->ah_iobase = sc->iobase;
+	common = ath5k_hw_common(sc->ah);
+	common->ops = &ath5k_common_ops;
+	common->ah = sc->ah;
+	common->hw = hw;
+	common->cachelsz = csz << 2; /* convert to bytes */
+
+	/* Initialize device */
+	ret = ath5k_hw_attach(sc);
+	if (ret)
+		goto err_free_ah;
+
+	/* set up multi-rate retry capabilities */
+	if (sc->ah->ah_version == AR5K_AR5212) {
+		hw->max_rates = 4;
+		hw->max_rate_tries = 11;
+	}
+
+	/* Finish private driver data initialization */
+	ret = ath5k_attach(pdev, hw);
+	if (ret)
+		goto err_ah;
+
+	ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
+			ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
+			sc->ah->ah_mac_srev,
+			sc->ah->ah_phy_revision);
+
+	if (!sc->ah->ah_single_chip) {
+		/* Single chip radio (!RF5111) */
+		if (sc->ah->ah_radio_5ghz_revision &&
+		    !sc->ah->ah_radio_2ghz_revision) {
+			/* No 5GHz support -> report 2GHz radio */
+			if (!test_bit(AR5K_MODE_11A,
+				sc->ah->ah_capabilities.cap_mode)) {
+				ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
+					ath5k_chip_name(AR5K_VERSION_RAD,
+						sc->ah->ah_radio_5ghz_revision),
+					sc->ah->ah_radio_5ghz_revision);
+			/* No 2GHz support (5110 and some
+			 * 5GHz only cards) -> report 5GHz radio */
+			} else if (!test_bit(AR5K_MODE_11B,
+				sc->ah->ah_capabilities.cap_mode)) {
+				ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
+					ath5k_chip_name(AR5K_VERSION_RAD,
+						sc->ah->ah_radio_5ghz_revision),
+					sc->ah->ah_radio_5ghz_revision);
+			/* Multiband radio */
+			} else {
+				ATH5K_INFO(sc, "RF%s multiband radio found"
+					" (0x%x)\n",
+					ath5k_chip_name(AR5K_VERSION_RAD,
+						sc->ah->ah_radio_5ghz_revision),
+					sc->ah->ah_radio_5ghz_revision);
+			}
+		}
+		/* Multi chip radio (RF5111 - RF2111) ->
+		 * report both 2GHz/5GHz radios */
+		else if (sc->ah->ah_radio_5ghz_revision &&
+			 sc->ah->ah_radio_2ghz_revision) {
+			ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
+				ath5k_chip_name(AR5K_VERSION_RAD,
+					sc->ah->ah_radio_5ghz_revision),
+				sc->ah->ah_radio_5ghz_revision);
+			ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
+				ath5k_chip_name(AR5K_VERSION_RAD,
+					sc->ah->ah_radio_2ghz_revision),
+				sc->ah->ah_radio_2ghz_revision);
+		}
+	}
+
+	/* ready to process interrupts */
+	__clear_bit(ATH_STAT_INVALID, sc->status);
+
+	return 0;
+err_ah:
+	ath5k_hw_detach(sc->ah);
+err_free_ah:
+	kfree(sc->ah);
+err_irq:
+	free_irq(pdev->irq, sc);
+err_free:
+	ieee80211_free_hw(hw);
+err_map:
+	pci_iounmap(pdev, mem);
+err_reg:
+	pci_release_region(pdev, 0);
+err_dis:
+	pci_disable_device(pdev);
+err:
+	return ret;
+}
+
+static void __devexit
+ath5k_pci_remove(struct pci_dev *pdev)
+{
+	struct ath5k_softc *sc = pci_get_drvdata(pdev);
+
+	ath5k_debug_finish_device(sc);
+	ath5k_detach(pdev, sc->hw);
+	ath5k_hw_detach(sc->ah);
+	kfree(sc->ah);
+	free_irq(pdev->irq, sc);
+	pci_iounmap(pdev, sc->iobase);
+	pci_release_region(pdev, 0);
+	pci_disable_device(pdev);
+	ieee80211_free_hw(sc->hw);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ath5k_pci_suspend(struct device *dev)
+{
+	struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
+
+	ath5k_led_off(sc);
+	return 0;
+}
+
+static int ath5k_pci_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct ath5k_softc *sc = pci_get_drvdata(pdev);
+
+	/*
+	 * Suspend/Resume resets the PCI configuration space, so we have to
+	 * re-disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state
+	 */
+	pci_write_config_byte(pdev, 0x41, 0);
+
+	ath5k_led_enable(sc);
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
+#define ATH5K_PM_OPS	(&ath5k_pm_ops)
+#else
+#define ATH5K_PM_OPS	NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct pci_driver ath5k_pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= ath5k_pci_id_table,
+	.probe		= ath5k_pci_probe,
+	.remove		= __devexit_p(ath5k_pci_remove),
+	.driver.pm	= ATH5K_PM_OPS,
+};
+
+/*
+ * Module init/exit functions
+ */
+static int __init
+init_ath5k_pci(void)
+{
+	int ret;
+
+	ath5k_debug_init();
+
+	ret = pci_register_driver(&ath5k_pci_driver);
+	if (ret) {
+		printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit
+exit_ath5k_pci(void)
+{
+	pci_unregister_driver(&ath5k_pci_driver);
+
+	ath5k_debug_finish();
+}
+
+module_init(init_ath5k_pci);
+module_exit(exit_ath5k_pci);
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index dc1241f9c4e8..7f9d0d3018e8 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -60,6 +60,9 @@
 #define ATH_TXBUF	200		/* number of TX buffers */
 #define ATH_BCBUF	1		/* number of beacon buffers */
 
+#define ATH5K_TXQ_LEN_MAX	(ATH_TXBUF / 4)		/* bufs per queue */
+#define ATH5K_TXQ_LEN_LOW	(ATH5K_TXQ_LEN_MAX / 2)	/* low mark */
+
 struct ath5k_buf {
 	struct list_head	list;
 	struct ath5k_desc	*desc;	/* virtual addr of desc */
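
With ATH_TXBUF = 200 the two new limits above work out to (plain arithmetic,
shown for convenience):

    ATH5K_TXQ_LEN_MAX = ATH_TXBUF / 4        = 50 buffers per queue
    ATH5K_TXQ_LEN_LOW = ATH5K_TXQ_LEN_MAX / 2 = 25 buffers (low-water mark)

so a single busy queue can no longer drain the shared txbuf pool on its own;
the low mark is presumably where a throttled queue gets woken again.
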
@@ -83,6 +86,9 @@ struct ath5k_txq {
 	struct list_head	q;	/* transmit queue */
 	spinlock_t		lock;	/* lock on q and link */
 	bool			setup;
+	int			txq_len; /* number of queued buffers */
+	bool			txq_poll_mark;
+	unsigned int		txq_stuck; /* informational counter */
 };
 
 #define ATH5K_LED_MAX_NAME_LEN 31
@@ -204,7 +210,6 @@ struct ath5k_softc {
 	spinlock_t		txbuflock;
 	unsigned int		txbuf_len;	/* buf count in txbuf list */
 	struct ath5k_txq	txqs[AR5K_NUM_TX_QUEUES];	/* tx queues */
-	struct ath5k_txq	*txq;		/* main tx queue */
 	struct tasklet_struct	txtq;		/* tx intr tasklet */
 	struct ath5k_led	tx_led;		/* tx led */
 
@@ -230,6 +235,8 @@ struct ath5k_softc {
 
 	struct ath5k_ani_state	ani_state;
 	struct tasklet_struct	ani_tasklet;	/* ANI calibration */
+
+	struct delayed_work	tx_complete_work;
 };
 
 #define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 4cccc29964f6..6583a82a0783 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -312,6 +312,7 @@ static const struct {
 	{ ATH5K_DEBUG_DUMP_TX,	"dumptx",	"print transmit skb content" },
 	{ ATH5K_DEBUG_DUMPBANDS, "dumpbands",	"dump bands" },
 	{ ATH5K_DEBUG_ANI,	"ani",		"adaptive noise immunity" },
+	{ ATH5K_DEBUG_DESC,	"desc",		"descriptor chains" },
 	{ ATH5K_DEBUG_ANY,	"all",		"show all debug levels" },
 };
 
@@ -762,7 +763,7 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
 
 	struct ath5k_txq *txq;
 	struct ath5k_buf *bf, *bf0;
-	int i, n = 0;
+	int i, n;
 
 	len += snprintf(buf+len, sizeof(buf)-len,
 			"available txbuffers: %d\n", sc->txbuf_len);
@@ -776,9 +777,16 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
 		if (!txq->setup)
 			continue;
 
+		n = 0;
+		spin_lock_bh(&txq->lock);
 		list_for_each_entry_safe(bf, bf0, &txq->q, list)
 			n++;
-		len += snprintf(buf+len, sizeof(buf)-len, " len: %d\n", n);
+		spin_unlock_bh(&txq->lock);
+
+		len += snprintf(buf+len, sizeof(buf)-len,
+				" len: %d bufs: %d\n", txq->txq_len, n);
+		len += snprintf(buf+len, sizeof(buf)-len,
+				" stuck: %d\n", txq->txq_stuck);
 	}
 
 	if (len > sizeof(buf))
@@ -955,7 +963,7 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
 	struct ath5k_rx_status rs = {};
 	int status;
 
-	if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET)))
+	if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
 		return;
 
 	printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
@@ -997,7 +1005,7 @@ ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
 	struct ath5k_tx_status ts = {};
 	int done;
 
-	if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET)))
+	if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
 		return;
 
 	done = sc->ah->ah_proc_tx_desc(sc->ah, bf->desc, &ts);
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 606ae94a9157..9b22722a95f0 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -95,6 +95,7 @@ struct ath5k_dbg_info {
  * @ATH5K_DEBUG_DUMP_TX: print transmit skb content
  * @ATH5K_DEBUG_DUMPBANDS: dump bands
  * @ATH5K_DEBUG_TRACE: trace function calls
+ * @ATH5K_DEBUG_DESC: descriptor setup
  * @ATH5K_DEBUG_ANY: show at any debug level
  *
  * The debug level is used to control the amount and type of debugging output
@@ -117,6 +118,7 @@ enum ath5k_debug_level {
 	ATH5K_DEBUG_DUMP_TX	= 0x00000200,
 	ATH5K_DEBUG_DUMPBANDS	= 0x00000400,
 	ATH5K_DEBUG_ANI		= 0x00002000,
+	ATH5K_DEBUG_DESC	= 0x00004000,
 	ATH5K_DEBUG_ANY		= 0xffffffff
 };
 
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 484f31870ba8..58bb6c5dda7b 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -377,11 +377,11 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
  *
  * This function increases/decreases the tx trigger level for the tx fifo
  * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
- * the buffer and transmits it's data. Lowering this results sending small
+ * the buffer and transmits its data. Lowering this results sending small
  * frames more quickly but can lead to tx underruns, raising it a lot can
  * result other problems (i think bmiss is related). Right now we start with
  * the lowest possible (64Bytes) and if we get tx underrun we increase it using
- * the increase flag. Returns -EIO if we have have reached maximum/minimum.
+ * the increase flag. Returns -EIO if we have reached maximum/minimum.
  *
  * XXX: Link this with tx DMA size ?
  * XXX: Use it to save interrupts ?
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index ae316fec4a6a..39722dd73e43 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -661,7 +661,7 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
  * (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC
  * steps that match with the power values we read from eeprom. On
  * older eeprom versions (< 3.2) these steps are equaly spaced at
- * 10% of the pcdac curve -until the curve reaches it's maximum-
+ * 10% of the pcdac curve -until the curve reaches its maximum-
  * (11 steps from 0 to 100%) but on newer eeprom versions (>= 3.2)
  * these 11 steps are spaced in a different way. This function returns
  * the pcdac steps based on eeprom version and curve min/max so that we
@@ -1113,7 +1113,7 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
  */
 
 /* For RF2413 power calibration data doesn't start on a fixed location and
- * if a mode is not supported, it's section is missing -not zeroed-.
+ * if a mode is not supported, its section is missing -not zeroed-.
  * So we need to calculate the starting offset for each section by using
  * these two functions */
 
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 86fdb6ddfaaa..6a891c4484a0 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -137,11 +137,11 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
  * ath5k_hw_set_ack_bitrate - set bitrate for ACKs
  *
  * @ah: The &struct ath5k_hw
- * @high: Flag to determine if we want to use high transmition rate
+ * @high: Flag to determine if we want to use high transmission rate
  *	for ACKs or not
  *
  * If high flag is set, we tell hw to use a set of control rates based on
- * the current transmition rate (check out control_rates array inside reset.c).
+ * the current transmission rate (check out control_rates array inside reset.c).
  * If not hw just uses the lowest rate available for the current modulation
  * scheme being used (1Mbit for CCK and 6Mbits for OFDM).
  */
@@ -308,27 +308,26 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
 }
 
 /**
- * ath5k_hw_set_associd - Set BSSID for association
+ * ath5k_hw_set_bssid - Set current BSSID on hw
  *
  * @ah: The &struct ath5k_hw
- * @bssid: BSSID
- * @assoc_id: Assoc id
  *
- * Sets the BSSID which trigers the "SME Join" operation
+ * Sets the current BSSID and BSSID mask we have from the
+ * common struct into the hardware
  */
-void ath5k_hw_set_associd(struct ath5k_hw *ah)
+void ath5k_hw_set_bssid(struct ath5k_hw *ah)
 {
 	struct ath_common *common = ath5k_hw_common(ah);
 	u16 tim_offset = 0;
 
 	/*
-	 * Set simple BSSID mask on 5212
+	 * Set BSSID mask on 5212
 	 */
 	if (ah->ah_version == AR5K_AR5212)
 		ath_hw_setbssidmask(common);
 
 	/*
-	 * Set BSSID which triggers the "SME Join" operation
+	 * Set BSSID
 	 */
 	ath5k_hw_reg_write(ah,
 		get_unaligned_le32(common->curbssid),
@@ -641,200 +640,6 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
 
 }
 
-
-/*********************\
-* Key table functions *
-\*********************/
-
-/*
- * Reset a key entry on the table
- */
-int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
-{
-	unsigned int i, type;
-	u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
-
-	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
-
-	type = ath5k_hw_reg_read(ah, AR5K_KEYTABLE_TYPE(entry));
-
-	for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
-		ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i));
-
-	/* Reset associated MIC entry if TKIP
-	 * is enabled located at offset (entry + 64) */
-	if (type == AR5K_KEYTABLE_TYPE_TKIP) {
-		AR5K_ASSERT_ENTRY(micentry, AR5K_KEYTABLE_SIZE);
-		for (i = 0; i < AR5K_KEYCACHE_SIZE / 2 ; i++)
-			ath5k_hw_reg_write(ah, 0,
-				AR5K_KEYTABLE_OFF(micentry, i));
-	}
-
-	/*
-	 * Set NULL encryption on AR5212+
-	 *
-	 * Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5)
-	 *       AR5K_KEYTABLE_TYPE_NULL -> 0x00000007
-	 *
-	 * Note2: Windows driver (ndiswrapper) sets this to
-	 *        0x00000714 instead of 0x00000007
-	 */
-	if (ah->ah_version >= AR5K_AR5211) {
-		ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
-				AR5K_KEYTABLE_TYPE(entry));
-
-		if (type == AR5K_KEYTABLE_TYPE_TKIP) {
-			ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
-					AR5K_KEYTABLE_TYPE(micentry));
-		}
-	}
-
-	return 0;
-}
-
-static
-int ath5k_keycache_type(const struct ieee80211_key_conf *key)
-{
-	switch (key->alg) {
-	case ALG_TKIP:
-		return AR5K_KEYTABLE_TYPE_TKIP;
-	case ALG_CCMP:
-		return AR5K_KEYTABLE_TYPE_CCM;
-	case ALG_WEP:
-		if (key->keylen == WLAN_KEY_LEN_WEP40)
-			return AR5K_KEYTABLE_TYPE_40;
-		else if (key->keylen == WLAN_KEY_LEN_WEP104)
-			return AR5K_KEYTABLE_TYPE_104;
-		return -EINVAL;
-	default:
-		return -EINVAL;
-	}
-	return -EINVAL;
-}
-
-/*
- * Set a key entry on the table
- */
-int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
-		const struct ieee80211_key_conf *key, const u8 *mac)
-{
-	unsigned int i;
-	int keylen;
-	__le32 key_v[5] = {};
-	__le32 key0 = 0, key1 = 0;
-	__le32 *rxmic, *txmic;
-	int keytype;
-	u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
-	bool is_tkip;
-	const u8 *key_ptr;
-
-	is_tkip = (key->alg == ALG_TKIP);
-
-	/*
-	 * key->keylen comes in from mac80211 in bytes.
-	 * TKIP is 128 bit + 128 bit mic
-	 */
-	keylen = (is_tkip) ? (128 / 8) : key->keylen;
-
-	if (entry > AR5K_KEYTABLE_SIZE ||
-		(is_tkip && micentry > AR5K_KEYTABLE_SIZE))
-		return -EOPNOTSUPP;
-
-	if (unlikely(keylen > 16))
-		return -EOPNOTSUPP;
-
-	keytype = ath5k_keycache_type(key);
-	if (keytype < 0)
-		return keytype;
-
-	/*
-	 * each key block is 6 bytes wide, written as pairs of
-	 * alternating 32 and 16 bit le values.
-	 */
-	key_ptr = key->key;
-	for (i = 0; keylen >= 6; keylen -= 6) {
-		memcpy(&key_v[i], key_ptr, 6);
-		i += 2;
-		key_ptr += 6;
-	}
-	if (keylen)
-		memcpy(&key_v[i], key_ptr, keylen);
-
-	/* intentionally corrupt key until mic is installed */
-	if (is_tkip) {
-		key0 = key_v[0] = ~key_v[0];
-		key1 = key_v[1] = ~key_v[1];
-	}
-
-	for (i = 0; i < ARRAY_SIZE(key_v); i++)
-		ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
-				AR5K_KEYTABLE_OFF(entry, i));
-
-	ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry));
-
-	if (is_tkip) {
-		/* Install rx/tx MIC */
-		rxmic = (__le32 *) &key->key[16];
-		txmic = (__le32 *) &key->key[24];
-
-		if (ah->ah_combined_mic) {
-			key_v[0] = rxmic[0];
-			key_v[1] = cpu_to_le32(le32_to_cpu(txmic[0]) >> 16);
-			key_v[2] = rxmic[1];
-			key_v[3] = cpu_to_le32(le32_to_cpu(txmic[0]) & 0xffff);
-			key_v[4] = txmic[1];
-		} else {
-			key_v[0] = rxmic[0];
-			key_v[1] = 0;
-			key_v[2] = rxmic[1];
-			key_v[3] = 0;
-			key_v[4] = 0;
-		}
-		for (i = 0; i < ARRAY_SIZE(key_v); i++)
-			ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
-					AR5K_KEYTABLE_OFF(micentry, i));
-
-		ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
-				AR5K_KEYTABLE_TYPE(micentry));
-		ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC0(micentry));
-		ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC1(micentry));
-
-		/* restore first 2 words of key */
-		ath5k_hw_reg_write(ah, le32_to_cpu(~key0),
-				AR5K_KEYTABLE_OFF(entry, 0));
-		ath5k_hw_reg_write(ah, le32_to_cpu(~key1),
-				AR5K_KEYTABLE_OFF(entry, 1));
-	}
-
-	return ath5k_hw_set_key_lladdr(ah, entry, mac);
-}
-
-int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
-{
-	u32 low_id, high_id;
-
-	/* Invalid entry (key table overflow) */
-	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
-
-	/*
-	 * MAC may be NULL if it's a broadcast key. In this case no need to
-	 * to compute get_unaligned_le32 and get_unaligned_le16 as we
-	 * already know it.
-	 */
-	if (!mac) {
-		low_id = 0xffffffff;
-		high_id = 0xffff | AR5K_KEYTABLE_VALID;
-	} else {
-		low_id = get_unaligned_le32(mac);
-		high_id = get_unaligned_le16(mac + 4) | AR5K_KEYTABLE_VALID;
-	}
-
-	ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
-	ath5k_hw_reg_write(ah, high_id, AR5K_KEYTABLE_MAC1(entry));
-
-	return 0;
-}
-
 /**
  * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
  *
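
The removed ath5k_hw_set_key() packed key material in 6-byte blocks, "as
pairs of alternating 32 and 16 bit le values". Tracing the copy loop above
for a 16-byte key k[0..15] gives the following layout (worked example,
reconstructed from the removed code):

    /* key_v[0] = k[0..3]    full 32-bit word
     * key_v[1] = k[4..5]    only the low 16 bits used
     * key_v[2] = k[6..9]
     * key_v[3] = k[10..11]  only the low 16 bits used
     * key_v[4] = k[12..15]  tail memcpy of the remaining 4 bytes
     */
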
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 6284c389ba18..4932bf2f35eb 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -115,7 +115,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
 \**********************/
 
 /*
- * This code is used to optimize rf gain on different environments
+ * This code is used to optimize RF gain on different environments
  * (temperature mostly) based on feedback from a power detector.
  *
  * It's only used on RF5111 and RF5112, later RF chips seem to have
@@ -302,7 +302,7 @@ static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
 }
 
 /* Perform gain_F adjustment by choosing the right set
- * of parameters from rf gain optimization ladder */
+ * of parameters from RF gain optimization ladder */
 static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
 {
 	const struct ath5k_gain_opt *go;
@@ -367,7 +367,7 @@ done:
 	return ret;
 }
 
-/* Main callback for thermal rf gain calibration engine
+/* Main callback for thermal RF gain calibration engine
  * Check for a new gain reading and schedule an adjustment
  * if needed.
  *
@@ -433,7 +433,7 @@ done:
 	return ah->ah_gain.g_state;
 }
 
-/* Write initial rf gain table to set the RF sensitivity
+/* Write initial RF gain table to set the RF sensitivity
  * this one works on all RF chips and has nothing to do
  * with gain_F calibration */
 int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq)
@@ -496,7 +496,7 @@ int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq)
 
 
 /*
- * Setup RF registers by writing rf buffer on hw
+ * Setup RF registers by writing RF buffer on hw
  */
 int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 		unsigned int mode)
@@ -571,7 +571,7 @@ int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 		return -EINVAL;
 	}
 
-	/* If it's the first time we set rf buffer, allocate
+	/* If it's the first time we set RF buffer, allocate
 	 * ah->ah_rf_banks based on ah->ah_rf_banks_size
 	 * we set above */
 	if (ah->ah_rf_banks == NULL) {
@@ -1377,7 +1377,7 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
 
 	/* protect against divide by 0 and loss of sign bits */
 	if (i_coffd == 0 || q_coffd < 2)
-		return -1;
+		return 0;
 
 	i_coff = (-iq_corr) / i_coffd;
 	i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
@@ -1582,7 +1582,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
 		else if (curr_sym_off >= 31 && curr_sym_off <= 46)
 			mag_mask[2] |=
 				plt_mag_map << (curr_sym_off - 31) * 2;
-		else if (curr_sym_off >= 46 && curr_sym_off <= 53)
+		else if (curr_sym_off >= 47 && curr_sym_off <= 53)
 			mag_mask[3] |=
 				plt_mag_map << (curr_sym_off - 47) * 2;
 
@@ -2987,7 +2987,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
 
 
 /*
- * Set transmition power
+ * Set transmission power
  */
 int
 ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
@@ -3035,9 +3035,6 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
 	/* Limit max power if we have a CTL available */
 	ath5k_get_max_ctl_power(ah, channel);
 
-	/* FIXME: Tx power limit for this regdomain
-	 * XXX: Mac80211/CRDA will do that anyway ? */
-
 	/* FIXME: Antenna reduction stuff */
 
 	/* FIXME: Limit power on turbo modes */
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 4186ff4c6e9c..84c717ded1c5 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -36,24 +36,58 @@ int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
 }
 
 /*
+ * Make sure cw is a power of 2 minus 1 and smaller than 1024
+ */
+static u16 ath5k_cw_validate(u16 cw_req)
+{
+	u32 cw = 1;
+	cw_req = min(cw_req, (u16)1023);
+
+	while (cw < cw_req)
+		cw = (cw << 1) | 1;
+
+	return cw;
+}
+
+/*
  * Set properties for a transmit queue
  */
 int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
-		const struct ath5k_txq_info *queue_info)
+		const struct ath5k_txq_info *qinfo)
 {
+	struct ath5k_txq_info *qi;
+
 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
 
-	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+	qi = &ah->ah_txq[queue];
+
+	if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
 		return -EIO;
 
-	memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
+	/* copy and validate values */
+	qi->tqi_type = qinfo->tqi_type;
+	qi->tqi_subtype = qinfo->tqi_subtype;
+	qi->tqi_flags = qinfo->tqi_flags;
+	/*
+	 * According to the docs: Although the AIFS field is 8 bit wide,
+	 * the maximum supported value is 0xFC. Setting it higher than that
+	 * will cause the DCU to hang.
+	 */
+	qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
+	qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
+	qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
+	qi->tqi_cbr_period = qinfo->tqi_cbr_period;
+	qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
+	qi->tqi_burst_time = qinfo->tqi_burst_time;
+	qi->tqi_ready_time = qinfo->tqi_ready_time;
 
 	/*XXX: Is this supported on 5210 ?*/
-	if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
-	    ((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
-	     (queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
-	    queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
-		ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
+	/*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/
+	if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
+	    ((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
+	     (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
+	    qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
+		qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
 
 	return 0;
 }
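
A worked example of the new ath5k_cw_validate() helper above, for reference:

    /* ath5k_cw_validate(22):
     *   cw: 1 -> 3 -> 7 -> 15 -> 31   (loop stops once cw >= 22)
     * returns 31 = 2^5 - 1; requests above 1023 are clamped to 1023
     * first, so the result is always of the form 2^n - 1 and < 1024.
     */
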
@@ -186,7 +220,7 @@ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
  */
 int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 {
-	u32 cw_min, cw_max, retry_lg, retry_sh;
+	u32 retry_lg, retry_sh;
 	struct ath5k_txq_info *tq = &ah->ah_txq[queue];
 
 	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
@@ -217,14 +251,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 	/* Set IFS0 */
 	if (ah->ah_turbo) {
 		ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
-				(ah->ah_aifs + tq->tqi_aifs) *
-				AR5K_INIT_SLOT_TIME_TURBO) <<
+				tq->tqi_aifs * AR5K_INIT_SLOT_TIME_TURBO) <<
 				AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
 				AR5K_IFS0);
 	} else {
 		ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
-				(ah->ah_aifs + tq->tqi_aifs) *
-				AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
+				tq->tqi_aifs * AR5K_INIT_SLOT_TIME) <<
+				AR5K_IFS0_DIFS_S) |
 				AR5K_INIT_SIFS, AR5K_IFS0);
 	}
 
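
The IFS0 writes above follow the usual 802.11 interframe-space relation; a
quick sanity check with OFDM timing (example numbers, not from the patch):

    /* AIFS_time = SIFS + AIFSN * slot_time
     * e.g. SIFS = 16 us, slot = 9 us, AIFSN = 2:
     *   16 + 2 * 9 = 34 us ends up in the DIFS field of AR5K_IFS0.
     * The hunk drops the ah->ah_aifs term, so only the per-queue
     * tqi_aifs now contributes to the product.
     */
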
@@ -248,35 +281,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 	}
 
 	/*
-	 * Calculate cwmin/max by channel mode
-	 */
-	cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
-	cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
-	ah->ah_aifs = AR5K_TUNE_AIFS;
-	/*XR is only supported on 5212*/
-	if (IS_CHAN_XR(ah->ah_current_channel) &&
-	    ah->ah_version == AR5K_AR5212) {
-		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
-		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
-		ah->ah_aifs = AR5K_TUNE_AIFS_XR;
-	/*B mode is not supported on 5210*/
-	} else if (IS_CHAN_B(ah->ah_current_channel) &&
-	    ah->ah_version != AR5K_AR5210) {
-		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
-		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
-		ah->ah_aifs = AR5K_TUNE_AIFS_11B;
-	}
-
-	cw_min = 1;
-	while (cw_min < ah->ah_cw_min)
-		cw_min = (cw_min << 1) | 1;
-
-	cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
-		((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
-	cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
-		((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
-
-	/*
 	 * Calculate and set retry limits
 	 */
 	if (ah->ah_software_retry) {
@@ -292,7 +296,7 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 	/*No QCU/DCU [5210]*/
 	if (ah->ah_version == AR5K_AR5210) {
 		ath5k_hw_reg_write(ah,
-			(cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
+			(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
 			| AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
 				AR5K_NODCU_RETRY_LMT_SLG_RETRY)
 			| AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
@@ -314,14 +318,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
 	/*===Rest is also for QCU/DCU only [5211+]===*/
 
 	/*
-	 * Set initial content window (cw_min/cw_max)
+	 * Set contention window (cw_min/cw_max)
 	 * and arbitrated interframe space (aifs)...
 	 */
 	ath5k_hw_reg_write(ah,
-		AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
-		AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
-		AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
-			AR5K_DCU_LCL_IFS_AIFS),
+		AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
+		AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
+		AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
 		AR5K_QUEUE_DFS_LOCAL_IFS(queue));
 
 	/*
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 55b4ac6d236f..67d63081705a 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -1822,50 +1822,8 @@
 
 /*===5212 end===*/
 
-/*
- * Key table (WEP) register
- */
-#define AR5K_KEYTABLE_0_5210		0x9000
-#define AR5K_KEYTABLE_0_5211		0x8800
-#define AR5K_KEYTABLE_5210(_n)		(AR5K_KEYTABLE_0_5210 + ((_n) << 5))
-#define AR5K_KEYTABLE_5211(_n)		(AR5K_KEYTABLE_0_5211 + ((_n) << 5))
-#define AR5K_KEYTABLE(_n)		(ah->ah_version == AR5K_AR5210 ? \
-					AR5K_KEYTABLE_5210(_n) : AR5K_KEYTABLE_5211(_n))
-#define AR5K_KEYTABLE_OFF(_n, x)	(AR5K_KEYTABLE(_n) + (x << 2))
-#define AR5K_KEYTABLE_TYPE(_n)		AR5K_KEYTABLE_OFF(_n, 5)
-#define AR5K_KEYTABLE_TYPE_40		0x00000000
-#define AR5K_KEYTABLE_TYPE_104		0x00000001
-#define AR5K_KEYTABLE_TYPE_128		0x00000003
-#define AR5K_KEYTABLE_TYPE_TKIP		0x00000004	/* [5212+] */
-#define AR5K_KEYTABLE_TYPE_AES		0x00000005	/* [5211+] */
-#define AR5K_KEYTABLE_TYPE_CCM		0x00000006	/* [5212+] */
-#define AR5K_KEYTABLE_TYPE_NULL		0x00000007	/* [5211+] */
-#define AR5K_KEYTABLE_ANTENNA		0x00000008	/* [5212+] */
-#define AR5K_KEYTABLE_MAC0(_n)		AR5K_KEYTABLE_OFF(_n, 6)
-#define AR5K_KEYTABLE_MAC1(_n)		AR5K_KEYTABLE_OFF(_n, 7)
-#define AR5K_KEYTABLE_VALID		0x00008000
-
-/* If key type is TKIP and MIC is enabled
- * MIC key goes in offset entry + 64 */
-#define AR5K_KEYTABLE_MIC_OFFSET	64
-
-/* WEP 40-bit	= 40-bit  entered key + 24 bit IV = 64-bit
- * WEP 104-bit	= 104-bit entered key + 24-bit IV = 128-bit
- * WEP 128-bit	= 128-bit entered key + 24 bit IV = 152-bit
- *
- * Some vendors have introduced bigger WEP keys to address
- * security vulnerabilities in WEP. This includes:
- *
- * WEP 232-bit = 232-bit entered key + 24 bit IV = 256-bit
- *
- * We can expand this if we find ar5k Atheros cards with a larger
- * key table size.
- */
 #define AR5K_KEYTABLE_SIZE_5210		64
 #define AR5K_KEYTABLE_SIZE_5211		128
-#define AR5K_KEYTABLE_SIZE		(ah->ah_version == AR5K_AR5210 ? \
-					AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211)
-
 
 /*===PHY REGISTERS===*/
 
@@ -1911,7 +1869,7 @@
 #define AR5K_PHY_TURBO			0x9804	/* Register Address */
 #define AR5K_PHY_TURBO_MODE		0x00000001	/* Enable turbo mode */
 #define AR5K_PHY_TURBO_SHORT		0x00000002	/* Set short symbols to turbo mode */
-#define AR5K_PHY_TURBO_MIMO		0x00000004	/* Set turbo for mimo mimo */
+#define AR5K_PHY_TURBO_MIMO		0x00000004	/* Set turbo for mimo */
 
 /*
  * PHY agility command register
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 498aa28ea9e6..58912cd762d9 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -326,7 +326,7 @@ commit:
  * register). After this MAC and Baseband are
  * disabled and a full reset is needed to come
  * back. This way we save as much power as possible
- * without puting the card on full sleep.
+ * without putting the card on full sleep.
  */
 int ath5k_hw_on_hold(struct ath5k_hw *ah)
 {
@@ -344,7 +344,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
 	/*
 	 * Put chipset on warm reset...
 	 *
-	 * Note: puting PCI core on warm reset on PCI-E cards
+	 * Note: putting PCI core on warm reset on PCI-E cards
 	 * results card to hang and always return 0xffff... so
 	 * we ingore that flag for PCI-E cards. On PCI cards
 	 * this flag gets cleared after 64 PCI clocks.
@@ -400,7 +400,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
 	/*
 	 * Put chipset on warm reset...
 	 *
-	 * Note: puting PCI core on warm reset on PCI-E cards
+	 * Note: putting PCI core on warm reset on PCI-E cards
 	 * results card to hang and always return 0xffff... so
 	 * we ingore that flag for PCI-E cards. On PCI cards
 	 * this flag gets cleared after 64 PCI clocks.
@@ -959,7 +959,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
 				AR5K_QUEUE_DCU_SEQNUM(0));
 	}
 
-	/* TSF accelerates on AR5211 durring reset
+	/* TSF accelerates on AR5211 during reset
 	 * As a workaround save it here and restore
 	 * it later so that it's back in time after
 	 * reset. This way it'll get re-synced on the
@@ -1080,7 +1080,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
 		return ret;
 
 	/* Spur info is available only from EEPROM versions
-	 * bigger than 5.3 but but the EEPOM routines will use
+	 * greater than 5.3, but the EEPROM routines will use
 	 * static values for older versions */
 	if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
 		ath5k_hw_set_spur_mitigation_filter(ah,
@@ -1160,7 +1160,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1160 */ 1160 */
1161 1161
1162 /* Restore bssid and bssid mask */ 1162 /* Restore bssid and bssid mask */
1163 ath5k_hw_set_associd(ah); 1163 ath5k_hw_set_bssid(ah);
1164 1164
1165 /* Set PCU config */ 1165 /* Set PCU config */
1166 ath5k_hw_set_opmode(ah, op_mode); 1166 ath5k_hw_set_opmode(ah, op_mode);
@@ -1173,11 +1173,11 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1173 /* Set RSSI/BRSSI thresholds 1173 /* Set RSSI/BRSSI thresholds
1174 * 1174 *
1175 * Note: If we decide to set this value 1175 * Note: If we decide to set this value
1176 * dynamicaly, have in mind that when AR5K_RSSI_THR 1176 * dynamically, keep in mind that when AR5K_RSSI_THR
1177 * register is read it might return 0x40 if we haven't 1177 * register is read, it might return 0x40 if we haven't
1178 * wrote anything to it plus BMISS RSSI threshold is zeroed. 1178 * written anything to it. Also, BMISS RSSI threshold is zeroed.
1179 * So doing a save/restore procedure here isn't the right 1179 * So doing a save/restore procedure here isn't the right
1180 * choice. Instead store it on ath5k_hw */ 1180 * choice. Instead, store it in ath5k_hw */
1181 ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES | 1181 ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES |
1182 AR5K_TUNE_BMISS_THRES << 1182 AR5K_TUNE_BMISS_THRES <<
1183 AR5K_RSSI_THR_BMISS_S), 1183 AR5K_RSSI_THR_BMISS_S),
@@ -1235,7 +1235,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1235 1235
1236 /* 1236 /*
1237 * Perform ADC test to see if baseband is ready 1237 * Perform ADC test to see if baseband is ready
1238 * Set tx hold and check adc test register 1238 * Set TX hold and check ADC test register
1239 */ 1239 */
1240 phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); 1240 phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
1241 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); 1241 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
@@ -1254,15 +1254,15 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1254 * 1254 *
1255 * This method is used to calibrate some static offsets 1255 * This method is used to calibrate some static offsets
1256 * used together with on-the fly I/Q calibration (the 1256 * used together with on-the fly I/Q calibration (the
1257 * one performed via ath5k_hw_phy_calibrate), that doesn't 1257 * one performed via ath5k_hw_phy_calibrate), which doesn't
1258 * interrupt rx path. 1258 * interrupt rx path.
1259 * 1259 *
1260 * While rx path is re-routed to the power detector we also 1260 * While rx path is re-routed to the power detector we also
1261 * start a noise floor calibration, to measure the 1261 * start a noise floor calibration to measure the
1262 * card's noise floor (the noise we measure when we are not 1262 * card's noise floor (the noise we measure when we are not
1263 * transmiting or receiving anything). 1263 * transmitting or receiving anything).
1264 * 1264 *
1265 * If we are in a noisy environment AGC calibration may time 1265 * If we are in a noisy environment, AGC calibration may time
1266 * out and/or noise floor calibration might timeout. 1266 * out and/or noise floor calibration might timeout.
1267 */ 1267 */
1268 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 1268 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
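
The comment in the hunk above describes saving the TSF before reset and restoring it afterwards so the timer only loses the duration of the reset itself. A minimal standalone sketch of that save/restore pattern, using an invented two-register TSF layout rather than the real AR5211 registers:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];	/* stand-ins for TSF low/high registers */

static uint64_t tsf_read(void)
{
	return (uint64_t)regs[1] << 32 | regs[0];
}

static void tsf_write(uint64_t tsf)
{
	regs[0] = (uint32_t)tsf;
	regs[1] = (uint32_t)(tsf >> 32);
}

static void chip_reset(void)
{
	regs[0] = regs[1] = 0;	/* reset clobbers (or accelerates) the TSF */
}

int main(void)
{
	uint64_t tsf;

	tsf_write(0x123456789ULL);
	tsf = tsf_read();	/* save before reset */
	chip_reset();
	tsf_write(tsf);		/* restore afterwards, back in time */
	printf("tsf = 0x%llx\n", (unsigned long long)tsf_read());
	return 0;
}
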
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index e50baff66175..3ac4cff4239d 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -25,10 +25,10 @@
25 * 25 *
26 * We don't write on those registers directly but 26 * We don't write on those registers directly but
27 * we send a data packet on the chip, using a special register, 27 * we send a data packet on the chip, using a special register,
28 * that holds all the settings we need. After we 've sent the 28 * that holds all the settings we need. After we've sent the
29 * data packet, we write on another special register to notify hw 29 * data packet, we write on another special register to notify hw
30 * to apply the settings. This is done so that control registers 30 * to apply the settings. This is done so that control registers
31 * can be dynamicaly programmed during operation and the settings 31 * can be dynamically programmed during operation and the settings
32 * are applied faster on the hw. 32 * are applied faster on the hw.
33 * 33 *
34 * We call each data packet an "RF Bank" and all the data we write 34 * We call each data packet an "RF Bank" and all the data we write
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 35f23bdc442f..ad57a6d23110 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -32,6 +32,14 @@ config ATH9K_DEBUGFS
32 32
33 Also required for changing debug message flags at run time. 33 Also required for changing debug message flags at run time.
34 34
35config ATH9K_RATE_CONTROL
36 bool "Atheros ath9k rate control"
37 depends on ATH9K
38 default y
39 ---help---
40 Say Y, if you want to use the ath9k specific rate control
41 module instead of minstrel_ht.
42
35config ATH9K_HTC 43config ATH9K_HTC
36 tristate "Atheros HTC based wireless cards support" 44 tristate "Atheros HTC based wireless cards support"
37 depends on USB && MAC80211 45 depends on USB && MAC80211
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 973ae4f49f35..aca01621c205 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -5,8 +5,8 @@ ath9k-y += beacon.o \
5 recv.o \ 5 recv.o \
6 xmit.o \ 6 xmit.o \
7 virtual.o \ 7 virtual.o \
8 rc.o
9 8
9ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
10ath9k-$(CONFIG_PCI) += pci.o 10ath9k-$(CONFIG_PCI) += pci.o
11ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o 11ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o
12ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o 12ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
@@ -46,6 +46,7 @@ ath9k_htc-y += htc_hst.o \
46 htc_drv_txrx.o \ 46 htc_drv_txrx.o \
47 htc_drv_main.o \ 47 htc_drv_main.o \
48 htc_drv_beacon.o \ 48 htc_drv_beacon.o \
49 htc_drv_init.o 49 htc_drv_init.o \
50 htc_drv_gpio.o
50 51
51obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o 52obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index cc648b6ae31c..0496f965314f 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -14,6 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/kernel.h>
17#include "hw.h" 18#include "hw.h"
18#include "hw-ops.h" 19#include "hw-ops.h"
19 20
@@ -48,7 +49,7 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = {
48 { 7, 8, 0 } /* lvl 9 */ 49 { 7, 8, 0 } /* lvl 9 */
49}; 50};
50#define ATH9K_ANI_OFDM_NUM_LEVEL \ 51#define ATH9K_ANI_OFDM_NUM_LEVEL \
51 (sizeof(ofdm_level_table)/sizeof(ofdm_level_table[0])) 52 ARRAY_SIZE(ofdm_level_table)
52#define ATH9K_ANI_OFDM_MAX_LEVEL \ 53#define ATH9K_ANI_OFDM_MAX_LEVEL \
53 (ATH9K_ANI_OFDM_NUM_LEVEL-1) 54 (ATH9K_ANI_OFDM_NUM_LEVEL-1)
54#define ATH9K_ANI_OFDM_DEF_LEVEL \ 55#define ATH9K_ANI_OFDM_DEF_LEVEL \
@@ -94,7 +95,7 @@ static const struct ani_cck_level_entry cck_level_table[] = {
94}; 95};
95 96
96#define ATH9K_ANI_CCK_NUM_LEVEL \ 97#define ATH9K_ANI_CCK_NUM_LEVEL \
97 (sizeof(cck_level_table)/sizeof(cck_level_table[0])) 98 ARRAY_SIZE(cck_level_table)
98#define ATH9K_ANI_CCK_MAX_LEVEL \ 99#define ATH9K_ANI_CCK_MAX_LEVEL \
99 (ATH9K_ANI_CCK_NUM_LEVEL-1) 100 (ATH9K_ANI_CCK_NUM_LEVEL-1)
100#define ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI \ 101#define ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI \
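
The change above swaps the open-coded sizeof division for ARRAY_SIZE(). A standalone sketch of the idiom; the kernel's version in <linux/kernel.h> additionally rejects pointers (which would silently give a wrong answer) at compile time:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct level_entry { int on, off; };

static const struct level_entry level_table[] = {
	{ 0, 1 }, { 2, 3 }, { 4, 5 },
};

int main(void)
{
	/* Same value as sizeof(level_table)/sizeof(level_table[0]), but
	 * the intent is explicit and the element type is written once. */
	printf("levels: %zu\n", ARRAY_SIZE(level_table));
	return 0;
}
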
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 303c63da5ea3..94392daebaa0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -580,3 +580,53 @@ void ar9002_hw_attach_ops(struct ath_hw *ah)
580 else 580 else
581 ath9k_hw_attach_ani_ops_old(ah); 581 ath9k_hw_attach_ani_ops_old(ah);
582} 582}
583
584void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
585{
586 u32 modesIndex;
587 int i;
588
589 switch (chan->chanmode) {
590 case CHANNEL_A:
591 case CHANNEL_A_HT20:
592 modesIndex = 1;
593 break;
594 case CHANNEL_A_HT40PLUS:
595 case CHANNEL_A_HT40MINUS:
596 modesIndex = 2;
597 break;
598 case CHANNEL_G:
599 case CHANNEL_G_HT20:
600 case CHANNEL_B:
601 modesIndex = 4;
602 break;
603 case CHANNEL_G_HT40PLUS:
604 case CHANNEL_G_HT40MINUS:
605 modesIndex = 3;
606 break;
607
608 default:
609 return;
610 }
611
612 ENABLE_REGWRITE_BUFFER(ah);
613
614 for (i = 0; i < ah->iniModes_9271_ANI_reg.ia_rows; i++) {
615 u32 reg = INI_RA(&ah->iniModes_9271_ANI_reg, i, 0);
616 u32 val = INI_RA(&ah->iniModes_9271_ANI_reg, i, modesIndex);
617 u32 val_orig;
618
619 if (reg == AR_PHY_CCK_DETECT) {
620 val_orig = REG_READ(ah, reg);
621 val &= AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK;
622 val_orig &= ~AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK;
623
624 REG_WRITE(ah, reg, val|val_orig);
625 } else
626 REG_WRITE(ah, reg, val);
627 }
628
629 REGWRITE_BUFFER_FLUSH(ah);
630 DISABLE_REGWRITE_BUFFER(ah);
631
632}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index adbf031fbc5a..cd56c8692705 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -530,3 +530,38 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
530 530
531 ar9002_hw_set_nf_limits(ah); 531 ar9002_hw_set_nf_limits(ah);
532} 532}
533
534void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah,
535 struct ath_hw_antcomb_conf *antconf)
536{
537 u32 regval;
538
539 regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
540 antconf->main_lna_conf = (regval & AR_PHY_9285_ANT_DIV_MAIN_LNACONF) >>
541 AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S;
542 antconf->alt_lna_conf = (regval & AR_PHY_9285_ANT_DIV_ALT_LNACONF) >>
543 AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
544 antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
545 AR_PHY_9285_FAST_DIV_BIAS_S;
546}
547EXPORT_SYMBOL(ath9k_hw_antdiv_comb_conf_get);
548
549void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
550 struct ath_hw_antcomb_conf *antconf)
551{
552 u32 regval;
553
554 regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
555 regval &= ~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF |
556 AR_PHY_9285_ANT_DIV_ALT_LNACONF |
557 AR_PHY_9285_FAST_DIV_BIAS);
558 regval |= ((antconf->main_lna_conf << AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S)
559 & AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
560 regval |= ((antconf->alt_lna_conf << AR_PHY_9285_ANT_DIV_ALT_LNACONF_S)
561 & AR_PHY_9285_ANT_DIV_ALT_LNACONF);
562 regval |= ((antconf->fast_div_bias << AR_PHY_9285_FAST_DIV_BIAS_S)
563 & AR_PHY_9285_FAST_DIV_BIAS);
564
565 REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
566}
567EXPORT_SYMBOL(ath9k_hw_antdiv_comb_conf_set);
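
The new helpers above follow the driver's MASK/MASK_S convention: extract a field with (reg & MASK) >> SHIFT, update it by clearing the mask first and OR-ing in the shifted, masked value. A standalone sketch with illustrative names, not the real ath9k register definitions:

#include <stdint.h>
#include <stdio.h>

#define FAST_DIV_BIAS	0x00007E00u	/* field mask */
#define FAST_DIV_BIAS_S	9		/* field shift */

static uint32_t get_field(uint32_t reg, uint32_t mask, int shift)
{
	return (reg & mask) >> shift;
}

static uint32_t set_field(uint32_t reg, uint32_t mask, int shift, uint32_t val)
{
	reg &= ~mask;			/* clear the old field */
	reg |= (val << shift) & mask;	/* insert, clamped to the field width */
	return reg;
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_field(reg, FAST_DIV_BIAS, FAST_DIV_BIAS_S, 0x3f);
	printf("bias = 0x%x\n", get_field(reg, FAST_DIV_BIAS, FAST_DIV_BIAS_S));
	return 0;
}
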
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index c5151a4dd10b..37663dbbcf57 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -302,6 +302,8 @@
302#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000 302#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
303 303
304#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac 304#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
305#define AR_PHY_9285_FAST_DIV_BIAS 0x00007E00
306#define AR_PHY_9285_FAST_DIV_BIAS_S 9
305#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000 307#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
306#define AR_PHY_9285_ANT_DIV_CTL 0x01000000 308#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
307#define AR_PHY_9285_ANT_DIV_CTL_S 24 309#define AR_PHY_9285_ANT_DIV_CTL_S 24
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 057fb69ddf7f..c4182359bee4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -968,7 +968,7 @@ static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
968} 968}
969 969
970static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah, 970static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
971 enum ieee80211_band freq_band) 971 enum ath9k_hal_freq_band freq_band)
972{ 972{
973 return 1; 973 return 1;
974} 974}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 5b995bee70ae..3b424ca1ba84 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -185,7 +185,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
185 ath_print(common, ATH_DBG_INTERRUPT, 185 ath_print(common, ATH_DBG_INTERRUPT,
186 "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); 186 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
187 187
188 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); 188 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
189 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR); 189 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
190 190
191 } 191 }
@@ -616,7 +616,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
616 rxs->rs_status |= ATH9K_RXERR_DECRYPT; 616 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
617 } else if (rxsp->status11 & AR_MichaelErr) { 617 } else if (rxsp->status11 & AR_MichaelErr) {
618 rxs->rs_status |= ATH9K_RXERR_MIC; 618 rxs->rs_status |= ATH9K_RXERR_MIC;
619 } 619 } else if (rxsp->status11 & AR_KeyMiss)
620 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
620 } 621 }
621 622
622 return 0; 623 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 07f26ee7a723..9f8e542ef47e 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -254,7 +254,7 @@ struct ath_atx_tid {
254 struct list_head buf_q; 254 struct list_head buf_q;
255 struct ath_node *an; 255 struct ath_node *an;
256 struct ath_atx_ac *ac; 256 struct ath_atx_ac *ac;
257 struct ath_buf *tx_buf[ATH_TID_MAX_BUFS]; 257 unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
258 u16 seq_start; 258 u16 seq_start;
259 u16 seq_next; 259 u16 seq_next;
260 u16 baw_size; 260 u16 baw_size;
@@ -345,9 +345,8 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
345void ath_tx_tasklet(struct ath_softc *sc); 345void ath_tx_tasklet(struct ath_softc *sc);
346void ath_tx_edma_tasklet(struct ath_softc *sc); 346void ath_tx_edma_tasklet(struct ath_softc *sc);
347void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb); 347void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
348bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno); 348int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
349void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 349 u16 tid, u16 *ssn);
350 u16 tid, u16 *ssn);
351void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 350void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
352void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 351void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
353void ath9k_enable_ps(struct ath_softc *sc); 352void ath9k_enable_ps(struct ath_softc *sc);
@@ -423,6 +422,7 @@ int ath_beaconq_config(struct ath_softc *sc);
423#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */ 422#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
424#define ATH_ANI_POLLINTERVAL_OLD 100 /* 100 ms */ 423#define ATH_ANI_POLLINTERVAL_OLD 100 /* 100 ms */
425#define ATH_ANI_POLLINTERVAL_NEW 1000 /* 1000 ms */ 424#define ATH_ANI_POLLINTERVAL_NEW 1000 /* 1000 ms */
425#define ATH_LONG_CALINTERVAL_INT 1000 /* 1000 ms */
426#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ 426#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
427#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 427#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
428 428
@@ -436,14 +436,6 @@ void ath_ani_calibrate(unsigned long data);
436/* BTCOEX */ 436/* BTCOEX */
437/**********/ 437/**********/
438 438
439/* Defines the BT AR_BT_COEX_WGHT used */
440enum ath_stomp_type {
441 ATH_BTCOEX_NO_STOMP,
442 ATH_BTCOEX_STOMP_ALL,
443 ATH_BTCOEX_STOMP_LOW,
444 ATH_BTCOEX_STOMP_NONE
445};
446
447struct ath_btcoex { 439struct ath_btcoex {
448 bool hw_timer_enabled; 440 bool hw_timer_enabled;
449 spinlock_t btcoex_lock; 441 spinlock_t btcoex_lock;
@@ -488,6 +480,60 @@ struct ath_led {
488void ath_init_leds(struct ath_softc *sc); 480void ath_init_leds(struct ath_softc *sc);
489void ath_deinit_leds(struct ath_softc *sc); 481void ath_deinit_leds(struct ath_softc *sc);
490 482
483/* Antenna diversity/combining */
484#define ATH_ANT_RX_CURRENT_SHIFT 4
485#define ATH_ANT_RX_MAIN_SHIFT 2
486#define ATH_ANT_RX_MASK 0x3
487
488#define ATH_ANT_DIV_COMB_SHORT_SCAN_INTR 50
489#define ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT 0x100
490#define ATH_ANT_DIV_COMB_MAX_PKTCOUNT 0x200
491#define ATH_ANT_DIV_COMB_INIT_COUNT 95
492#define ATH_ANT_DIV_COMB_MAX_COUNT 100
493#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO 30
494#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 20
495
496#define ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA -3
497#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
498#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
499#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
500#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
501
502enum ath9k_ant_div_comb_lna_conf {
503 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
504 ATH_ANT_DIV_COMB_LNA2,
505 ATH_ANT_DIV_COMB_LNA1,
506 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
507};
508
509struct ath_ant_comb {
510 u16 count;
511 u16 total_pkt_count;
512 bool scan;
513 bool scan_not_start;
514 int main_total_rssi;
515 int alt_total_rssi;
516 int alt_recv_cnt;
517 int main_recv_cnt;
518 int rssi_lna1;
519 int rssi_lna2;
520 int rssi_add;
521 int rssi_sub;
522 int rssi_first;
523 int rssi_second;
524 int rssi_third;
525 bool alt_good;
526 int quick_scan_cnt;
527 int main_conf;
528 enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
529 enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
530 int first_bias;
531 int second_bias;
532 bool first_ratio;
533 bool second_ratio;
534 unsigned long scan_start_time;
535};
536
491/********************/ 537/********************/
492/* Main driver core */ 538/* Main driver core */
493/********************/ 539/********************/
@@ -516,7 +562,6 @@ void ath_deinit_leds(struct ath_softc *sc);
516#define SC_OP_RXFLUSH BIT(7) 562#define SC_OP_RXFLUSH BIT(7)
517#define SC_OP_LED_ASSOCIATED BIT(8) 563#define SC_OP_LED_ASSOCIATED BIT(8)
518#define SC_OP_LED_ON BIT(9) 564#define SC_OP_LED_ON BIT(9)
519#define SC_OP_SCANNING BIT(10)
520#define SC_OP_TSF_RESET BIT(11) 565#define SC_OP_TSF_RESET BIT(11)
521#define SC_OP_BT_PRIORITY_DETECTED BIT(12) 566#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
522#define SC_OP_BT_SCAN BIT(13) 567#define SC_OP_BT_SCAN BIT(13)
@@ -604,6 +649,8 @@ struct ath_softc {
604 struct ath_btcoex btcoex; 649 struct ath_btcoex btcoex;
605 650
606 struct ath_descdma txsdma; 651 struct ath_descdma txsdma;
652
653 struct ath_ant_comb ant_comb;
607}; 654};
608 655
609struct ath_wiphy { 656struct ath_wiphy {
@@ -670,7 +717,7 @@ static inline void ath_ahb_exit(void) {};
670void ath9k_ps_wakeup(struct ath_softc *sc); 717void ath9k_ps_wakeup(struct ath_softc *sc);
671void ath9k_ps_restore(struct ath_softc *sc); 718void ath9k_ps_restore(struct ath_softc *sc);
672 719
673void ath9k_set_bssid_mask(struct ieee80211_hw *hw); 720void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
674int ath9k_wiphy_add(struct ath_softc *sc); 721int ath9k_wiphy_add(struct ath_softc *sc);
675int ath9k_wiphy_del(struct ath_wiphy *aphy); 722int ath9k_wiphy_del(struct ath_wiphy *aphy);
676void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb); 723void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb);
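
The ath9k.h hunk above replaces the per-TID array of buffer pointers with a bitmap sized by BITS_TO_LONGS(), shrinking the structure to one bit per slot. A standalone sketch of that idiom; the kernel gets these helpers from <linux/bitops.h>, and they are re-derived here so the example compiles on its own:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG		(sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define TID_MAX_BUFS		64	/* illustrative, not the real value */

static unsigned long tx_buf[BITS_TO_LONGS(TID_MAX_BUFS)];

static void set_slot(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_slot(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	set_slot(42, tx_buf);
	printf("slot 42 in use: %d\n", test_slot(42, tx_buf));
	return 0;
}
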
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 4d4b22d52dfd..081192e78a46 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -359,11 +359,12 @@ void ath_beacon_tasklet(unsigned long data)
359 sc->beacon.bmisscnt++; 359 sc->beacon.bmisscnt++;
360 360
361 if (sc->beacon.bmisscnt < BSTUCK_THRESH) { 361 if (sc->beacon.bmisscnt < BSTUCK_THRESH) {
362 ath_print(common, ATH_DBG_BEACON, 362 ath_print(common, ATH_DBG_BSTUCK,
363 "missed %u consecutive beacons\n", 363 "missed %u consecutive beacons\n",
364 sc->beacon.bmisscnt); 364 sc->beacon.bmisscnt);
365 ath9k_hw_bstuck_nfcal(ah);
365 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 366 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
366 ath_print(common, ATH_DBG_BEACON, 367 ath_print(common, ATH_DBG_BSTUCK,
367 "beacon is officially stuck\n"); 368 "beacon is officially stuck\n");
368 sc->sc_flags |= SC_OP_TSF_RESET; 369 sc->sc_flags |= SC_OP_TSF_RESET;
369 ath_reset(sc, false); 370 ath_reset(sc, false);
@@ -373,7 +374,7 @@ void ath_beacon_tasklet(unsigned long data)
373 } 374 }
374 375
375 if (sc->beacon.bmisscnt != 0) { 376 if (sc->beacon.bmisscnt != 0) {
376 ath_print(common, ATH_DBG_BEACON, 377 ath_print(common, ATH_DBG_BSTUCK,
377 "resume beacon xmit after %u misses\n", 378 "resume beacon xmit after %u misses\n",
378 sc->beacon.bmisscnt); 379 sc->beacon.bmisscnt);
379 sc->beacon.bmisscnt = 0; 380 sc->beacon.bmisscnt = 0;
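
The beacon tasklet above escalates in two stages: a cheap NF calibration on each miss below the threshold, then a full chip reset once BSTUCK_THRESH consecutive misses accumulate. A standalone sketch of that policy; the threshold value here is hypothetical:

#include <stdio.h>

#define BSTUCK_THRESH 9	/* hypothetical; the real value lives in ath9k.h */

static void handle_beacon_miss(int *bmisscnt)
{
	if (++(*bmisscnt) < BSTUCK_THRESH)
		printf("miss %d: trigger NF calibration\n", *bmisscnt);
	else
		printf("miss %d: beacon stuck, full reset\n", *bmisscnt);
}

int main(void)
{
	int misses = 0;
	int i;

	for (i = 0; i < 10; i++)
		handle_beacon_miss(&misses);
	return 0;
}
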
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index fb4ac15f3b93..6a92e57fddf0 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -168,6 +168,7 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
168static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah) 168static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
169{ 169{
170 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 170 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
171 u32 val;
171 172
172 /* 173 /*
173 * Program coex mode and weight registers to 174 * Program coex mode and weight registers to
@@ -177,6 +178,12 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
177 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights); 178 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
178 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2); 179 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
179 180
181 if (AR_SREV_9271(ah)) {
182 val = REG_READ(ah, 0x50040);
183 val &= 0xFFFFFEFF;
184 REG_WRITE(ah, 0x50040, val);
185 }
186
180 REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1); 187 REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
181 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); 188 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
182 189
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 45208690c0ec..67ee5d735cc1 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -19,8 +19,7 @@
19 19
20/* Common calibration code */ 20/* Common calibration code */
21 21
22/* We can tune this as we go by monitoring really low values */ 22#define ATH9K_NF_TOO_HIGH -60
23#define ATH9K_NF_TOO_LOW -60
24 23
25static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer) 24static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
26{ 25{
@@ -45,11 +44,39 @@ static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
45 return nfval; 44 return nfval;
46} 45}
47 46
48static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h, 47static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
48 struct ath9k_channel *chan)
49{
50 struct ath_nf_limits *limit;
51
52 if (!chan || IS_CHAN_2GHZ(chan))
53 limit = &ah->nf_2g;
54 else
55 limit = &ah->nf_5g;
56
57 return limit;
58}
59
60static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
61 struct ath9k_channel *chan)
62{
63 return ath9k_hw_get_nf_limits(ah, chan)->nominal;
64}
65
66
67static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
68 struct ath9k_hw_cal_data *cal,
49 int16_t *nfarray) 69 int16_t *nfarray)
50{ 70{
71 struct ath_common *common = ath9k_hw_common(ah);
72 struct ath_nf_limits *limit;
73 struct ath9k_nfcal_hist *h;
74 bool high_nf_mid = false;
51 int i; 75 int i;
52 76
77 h = cal->nfCalHist;
78 limit = ath9k_hw_get_nf_limits(ah, ah->curchan);
79
53 for (i = 0; i < NUM_NF_READINGS; i++) { 80 for (i = 0; i < NUM_NF_READINGS; i++) {
54 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i]; 81 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
55 82
@@ -63,7 +90,39 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
63 h[i].privNF = 90 h[i].privNF =
64 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer); 91 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
65 } 92 }
93
94 if (!h[i].privNF)
95 continue;
96
97 if (h[i].privNF > limit->max) {
98 high_nf_mid = true;
99
100 ath_print(common, ATH_DBG_CALIBRATE,
101 "NFmid[%d] (%d) > MAX (%d), %s\n",
102 i, h[i].privNF, limit->max,
103 (cal->nfcal_interference ?
104 "not corrected (due to interference)" :
105 "correcting to MAX"));
106
107 /*
108 * Normally we limit the average noise floor by the
109 * hardware specific maximum here. However if we have
110 * encountered stuck beacons because of interference,
111 * we bypass this limit here in order to better deal
112 * with our environment.
113 */
114 if (!cal->nfcal_interference)
115 h[i].privNF = limit->max;
116 }
66 } 117 }
118
119 /*
120 * If the noise floor seems normal for all chains, assume that
121 * there is no significant interference in the environment anymore.
122 * Re-enable the enforcement of the NF maximum again.
123 */
124 if (!high_nf_mid)
125 cal->nfcal_interference = false;
67} 126}
68 127
69static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah, 128static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
@@ -104,19 +163,6 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
104 ah->cal_samples = 0; 163 ah->cal_samples = 0;
105} 164}
106 165
107static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
108 struct ath9k_channel *chan)
109{
110 struct ath_nf_limits *limit;
111
112 if (!chan || IS_CHAN_2GHZ(chan))
113 limit = &ah->nf_2g;
114 else
115 limit = &ah->nf_5g;
116
117 return limit->nominal;
118}
119
120/* This is done for the currently configured channel */ 166/* This is done for the currently configured channel */
121bool ath9k_hw_reset_calvalid(struct ath_hw *ah) 167bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
122{ 168{
@@ -277,10 +323,10 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
277 "NF calibrated [%s] [chain %d] is %d\n", 323 "NF calibrated [%s] [chain %d] is %d\n",
278 (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]); 324 (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
279 325
280 if (nf[i] > limit->max) { 326 if (nf[i] > ATH9K_NF_TOO_HIGH) {
281 ath_print(common, ATH_DBG_CALIBRATE, 327 ath_print(common, ATH_DBG_CALIBRATE,
282 "NF[%d] (%d) > MAX (%d), correcting to MAX", 328 "NF[%d] (%d) > MAX (%d), correcting to MAX",
283 i, nf[i], limit->max); 329 i, nf[i], ATH9K_NF_TOO_HIGH);
284 nf[i] = limit->max; 330 nf[i] = limit->max;
285 } else if (nf[i] < limit->min) { 331 } else if (nf[i] < limit->min) {
286 ath_print(common, ATH_DBG_CALIBRATE, 332 ath_print(common, ATH_DBG_CALIBRATE,
@@ -326,7 +372,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
326 372
327 h = caldata->nfCalHist; 373 h = caldata->nfCalHist;
328 caldata->nfcal_pending = false; 374 caldata->nfcal_pending = false;
329 ath9k_hw_update_nfcal_hist_buffer(h, nfarray); 375 ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
330 caldata->rawNoiseFloor = h[0].privNF; 376 caldata->rawNoiseFloor = h[0].privNF;
331 return true; 377 return true;
332} 378}
@@ -361,3 +407,28 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
361 return ah->caldata->rawNoiseFloor; 407 return ah->caldata->rawNoiseFloor;
362} 408}
363EXPORT_SYMBOL(ath9k_hw_getchan_noise); 409EXPORT_SYMBOL(ath9k_hw_getchan_noise);
410
411void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
412{
413 struct ath9k_hw_cal_data *caldata = ah->caldata;
414
415 if (unlikely(!caldata))
416 return;
417
418 /*
419 * If beacons are stuck, the most likely cause is interference.
420 * Triggering a noise floor calibration at this point helps the
421 * hardware adapt to a noisy environment much faster.
422 * To ensure that we recover from stuck beacons quickly, let
423 * the baseband update the internal NF value itself, similar to
424 * what is being done after a full reset.
425 */
426 if (!caldata->nfcal_pending)
427 ath9k_hw_start_nfcal(ah, true);
428 else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
429 ath9k_hw_getnf(ah, ah->curchan);
430
431 caldata->nfcal_interference = true;
432}
433EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
434
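
The calibration hunk above clamps the averaged noise floor to the hardware maximum unless stuck beacons have flagged interference, in which case the clamp is bypassed so the baseband can track the noisier environment. A standalone sketch of that policy, with an illustrative limit rather than a real per-band value:

#include <stdbool.h>
#include <stdio.h>

#define NF_MAX	(-95)	/* hypothetical per-band maximum (dBm) */

struct nf_state {
	bool interference;	/* set once beacons get stuck */
};

static int nf_apply_limit(struct nf_state *st, int nf_mid)
{
	if (nf_mid > NF_MAX && !st->interference)
		return NF_MAX;	/* normal path: correct to MAX */
	return nf_mid;		/* under interference: accept the raw value */
}

int main(void)
{
	struct nf_state st = { .interference = false };

	printf("%d\n", nf_apply_limit(&st, -80));	/* clamped to -95 */
	st.interference = true;
	printf("%d\n", nf_apply_limit(&st, -80));	/* left at -80 */
	return 0;
}
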
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 0a304b3eeeb6..5b053a6260b2 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -113,6 +113,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
113bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan); 113bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan);
114void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, 114void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
115 struct ath9k_channel *chan); 115 struct ath9k_channel *chan);
116void ath9k_hw_bstuck_nfcal(struct ath_hw *ah);
116s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan); 117s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
117void ath9k_hw_reset_calibration(struct ath_hw *ah, 118void ath9k_hw_reset_calibration(struct ath_hw *ah,
118 struct ath9k_cal_list *currCal); 119 struct ath9k_cal_list *currCal);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index c86f7d3593ab..f43a2d98421c 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -46,12 +46,17 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
46 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 46 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
47 47
48 if (tx_info->control.hw_key) { 48 if (tx_info->control.hw_key) {
49 if (tx_info->control.hw_key->alg == ALG_WEP) 49 switch (tx_info->control.hw_key->cipher) {
50 case WLAN_CIPHER_SUITE_WEP40:
51 case WLAN_CIPHER_SUITE_WEP104:
50 return ATH9K_KEY_TYPE_WEP; 52 return ATH9K_KEY_TYPE_WEP;
51 else if (tx_info->control.hw_key->alg == ALG_TKIP) 53 case WLAN_CIPHER_SUITE_TKIP:
52 return ATH9K_KEY_TYPE_TKIP; 54 return ATH9K_KEY_TYPE_TKIP;
53 else if (tx_info->control.hw_key->alg == ALG_CCMP) 55 case WLAN_CIPHER_SUITE_CCMP:
54 return ATH9K_KEY_TYPE_AES; 56 return ATH9K_KEY_TYPE_AES;
57 default:
58 break;
59 }
55 } 60 }
56 61
57 return ATH9K_KEY_TYPE_CLEAR; 62 return ATH9K_KEY_TYPE_CLEAR;
@@ -143,276 +148,49 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
143} 148}
144EXPORT_SYMBOL(ath9k_cmn_get_curchannel); 149EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
145 150
146static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key, 151int ath9k_cmn_count_streams(unsigned int chainmask, int max)
147 struct ath9k_keyval *hk, const u8 *addr,
148 bool authenticator)
149{
150 struct ath_hw *ah = common->ah;
151 const u8 *key_rxmic;
152 const u8 *key_txmic;
153
154 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
155 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
156
157 if (addr == NULL) {
158 /*
159 * Group key installation - only two key cache entries are used
160 * regardless of splitmic capability since group key is only
161 * used either for TX or RX.
162 */
163 if (authenticator) {
164 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
165 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
166 } else {
167 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
168 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
169 }
170 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
171 }
172 if (!common->splitmic) {
173 /* TX and RX keys share the same key cache entry. */
174 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
175 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
176 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
177 }
178
179 /* Separate key cache entries for TX and RX */
180
181 /* TX key goes at first index, RX key at +32. */
182 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
183 if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
184 /* TX MIC entry failed. No need to proceed further */
185 ath_print(common, ATH_DBG_FATAL,
186 "Setting TX MIC Key Failed\n");
187 return 0;
188 }
189
190 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
191 /* XXX delete tx key on failure? */
192 return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
193}
194
195static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
196{
197 int i;
198
199 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
200 if (test_bit(i, common->keymap) ||
201 test_bit(i + 64, common->keymap))
202 continue; /* At least one part of TKIP key allocated */
203 if (common->splitmic &&
204 (test_bit(i + 32, common->keymap) ||
205 test_bit(i + 64 + 32, common->keymap)))
206 continue; /* At least one part of TKIP key allocated */
207
208 /* Found a free slot for a TKIP key */
209 return i;
210 }
211 return -1;
212}
213
214static int ath_reserve_key_cache_slot(struct ath_common *common,
215 enum ieee80211_key_alg alg)
216{ 152{
217 int i; 153 int streams = 0;
218
219 if (alg == ALG_TKIP)
220 return ath_reserve_key_cache_slot_tkip(common);
221
222 /* First, try to find slots that would not be available for TKIP. */
223 if (common->splitmic) {
224 for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
225 if (!test_bit(i, common->keymap) &&
226 (test_bit(i + 32, common->keymap) ||
227 test_bit(i + 64, common->keymap) ||
228 test_bit(i + 64 + 32, common->keymap)))
229 return i;
230 if (!test_bit(i + 32, common->keymap) &&
231 (test_bit(i, common->keymap) ||
232 test_bit(i + 64, common->keymap) ||
233 test_bit(i + 64 + 32, common->keymap)))
234 return i + 32;
235 if (!test_bit(i + 64, common->keymap) &&
236 (test_bit(i , common->keymap) ||
237 test_bit(i + 32, common->keymap) ||
238 test_bit(i + 64 + 32, common->keymap)))
239 return i + 64;
240 if (!test_bit(i + 64 + 32, common->keymap) &&
241 (test_bit(i, common->keymap) ||
242 test_bit(i + 32, common->keymap) ||
243 test_bit(i + 64, common->keymap)))
244 return i + 64 + 32;
245 }
246 } else {
247 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
248 if (!test_bit(i, common->keymap) &&
249 test_bit(i + 64, common->keymap))
250 return i;
251 if (test_bit(i, common->keymap) &&
252 !test_bit(i + 64, common->keymap))
253 return i + 64;
254 }
255 }
256
257 /* No partially used TKIP slots, pick any available slot */
258 for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
259 /* Do not allow slots that could be needed for TKIP group keys
260 * to be used. This limitation could be removed if we know that
261 * TKIP will not be used. */
262 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
263 continue;
264 if (common->splitmic) {
265 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
266 continue;
267 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
268 continue;
269 }
270 154
271 if (!test_bit(i, common->keymap)) 155 do {
272 return i; /* Found a free slot for a key */ 156 if (++streams == max)
273 } 157 break;
158 } while ((chainmask = chainmask & (chainmask - 1)));
274 159
275 /* No free slot found */ 160 return streams;
276 return -1;
277} 161}
162EXPORT_SYMBOL(ath9k_cmn_count_streams);
278 163
279/* 164/*
280 * Configure encryption in the HW. 165 * Configures appropriate weight based on stomp type.
281 */ 166 */
282int ath9k_cmn_key_config(struct ath_common *common, 167void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
283 struct ieee80211_vif *vif, 168 enum ath_stomp_type stomp_type)
284 struct ieee80211_sta *sta,
285 struct ieee80211_key_conf *key)
286{ 169{
287 struct ath_hw *ah = common->ah; 170 struct ath_hw *ah = common->ah;
288 struct ath9k_keyval hk;
289 const u8 *mac = NULL;
290 u8 gmac[ETH_ALEN];
291 int ret = 0;
292 int idx;
293 171
294 memset(&hk, 0, sizeof(hk)); 172 switch (stomp_type) {
295 173 case ATH_BTCOEX_STOMP_ALL:
296 switch (key->alg) { 174 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
297 case ALG_WEP: 175 AR_STOMP_ALL_WLAN_WGHT);
298 hk.kv_type = ATH9K_CIPHER_WEP;
299 break; 176 break;
300 case ALG_TKIP: 177 case ATH_BTCOEX_STOMP_LOW:
301 hk.kv_type = ATH9K_CIPHER_TKIP; 178 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
179 AR_STOMP_LOW_WLAN_WGHT);
302 break; 180 break;
303 case ALG_CCMP: 181 case ATH_BTCOEX_STOMP_NONE:
304 hk.kv_type = ATH9K_CIPHER_AES_CCM; 182 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
183 AR_STOMP_NONE_WLAN_WGHT);
305 break; 184 break;
306 default: 185 default:
307 return -EOPNOTSUPP; 186 ath_print(common, ATH_DBG_BTCOEX,
308 } 187 "Invalid Stomptype\n");
309 188 break;
310 hk.kv_len = key->keylen;
311 memcpy(hk.kv_val, key->key, key->keylen);
312
313 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
314 switch (vif->type) {
315 case NL80211_IFTYPE_AP:
316 memcpy(gmac, vif->addr, ETH_ALEN);
317 gmac[0] |= 0x01;
318 mac = gmac;
319 idx = ath_reserve_key_cache_slot(common, key->alg);
320 break;
321 case NL80211_IFTYPE_ADHOC:
322 if (!sta) {
323 idx = key->keyidx;
324 break;
325 }
326 memcpy(gmac, sta->addr, ETH_ALEN);
327 gmac[0] |= 0x01;
328 mac = gmac;
329 idx = ath_reserve_key_cache_slot(common, key->alg);
330 break;
331 default:
332 idx = key->keyidx;
333 break;
334 }
335 } else if (key->keyidx) {
336 if (WARN_ON(!sta))
337 return -EOPNOTSUPP;
338 mac = sta->addr;
339
340 if (vif->type != NL80211_IFTYPE_AP) {
341 /* Only keyidx 0 should be used with unicast key, but
342 * allow this for client mode for now. */
343 idx = key->keyidx;
344 } else
345 return -EIO;
346 } else {
347 if (WARN_ON(!sta))
348 return -EOPNOTSUPP;
349 mac = sta->addr;
350
351 idx = ath_reserve_key_cache_slot(common, key->alg);
352 }
353
354 if (idx < 0)
355 return -ENOSPC; /* no free key cache entries */
356
357 if (key->alg == ALG_TKIP)
358 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
359 vif->type == NL80211_IFTYPE_AP);
360 else
361 ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
362
363 if (!ret)
364 return -EIO;
365
366 set_bit(idx, common->keymap);
367 if (key->alg == ALG_TKIP) {
368 set_bit(idx + 64, common->keymap);
369 if (common->splitmic) {
370 set_bit(idx + 32, common->keymap);
371 set_bit(idx + 64 + 32, common->keymap);
372 }
373 }
374
375 return idx;
376}
377EXPORT_SYMBOL(ath9k_cmn_key_config);
378
379/*
380 * Delete Key.
381 */
382void ath9k_cmn_key_delete(struct ath_common *common,
383 struct ieee80211_key_conf *key)
384{
385 struct ath_hw *ah = common->ah;
386
387 ath9k_hw_keyreset(ah, key->hw_key_idx);
388 if (key->hw_key_idx < IEEE80211_WEP_NKID)
389 return;
390
391 clear_bit(key->hw_key_idx, common->keymap);
392 if (key->alg != ALG_TKIP)
393 return;
394
395 clear_bit(key->hw_key_idx + 64, common->keymap);
396 if (common->splitmic) {
397 ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
398 clear_bit(key->hw_key_idx + 32, common->keymap);
399 clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
400 } 189 }
401}
402EXPORT_SYMBOL(ath9k_cmn_key_delete);
403
404int ath9k_cmn_count_streams(unsigned int chainmask, int max)
405{
406 int streams = 0;
407
408 do {
409 if (++streams == max)
410 break;
411 } while ((chainmask = chainmask & (chainmask - 1)));
412 190
413 return streams; 191 ath9k_hw_btcoex_enable(ah);
414} 192}
415EXPORT_SYMBOL(ath9k_cmn_count_streams); 193EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp);
416 194
417static int __init ath9k_cmn_init(void) 195static int __init ath9k_cmn_init(void)
418{ 196{
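
ath9k_cmn_count_streams(), kept in the hunk above, counts chains with the `x & (x - 1)` trick: each iteration clears the lowest set bit, so the loop runs once per set bit, capped at `max` spatial streams. A standalone sketch:

#include <stdio.h>

static int count_streams(unsigned int chainmask, int max)
{
	int streams = 0;

	do {
		if (++streams == max)
			break;
	} while ((chainmask = chainmask & (chainmask - 1)));

	return streams;
}

int main(void)
{
	printf("%d\n", count_streams(0x7, 2));	/* three chains, capped at 2 */
	printf("%d\n", count_streams(0x5, 4));	/* two set bits -> 2 */
	return 0;
}
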
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 97809d39c73f..fea3b3315391 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -52,16 +52,20 @@
52#define ATH_EP_RND(x, mul) \ 52#define ATH_EP_RND(x, mul) \
53 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 53 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
54 54
55/* Defines the BT AR_BT_COEX_WGHT used */
56enum ath_stomp_type {
57 ATH_BTCOEX_NO_STOMP,
58 ATH_BTCOEX_STOMP_ALL,
59 ATH_BTCOEX_STOMP_LOW,
60 ATH_BTCOEX_STOMP_NONE
61};
62
55int ath9k_cmn_padpos(__le16 frame_control); 63int ath9k_cmn_padpos(__le16 frame_control);
56int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); 64int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
57void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw, 65void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
58 struct ath9k_channel *ichan); 66 struct ath9k_channel *ichan);
59struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw, 67struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
60 struct ath_hw *ah); 68 struct ath_hw *ah);
61int ath9k_cmn_key_config(struct ath_common *common,
62 struct ieee80211_vif *vif,
63 struct ieee80211_sta *sta,
64 struct ieee80211_key_conf *key);
65void ath9k_cmn_key_delete(struct ath_common *common,
66 struct ieee80211_key_conf *key);
67int ath9k_cmn_count_streams(unsigned int chainmask, int max); 69int ath9k_cmn_count_streams(unsigned int chainmask, int max);
70void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
71 enum ath_stomp_type stomp_type);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 54aae931424e..d65a896a421d 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -492,12 +492,55 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
492 unsigned int len = 0; 492 unsigned int len = 0;
493 int i; 493 int i;
494 u8 addr[ETH_ALEN]; 494 u8 addr[ETH_ALEN];
495 u32 tmp;
495 496
496 len += snprintf(buf + len, sizeof(buf) - len, 497 len += snprintf(buf + len, sizeof(buf) - len,
497 "primary: %s (%s chan=%d ht=%d)\n", 498 "primary: %s (%s chan=%d ht=%d)\n",
498 wiphy_name(sc->pri_wiphy->hw->wiphy), 499 wiphy_name(sc->pri_wiphy->hw->wiphy),
499 ath_wiphy_state_str(sc->pri_wiphy->state), 500 ath_wiphy_state_str(sc->pri_wiphy->state),
500 sc->pri_wiphy->chan_idx, sc->pri_wiphy->chan_is_ht); 501 sc->pri_wiphy->chan_idx, sc->pri_wiphy->chan_is_ht);
502
503 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
504 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
505 len += snprintf(buf + len, sizeof(buf) - len,
506 "addr: %pM\n", addr);
507 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_BSSMSKL), addr);
508 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4);
509 len += snprintf(buf + len, sizeof(buf) - len,
510 "addrmask: %pM\n", addr);
511 tmp = ath9k_hw_getrxfilter(sc->sc_ah);
512 len += snprintf(buf + len, sizeof(buf) - len,
513 "rfilt: 0x%x", tmp);
514 if (tmp & ATH9K_RX_FILTER_UCAST)
515 len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
516 if (tmp & ATH9K_RX_FILTER_MCAST)
517 len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
518 if (tmp & ATH9K_RX_FILTER_BCAST)
519 len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
520 if (tmp & ATH9K_RX_FILTER_CONTROL)
521 len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
522 if (tmp & ATH9K_RX_FILTER_BEACON)
523 len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
524 if (tmp & ATH9K_RX_FILTER_PROM)
525 len += snprintf(buf + len, sizeof(buf) - len, " PROM");
526 if (tmp & ATH9K_RX_FILTER_PROBEREQ)
527 len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
528 if (tmp & ATH9K_RX_FILTER_PHYERR)
529 len += snprintf(buf + len, sizeof(buf) - len, " PHYERR");
530 if (tmp & ATH9K_RX_FILTER_MYBEACON)
531 len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON");
532 if (tmp & ATH9K_RX_FILTER_COMP_BAR)
533 len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
534 if (tmp & ATH9K_RX_FILTER_PSPOLL)
535 len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL");
536 if (tmp & ATH9K_RX_FILTER_PHYRADAR)
537 len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
538 if (tmp & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
539 len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL\n");
540 else
541 len += snprintf(buf + len, sizeof(buf) - len, "\n");
542
543 /* Put variable-length stuff down here, and check for overflows. */
501 for (i = 0; i < sc->num_sec_wiphy; i++) { 544 for (i = 0; i < sc->num_sec_wiphy; i++) {
502 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 545 struct ath_wiphy *aphy = sc->sec_wiphy[i];
503 if (aphy == NULL) 546 if (aphy == NULL)
@@ -508,16 +551,6 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
508 ath_wiphy_state_str(aphy->state), 551 ath_wiphy_state_str(aphy->state),
509 aphy->chan_idx, aphy->chan_is_ht); 552 aphy->chan_idx, aphy->chan_is_ht);
510 } 553 }
511
512 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
513 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
514 len += snprintf(buf + len, sizeof(buf) - len,
515 "addr: %pM\n", addr);
516 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_BSSMSKL), addr);
517 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4);
518 len += snprintf(buf + len, sizeof(buf) - len,
519 "addrmask: %pM\n", addr);
520
521 if (len > sizeof(buf)) 554 if (len > sizeof(buf))
522 len = sizeof(buf); 555 len = sizeof(buf);
523 556
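
read_file_wiphy() above accumulates output with repeated snprintf() calls. Since snprintf() returns the length it *would* have written, `len` can exceed the buffer after a truncated append and must be clamped at the end, which is exactly what the surviving `if (len > sizeof(buf))` check does. A standalone sketch:

#include <stdio.h>

int main(void)
{
	char buf[32];
	unsigned int len = 0;

	len += snprintf(buf + len, sizeof(buf) - len, "rfilt: 0x%x", 0x117u);
	len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
	len += snprintf(buf + len, sizeof(buf) - len, " MCAST BCAST BEACON");

	if (len > sizeof(buf))
		len = sizeof(buf);	/* clamp: len reflects would-be size */

	printf("reporting %u bytes: %s\n", len, buf);
	return 0;
}
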
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 0b09db0f8e7d..3030564a0f21 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -266,6 +266,8 @@ enum eeprom_param {
266 EEP_INTERNAL_REGULATOR, 266 EEP_INTERNAL_REGULATOR,
267 EEP_SWREG, 267 EEP_SWREG,
268 EEP_PAPRD, 268 EEP_PAPRD,
269 EEP_MODAL_VER,
270 EEP_ANT_DIV_CTL1,
269}; 271};
270 272
271enum ar5416_rates { 273enum ar5416_rates {
@@ -670,7 +672,8 @@ struct eeprom_ops {
670 bool (*fill_eeprom)(struct ath_hw *hw); 672 bool (*fill_eeprom)(struct ath_hw *hw);
671 int (*get_eeprom_ver)(struct ath_hw *hw); 673 int (*get_eeprom_ver)(struct ath_hw *hw);
672 int (*get_eeprom_rev)(struct ath_hw *hw); 674 int (*get_eeprom_rev)(struct ath_hw *hw);
673 u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band); 675 u8 (*get_num_ant_config)(struct ath_hw *hw,
676 enum ath9k_hal_freq_band band);
674 u32 (*get_eeprom_antenna_cfg)(struct ath_hw *hw, 677 u32 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
675 struct ath9k_channel *chan); 678 struct ath9k_channel *chan);
676 void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan); 679 void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9cccd12e8f21..ead8b0dd3b53 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -213,6 +213,10 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
213 return 0; 213 return 0;
214 case EEP_PWR_TABLE_OFFSET: 214 case EEP_PWR_TABLE_OFFSET:
215 return AR5416_PWR_TABLE_OFFSET_DB; 215 return AR5416_PWR_TABLE_OFFSET_DB;
216 case EEP_MODAL_VER:
217 return pModal->version;
218 case EEP_ANT_DIV_CTL1:
219 return pModal->antdiv_ctl1;
216 default: 220 default:
217 return 0; 221 return 0;
218 } 222 }
@@ -1157,7 +1161,7 @@ static u32 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
1157} 1161}
1158 1162
1159static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah, 1163static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah,
1160 enum ieee80211_band freq_band) 1164 enum ath9k_hal_freq_band freq_band)
1161{ 1165{
1162 return 1; 1166 return 1;
1163} 1167}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index dff2da777312..e6186515d05b 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -1126,7 +1126,7 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
1126} 1126}
1127 1127
1128static u8 ath9k_hw_ar9287_get_num_ant_config(struct ath_hw *ah, 1128static u8 ath9k_hw_ar9287_get_num_ant_config(struct ath_hw *ah,
1129 enum ieee80211_band freq_band) 1129 enum ath9k_hal_freq_band freq_band)
1130{ 1130{
1131 return 1; 1131 return 1;
1132} 1132}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index afa2b73ddbdd..23f480d4c770 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -1418,11 +1418,11 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1418} 1418}
1419 1419
1420static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah, 1420static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
1421 enum ieee80211_band freq_band) 1421 enum ath9k_hal_freq_band freq_band)
1422{ 1422{
1423 struct ar5416_eeprom_def *eep = &ah->eeprom.def; 1423 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
1424 struct modal_eep_header *pModal = 1424 struct modal_eep_header *pModal =
1425 &(eep->modalHeader[ATH9K_HAL_FREQ_BAND_2GHZ == freq_band]); 1425 &(eep->modalHeader[freq_band]);
1426 struct base_eep_header *pBase = &eep->baseEepHeader; 1426 struct base_eep_header *pBase = &eep->baseEepHeader;
1427 u8 num_ant_config; 1427 u8 num_ant_config;
1428 1428
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 3a8ee999da5d..4a9a68bba324 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -251,36 +251,6 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
251 } 251 }
252} 252}
253 253
254/*
255 * Configures appropriate weight based on stomp type.
256 */
257static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
258 enum ath_stomp_type stomp_type)
259{
260 struct ath_hw *ah = sc->sc_ah;
261
262 switch (stomp_type) {
263 case ATH_BTCOEX_STOMP_ALL:
264 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
265 AR_STOMP_ALL_WLAN_WGHT);
266 break;
267 case ATH_BTCOEX_STOMP_LOW:
268 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
269 AR_STOMP_LOW_WLAN_WGHT);
270 break;
271 case ATH_BTCOEX_STOMP_NONE:
272 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
273 AR_STOMP_NONE_WLAN_WGHT);
274 break;
275 default:
276 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
277 "Invalid Stomptype\n");
278 break;
279 }
280
281 ath9k_hw_btcoex_enable(ah);
282}
283
284static void ath9k_gen_timer_start(struct ath_hw *ah, 254static void ath9k_gen_timer_start(struct ath_hw *ah,
285 struct ath_gen_timer *timer, 255 struct ath_gen_timer *timer,
286 u32 timer_next, 256 u32 timer_next,
@@ -319,6 +289,7 @@ static void ath_btcoex_period_timer(unsigned long data)
319 struct ath_softc *sc = (struct ath_softc *) data; 289 struct ath_softc *sc = (struct ath_softc *) data;
320 struct ath_hw *ah = sc->sc_ah; 290 struct ath_hw *ah = sc->sc_ah;
321 struct ath_btcoex *btcoex = &sc->btcoex; 291 struct ath_btcoex *btcoex = &sc->btcoex;
292 struct ath_common *common = ath9k_hw_common(ah);
322 u32 timer_period; 293 u32 timer_period;
323 bool is_btscan; 294 bool is_btscan;
324 295
@@ -328,7 +299,7 @@ static void ath_btcoex_period_timer(unsigned long data)
328 299
329 spin_lock_bh(&btcoex->btcoex_lock); 300 spin_lock_bh(&btcoex->btcoex_lock);
330 301
331 ath9k_btcoex_bt_stomp(sc, is_btscan ? ATH_BTCOEX_STOMP_ALL : 302 ath9k_cmn_btcoex_bt_stomp(common, is_btscan ? ATH_BTCOEX_STOMP_ALL :
332 btcoex->bt_stomp_type); 303 btcoex->bt_stomp_type);
333 304
334 spin_unlock_bh(&btcoex->btcoex_lock); 305 spin_unlock_bh(&btcoex->btcoex_lock);
@@ -359,17 +330,18 @@ static void ath_btcoex_no_stomp_timer(void *arg)
359 struct ath_softc *sc = (struct ath_softc *)arg; 330 struct ath_softc *sc = (struct ath_softc *)arg;
360 struct ath_hw *ah = sc->sc_ah; 331 struct ath_hw *ah = sc->sc_ah;
361 struct ath_btcoex *btcoex = &sc->btcoex; 332 struct ath_btcoex *btcoex = &sc->btcoex;
333 struct ath_common *common = ath9k_hw_common(ah);
362 bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN; 334 bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
363 335
364 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX, 336 ath_print(common, ATH_DBG_BTCOEX,
365 "no stomp timer running\n"); 337 "no stomp timer running\n");
366 338
367 spin_lock_bh(&btcoex->btcoex_lock); 339 spin_lock_bh(&btcoex->btcoex_lock);
368 340
369 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan) 341 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
370 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE); 342 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_NONE);
371 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) 343 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
372 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW); 344 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_LOW);
373 345
374 spin_unlock_bh(&btcoex->btcoex_lock); 346 spin_unlock_bh(&btcoex->btcoex_lock);
375} 347}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 17e7a9a367e7..728d904c74d7 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -92,10 +92,10 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
92 cmd->skb = skb; 92 cmd->skb = skb;
93 cmd->hif_dev = hif_dev; 93 cmd->hif_dev = hif_dev;
94 94
95 usb_fill_int_urb(urb, hif_dev->udev, 95 usb_fill_bulk_urb(urb, hif_dev->udev,
96 usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE), 96 usb_sndbulkpipe(hif_dev->udev, USB_REG_OUT_PIPE),
97 skb->data, skb->len, 97 skb->data, skb->len,
98 hif_usb_regout_cb, cmd, 1); 98 hif_usb_regout_cb, cmd);
99 99
100 usb_anchor_urb(urb, &hif_dev->regout_submitted); 100 usb_anchor_urb(urb, &hif_dev->regout_submitted);
101 ret = usb_submit_urb(urb, GFP_KERNEL); 101 ret = usb_submit_urb(urb, GFP_KERNEL);
@@ -541,7 +541,8 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
541 } 541 }
542 542
543 usb_fill_int_urb(urb, hif_dev->udev, 543 usb_fill_int_urb(urb, hif_dev->udev,
544 usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), 544 usb_rcvbulkpipe(hif_dev->udev,
545 USB_REG_IN_PIPE),
545 nskb->data, MAX_REG_IN_BUF_SIZE, 546 nskb->data, MAX_REG_IN_BUF_SIZE,
546 ath9k_hif_usb_reg_in_cb, nskb, 1); 547 ath9k_hif_usb_reg_in_cb, nskb, 1);
547 548
@@ -720,7 +721,8 @@ static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
720 goto err; 721 goto err;
721 722
722 usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev, 723 usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev,
723 usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), 724 usb_rcvbulkpipe(hif_dev->udev,
725 USB_REG_IN_PIPE),
724 skb->data, MAX_REG_IN_BUF_SIZE, 726 skb->data, MAX_REG_IN_BUF_SIZE,
725 ath9k_hif_usb_reg_in_cb, skb, 1); 727 ath9k_hif_usb_reg_in_cb, skb, 1);
726 728
@@ -822,7 +824,9 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
822 824
823static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) 825static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
824{ 826{
825 int ret; 827 int ret, idx;
828 struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
829 struct usb_endpoint_descriptor *endp;
826 830
827 /* Request firmware */ 831 /* Request firmware */
828 ret = request_firmware(&hif_dev->firmware, hif_dev->fw_name, 832 ret = request_firmware(&hif_dev->firmware, hif_dev->fw_name,
@@ -850,6 +854,22 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
850 goto err_fw_download; 854 goto err_fw_download;
851 } 855 }
852 856
857 /* On downloading the firmware to the target, the USB descriptor of EP4
858 * is 'patched' to change the type of the endpoint to Bulk. This will
859 * bring down CPU usage during the scan period.
860 */
861 for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
862 endp = &alt->endpoint[idx].desc;
863 if (((endp->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
864 == 0x04) &&
865 ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
866 == USB_ENDPOINT_XFER_INT)) {
867 endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
868 endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
869 endp->bInterval = 0;
870 }
871 }
872
853 return 0; 873 return 0;
854 874
855err_fw_download: 875err_fw_download:
@@ -920,7 +940,8 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
920 } 940 }
921 941
922 ret = ath9k_htc_hw_init(hif_dev->htc_handle, 942 ret = ath9k_htc_hw_init(hif_dev->htc_handle,
923 &hif_dev->udev->dev, hif_dev->device_id); 943 &hif_dev->udev->dev, hif_dev->device_id,
944 hif_dev->udev->product);
924 if (ret) { 945 if (ret) {
925 ret = -EINVAL; 946 ret = -EINVAL;
926 goto err_htc_hw_init; 947 goto err_htc_hw_init;
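
The regout path above now uses a bulk URB outright, while the reg-in path keeps usb_fill_int_urb() but points it at a bulk pipe, relying on the EP4 descriptor patch applied in ath9k_hif_usb_dev_init(). As general reference, not part of the patch, here is a minimal sketch of selecting the URB initializer from the endpoint type actually present; the helper name fill_reg_urb() is made up for illustration:

#include <linux/usb.h>

/* Sketch: pick the URB setup call from the endpoint descriptor.
 * usb_endpoint_xfer_int(), usb_endpoint_num() and friends are the
 * standard helpers from <linux/usb.h>. */
static void fill_reg_urb(struct urb *urb, struct usb_device *udev,
			 struct usb_endpoint_descriptor *epd,
			 void *buf, int len,
			 usb_complete_t complete_fn, void *context)
{
	if (usb_endpoint_xfer_int(epd))
		usb_fill_int_urb(urb, udev,
				 usb_sndintpipe(udev, usb_endpoint_num(epd)),
				 buf, len, complete_fn, context,
				 epd->bInterval);
	else
		usb_fill_bulk_urb(urb, udev,
				  usb_sndbulkpipe(udev, usb_endpoint_num(epd)),
				  buf, len, complete_fn, context);
}
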
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 43b9e21bc562..75ecf6a30d25 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -316,17 +316,32 @@ struct htc_beacon_config {
316 u8 dtim_count; 316 u8 dtim_count;
317}; 317};
318 318
319#define OP_INVALID BIT(0) 319struct ath_btcoex {
320#define OP_SCANNING BIT(1) 320 u32 bt_priority_cnt;
321#define OP_FULL_RESET BIT(2) 321 unsigned long bt_priority_time;
322#define OP_LED_ASSOCIATED BIT(3) 322 int bt_stomp_type; /* Types of BT stomping */
323#define OP_LED_ON BIT(4) 323 u32 btcoex_no_stomp;
324#define OP_PREAMBLE_SHORT BIT(5) 324 u32 btcoex_period;
325#define OP_PROTECT_ENABLE BIT(6) 325 u32 btscan_no_stomp;
326#define OP_ASSOCIATED BIT(7) 326};
327#define OP_ENABLE_BEACON BIT(8) 327
328#define OP_LED_DEINIT BIT(9) 328void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv);
329#define OP_UNPLUGGED BIT(10) 329void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv);
330void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv);
331
332#define OP_INVALID BIT(0)
333#define OP_SCANNING BIT(1)
334#define OP_FULL_RESET BIT(2)
335#define OP_LED_ASSOCIATED BIT(3)
336#define OP_LED_ON BIT(4)
337#define OP_PREAMBLE_SHORT BIT(5)
338#define OP_PROTECT_ENABLE BIT(6)
339#define OP_ASSOCIATED BIT(7)
340#define OP_ENABLE_BEACON BIT(8)
341#define OP_LED_DEINIT BIT(9)
342#define OP_UNPLUGGED BIT(10)
343#define OP_BT_PRIORITY_DETECTED BIT(11)
344#define OP_BT_SCAN BIT(12)
330 345
331struct ath9k_htc_priv { 346struct ath9k_htc_priv {
332 struct device *dev; 347 struct device *dev;
@@ -391,6 +406,9 @@ struct ath9k_htc_priv {
391 int cabq; 406 int cabq;
392 int hwq_map[WME_NUM_AC]; 407 int hwq_map[WME_NUM_AC];
393 408
409 struct ath_btcoex btcoex;
410 struct delayed_work coex_period_work;
411 struct delayed_work duty_cycle_work;
394#ifdef CONFIG_ATH9K_HTC_DEBUGFS 412#ifdef CONFIG_ATH9K_HTC_DEBUGFS
395 struct ath9k_debug debug; 413 struct ath9k_debug debug;
396#endif 414#endif
@@ -443,7 +461,7 @@ void ath9k_init_leds(struct ath9k_htc_priv *priv);
443void ath9k_deinit_leds(struct ath9k_htc_priv *priv); 461void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
444 462
445int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, 463int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
446 u16 devid); 464 u16 devid, char *product);
447void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug); 465void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
448#ifdef CONFIG_PM 466#ifdef CONFIG_PM
449int ath9k_htc_resume(struct htc_target *htc_handle); 467int ath9k_htc_resume(struct htc_target *htc_handle);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
new file mode 100644
index 000000000000..50eec9a3b88c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -0,0 +1,134 @@
1#include "htc.h"
2
3/******************/
4/* BTCOEX */
5/******************/
6
7/*
8 * Detects if there is any priority bt traffic
9 */
10static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
11{
12 struct ath_btcoex *btcoex = &priv->btcoex;
13 struct ath_hw *ah = priv->ah;
14
15 if (ath9k_hw_gpio_get(ah, ah->btcoex_hw.btpriority_gpio))
16 btcoex->bt_priority_cnt++;
17
18 if (time_after(jiffies, btcoex->bt_priority_time +
19 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
20 priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
21 /* Detect if colocated bt started scanning */
22 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
23 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
24 "BT scan detected\n");
25 priv->op_flags |= (OP_BT_SCAN |
26 OP_BT_PRIORITY_DETECTED);
27 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
28 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
29 "BT priority traffic detected\n");
30 priv->op_flags |= OP_BT_PRIORITY_DETECTED;
31 }
32
33 btcoex->bt_priority_cnt = 0;
34 btcoex->bt_priority_time = jiffies;
35 }
36}
37
38/*
39 * This is the master bt coex work which runs every
40 * 45ms; bt traffic is given priority during 55% of this
41 * period while wlan gets the remaining 45%.
42 */
43static void ath_btcoex_period_work(struct work_struct *work)
44{
45 struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
46 coex_period_work.work);
47 struct ath_btcoex *btcoex = &priv->btcoex;
48 struct ath_common *common = ath9k_hw_common(priv->ah);
49 u32 timer_period;
50 bool is_btscan;
51 int ret;
52 u8 cmd_rsp, aggr;
53
54 ath_detect_bt_priority(priv);
55
56 is_btscan = !!(priv->op_flags & OP_BT_SCAN);
57
58 aggr = priv->op_flags & OP_BT_PRIORITY_DETECTED;
59
60 WMI_CMD_BUF(WMI_AGGR_LIMIT_CMD, &aggr);
61
62 ath9k_cmn_btcoex_bt_stomp(common, is_btscan ? ATH_BTCOEX_STOMP_ALL :
63 btcoex->bt_stomp_type);
64
65 timer_period = is_btscan ? btcoex->btscan_no_stomp :
66 btcoex->btcoex_no_stomp;
67 ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
68 msecs_to_jiffies(timer_period));
69 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work,
70 msecs_to_jiffies(btcoex->btcoex_period));
71}
72
73/*
74 * Work to time slice between wlan and bt traffic and
75 * configure weight registers
76 */
77static void ath_btcoex_duty_cycle_work(struct work_struct *work)
78{
79 struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
80 duty_cycle_work.work);
81 struct ath_hw *ah = priv->ah;
82 struct ath_btcoex *btcoex = &priv->btcoex;
83 struct ath_common *common = ath9k_hw_common(ah);
84 bool is_btscan = priv->op_flags & OP_BT_SCAN;
85
86 ath_print(common, ATH_DBG_BTCOEX,
87 "time slice work for bt and wlan\n");
88
89 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
90 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_NONE);
91 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
92 ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_LOW);
93}
94
95void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
96{
97 struct ath_btcoex *btcoex = &priv->btcoex;
98
99 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
100 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
101 btcoex->btcoex_period / 100;
102 btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
103 btcoex->btcoex_period / 100;
104 INIT_DELAYED_WORK(&priv->coex_period_work, ath_btcoex_period_work);
105 INIT_DELAYED_WORK(&priv->duty_cycle_work, ath_btcoex_duty_cycle_work);
106}
107
108/*
109 * (Re)start btcoex work
110 */
111
112void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
113{
114 struct ath_btcoex *btcoex = &priv->btcoex;
115 struct ath_hw *ah = priv->ah;
116
117 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
118 "Starting btcoex work\n");
119
120 btcoex->bt_priority_cnt = 0;
121 btcoex->bt_priority_time = jiffies;
122 priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
123 ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work, 0);
124}
125
126
127/*
128 * Cancel btcoex and bt duty cycle work.
129 */
130void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
131{
132 cancel_delayed_work_sync(&priv->coex_period_work);
133 cancel_delayed_work_sync(&priv->duty_cycle_work);
134}
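
The stomp-window arithmetic in ath_htc_init_btcoex_work() is easy to check by hand. A standalone sketch, assuming the ath9k defaults of this era (ATH_BTCOEX_DEF_BT_PERIOD = 45 ms, ATH_BTCOEX_DEF_DUTY_CYCLE = 55, ATH_BTCOEX_BTSCAN_DUTY_CYCLE = 90; these values are defined elsewhere in the tree, not in this file):

#include <stdio.h>

/* Assumed defaults; not defined in this patch. */
#define ATH_BTCOEX_DEF_BT_PERIOD	45	/* ms */
#define ATH_BTCOEX_DEF_DUTY_CYCLE	55	/* % of the period with BT priority */
#define ATH_BTCOEX_BTSCAN_DUTY_CYCLE	90	/* % during a BT scan */

int main(void)
{
	unsigned int period = ATH_BTCOEX_DEF_BT_PERIOD;
	unsigned int no_stomp =
		(100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * period / 100;
	unsigned int btscan_no_stomp =
		(100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * period / 100;

	/* (100 - 55) * 45 / 100 = 20 ms of WLAN priority per period */
	printf("no_stomp        = %u ms\n", no_stomp);
	/* (100 - 90) * 45 / 100 = 4 ms of WLAN priority while BT scans */
	printf("btscan_no_stomp = %u ms\n", btscan_no_stomp);
	return 0;
}
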
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 2d4279191d7a..33850c952314 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -41,6 +41,8 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
41 .max_power = 20, \ 41 .max_power = 20, \
42} 42}
43 43
44#define ATH_HTC_BTCOEX_PRODUCT_ID "wb193"
45
44static struct ieee80211_channel ath9k_2ghz_channels[] = { 46static struct ieee80211_channel ath9k_2ghz_channels[] = {
45 CHAN2G(2412, 0), /* Channel 1 */ 47 CHAN2G(2412, 0), /* Channel 1 */
46 CHAN2G(2417, 1), /* Channel 2 */ 48 CHAN2G(2417, 1), /* Channel 2 */
@@ -564,7 +566,7 @@ static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
564 * reset the contents on initial power up. 566 * reset the contents on initial power up.
565 */ 567 */
566 for (i = 0; i < common->keymax; i++) 568 for (i = 0; i < common->keymax; i++)
567 ath9k_hw_keyreset(priv->ah, (u16) i); 569 ath_hw_keyreset(common, (u16) i);
568} 570}
569 571
570static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv) 572static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
@@ -599,13 +601,36 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
599 common->tx_chainmask = priv->ah->caps.tx_chainmask; 601 common->tx_chainmask = priv->ah->caps.tx_chainmask;
600 common->rx_chainmask = priv->ah->caps.rx_chainmask; 602 common->rx_chainmask = priv->ah->caps.rx_chainmask;
601 603
602 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 604 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
603 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
604 605
605 priv->ah->opmode = NL80211_IFTYPE_STATION; 606 priv->ah->opmode = NL80211_IFTYPE_STATION;
606} 607}
607 608
608static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid) 609static void ath9k_init_btcoex(struct ath9k_htc_priv *priv)
610{
611 int qnum;
612
613 switch (priv->ah->btcoex_hw.scheme) {
614 case ATH_BTCOEX_CFG_NONE:
615 break;
616 case ATH_BTCOEX_CFG_3WIRE:
617 priv->ah->btcoex_hw.btactive_gpio = 7;
618 priv->ah->btcoex_hw.btpriority_gpio = 6;
619 priv->ah->btcoex_hw.wlanactive_gpio = 8;
620 priv->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
621 ath9k_hw_btcoex_init_3wire(priv->ah);
622 ath_htc_init_btcoex_work(priv);
623 qnum = priv->hwq_map[WME_AC_BE];
624 ath9k_hw_init_btcoex_hw(priv->ah, qnum);
625 break;
626 default:
627 WARN_ON(1);
628 break;
629 }
630}
631
632static int ath9k_init_priv(struct ath9k_htc_priv *priv,
633 u16 devid, char *product)
609{ 634{
610 struct ath_hw *ah = NULL; 635 struct ath_hw *ah = NULL;
611 struct ath_common *common; 636 struct ath_common *common;
@@ -672,6 +697,11 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
672 ath9k_init_channels_rates(priv); 697 ath9k_init_channels_rates(priv);
673 ath9k_init_misc(priv); 698 ath9k_init_misc(priv);
674 699
700 if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
701 ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
702 ath9k_init_btcoex(priv);
703 }
704
675 return 0; 705 return 0;
676 706
677err_queues: 707err_queues:
@@ -734,7 +764,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
734 SET_IEEE80211_PERM_ADDR(hw, common->macaddr); 764 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
735} 765}
736 766
737static int ath9k_init_device(struct ath9k_htc_priv *priv, u16 devid) 767static int ath9k_init_device(struct ath9k_htc_priv *priv,
768 u16 devid, char *product)
738{ 769{
739 struct ieee80211_hw *hw = priv->hw; 770 struct ieee80211_hw *hw = priv->hw;
740 struct ath_common *common; 771 struct ath_common *common;
@@ -743,7 +774,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv, u16 devid)
743 struct ath_regulatory *reg; 774 struct ath_regulatory *reg;
744 775
745 /* Bring up device */ 776 /* Bring up device */
746 error = ath9k_init_priv(priv, devid); 777 error = ath9k_init_priv(priv, devid, product);
747 if (error != 0) 778 if (error != 0)
748 goto err_init; 779 goto err_init;
749 780
@@ -801,7 +832,7 @@ err_init:
801} 832}
802 833
803int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, 834int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
804 u16 devid) 835 u16 devid, char *product)
805{ 836{
806 struct ieee80211_hw *hw; 837 struct ieee80211_hw *hw;
807 struct ath9k_htc_priv *priv; 838 struct ath9k_htc_priv *priv;
@@ -835,7 +866,7 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
835 /* The device may have been unplugged earlier. */ 866 /* The device may have been unplugged earlier. */
836 priv->op_flags &= ~OP_UNPLUGGED; 867 priv->op_flags &= ~OP_UNPLUGGED;
837 868
838 ret = ath9k_init_device(priv, devid); 869 ret = ath9k_init_device(priv, devid, product);
839 if (ret) 870 if (ret)
840 goto err_init; 871 goto err_init;
841 872
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 7d09b4b17bbd..5124d04b240b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -137,8 +137,6 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
137 if (priv->op_flags & OP_FULL_RESET) 137 if (priv->op_flags & OP_FULL_RESET)
138 fastcc = false; 138 fastcc = false;
139 139
140 /* Fiddle around with fastcc later on, for now just use full reset */
141 fastcc = false;
142 ath9k_htc_ps_wakeup(priv); 140 ath9k_htc_ps_wakeup(priv);
143 htc_stop(priv->htc); 141 htc_stop(priv->htc);
144 WMI_CMD(WMI_DISABLE_INTR_CMDID); 142 WMI_CMD(WMI_DISABLE_INTR_CMDID);
@@ -146,9 +144,10 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
146 WMI_CMD(WMI_STOP_RECV_CMDID); 144 WMI_CMD(WMI_STOP_RECV_CMDID);
147 145
148 ath_print(common, ATH_DBG_CONFIG, 146 ath_print(common, ATH_DBG_CONFIG,
149 "(%u MHz) -> (%u MHz), HT: %d, HT40: %d\n", 147 "(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
150 priv->ah->curchan->channel, 148 priv->ah->curchan->channel,
151 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf)); 149 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
150 fastcc);
152 151
153 caldata = &priv->caldata[channel->hw_value]; 152 caldata = &priv->caldata[channel->hw_value];
154 ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); 153 ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
@@ -1210,6 +1209,12 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
1210 1209
1211 ieee80211_wake_queues(hw); 1210 ieee80211_wake_queues(hw);
1212 1211
1212 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
1213 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1214 AR_STOMP_LOW_WLAN_WGHT);
1215 ath9k_hw_btcoex_enable(ah);
1216 ath_htc_resume_btcoex_work(priv);
1217 }
1213 mutex_unlock(&priv->mutex); 1218 mutex_unlock(&priv->mutex);
1214 1219
1215 return ret; 1220 return ret;
@@ -1233,7 +1238,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1233 1238
1234 /* Cancel all the running timers/work .. */ 1239 /* Cancel all the running timers/work .. */
1235 cancel_work_sync(&priv->ps_work); 1240 cancel_work_sync(&priv->ps_work);
1236 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1237 cancel_delayed_work_sync(&priv->ath9k_led_blink_work); 1241 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1238 ath9k_led_stop_brightness(priv); 1242 ath9k_led_stop_brightness(priv);
1239 1243
@@ -1254,6 +1258,12 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
1254 "Monitor interface removed\n"); 1258 "Monitor interface removed\n");
1255 } 1259 }
1256 1260
1261 if (ah->btcoex_hw.enabled) {
1262 ath9k_hw_btcoex_disable(ah);
1263 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
1264 ath_htc_cancel_btcoex_work(priv);
1265 }
1266
1257 ath9k_hw_phy_disable(ah); 1267 ath9k_hw_phy_disable(ah);
1258 ath9k_hw_disable(ah); 1268 ath9k_hw_disable(ah);
1259 ath9k_hw_configpcipowersave(ah, 1, 1); 1269 ath9k_hw_configpcipowersave(ah, 1, 1);
@@ -1580,20 +1590,21 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1580 1590
1581 switch (cmd) { 1591 switch (cmd) {
1582 case SET_KEY: 1592 case SET_KEY:
1583 ret = ath9k_cmn_key_config(common, vif, sta, key); 1593 ret = ath_key_config(common, vif, sta, key);
1584 if (ret >= 0) { 1594 if (ret >= 0) {
1585 key->hw_key_idx = ret; 1595 key->hw_key_idx = ret;
1586 /* push IV and Michael MIC generation to stack */ 1596 /* push IV and Michael MIC generation to stack */
1587 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1597 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1588 if (key->alg == ALG_TKIP) 1598 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
1589 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 1599 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1590 if (priv->ah->sw_mgmt_crypto && key->alg == ALG_CCMP) 1600 if (priv->ah->sw_mgmt_crypto &&
1601 key->cipher == WLAN_CIPHER_SUITE_CCMP)
1591 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; 1602 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
1592 ret = 0; 1603 ret = 0;
1593 } 1604 }
1594 break; 1605 break;
1595 case DISABLE_KEY: 1606 case DISABLE_KEY:
1596 ath9k_cmn_key_delete(common, key); 1607 ath_key_delete(common, key);
1597 break; 1608 break;
1598 default: 1609 default:
1599 ret = -EINVAL; 1610 ret = -EINVAL;
@@ -1774,7 +1785,8 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
1774 priv->op_flags |= OP_SCANNING; 1785 priv->op_flags |= OP_SCANNING;
1775 spin_unlock_bh(&priv->beacon_lock); 1786 spin_unlock_bh(&priv->beacon_lock);
1776 cancel_work_sync(&priv->ps_work); 1787 cancel_work_sync(&priv->ps_work);
1777 cancel_delayed_work_sync(&priv->ath9k_ani_work); 1788 if (priv->op_flags & OP_ASSOCIATED)
1789 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1778 mutex_unlock(&priv->mutex); 1790 mutex_unlock(&priv->mutex);
1779} 1791}
1780 1792
@@ -1788,9 +1800,10 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1788 priv->op_flags &= ~OP_SCANNING; 1800 priv->op_flags &= ~OP_SCANNING;
1789 spin_unlock_bh(&priv->beacon_lock); 1801 spin_unlock_bh(&priv->beacon_lock);
1790 priv->op_flags |= OP_FULL_RESET; 1802 priv->op_flags |= OP_FULL_RESET;
1791 if (priv->op_flags & OP_ASSOCIATED) 1803 if (priv->op_flags & OP_ASSOCIATED) {
1792 ath9k_htc_beacon_config(priv, priv->vif); 1804 ath9k_htc_beacon_config(priv, priv->vif);
1793 ath_start_ani(priv); 1805 ath_start_ani(priv);
1806 }
1794 ath9k_htc_ps_restore(priv); 1807 ath9k_htc_ps_restore(priv);
1795 mutex_unlock(&priv->mutex); 1808 mutex_unlock(&priv->mutex);
1796} 1809}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 2a6e45a293a9..c99600aff76d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -415,8 +415,7 @@ static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
415 ath9k_hw_setrxfilter(ah, rfilt); 415 ath9k_hw_setrxfilter(ah, rfilt);
416 416
417 /* configure bssid mask */ 417 /* configure bssid mask */
418 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 418 ath_hw_setbssidmask(common);
419 ath_hw_setbssidmask(common);
420 419
421 /* configure operational mode */ 420 /* configure operational mode */
422 ath9k_hw_setopmode(ah); 421 ath9k_hw_setopmode(ah);
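
With the ATH9K_HW_CAP_BSSIDMASK guard gone, ath_hw_setbssidmask() runs unconditionally here (and likewise in recv.c below). For context, the mask semantics assumed by the ath common code: a bit set in bssidmask requires the corresponding bit of an incoming frame's BSSID to match our MAC address, so the all-ones ath_bcast_mac mask installed at init means "exact match only". A standalone illustration, not part of the patch:

#include <stdbool.h>

#define ETH_ALEN 6

/* Sketch of the matching rule the hardware applies once the mask is
 * programmed: masked bits of (addr XOR macaddr) must all be zero. */
static bool bssid_matches(const unsigned char *addr,
			  const unsigned char *macaddr,
			  const unsigned char *bssidmask)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		if ((addr[i] ^ macaddr[i]) & bssidmask[i])
			return false;
	return true;
}
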
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 705c0f342e1c..861ec9269309 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -462,9 +462,9 @@ void ath9k_htc_hw_free(struct htc_target *htc)
462} 462}
463 463
464int ath9k_htc_hw_init(struct htc_target *target, 464int ath9k_htc_hw_init(struct htc_target *target,
465 struct device *dev, u16 devid) 465 struct device *dev, u16 devid, char *product)
466{ 466{
467 if (ath9k_htc_probe_device(target, dev, devid)) { 467 if (ath9k_htc_probe_device(target, dev, devid, product)) {
468 printk(KERN_ERR "Failed to initialize the device\n"); 468 printk(KERN_ERR "Failed to initialize the device\n");
469 return -ENODEV; 469 return -ENODEV;
470 } 470 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index faba6790328b..07b6509d5896 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -239,7 +239,7 @@ struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
239 struct device *dev); 239 struct device *dev);
240void ath9k_htc_hw_free(struct htc_target *htc); 240void ath9k_htc_hw_free(struct htc_target *htc);
241int ath9k_htc_hw_init(struct htc_target *target, 241int ath9k_htc_hw_init(struct htc_target *target,
242 struct device *dev, u16 devid); 242 struct device *dev, u16 devid, char *product);
243void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug); 243void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
244 244
245#endif /* HTC_HST_H */ 245#endif /* HTC_HST_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 3384ca164562..0b2ff98b6f33 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1258,11 +1258,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1258 (chan->channel != ah->curchan->channel) && 1258 (chan->channel != ah->curchan->channel) &&
1259 ((chan->channelFlags & CHANNEL_ALL) == 1259 ((chan->channelFlags & CHANNEL_ALL) ==
1260 (ah->curchan->channelFlags & CHANNEL_ALL)) && 1260 (ah->curchan->channelFlags & CHANNEL_ALL)) &&
1261 !AR_SREV_9280(ah)) { 1261 (!AR_SREV_9280(ah) || AR_DEVID_7010(ah))) {
1262 1262
1263 if (ath9k_hw_channel_change(ah, chan)) { 1263 if (ath9k_hw_channel_change(ah, chan)) {
1264 ath9k_hw_loadnf(ah, ah->curchan); 1264 ath9k_hw_loadnf(ah, ah->curchan);
1265 ath9k_hw_start_nfcal(ah, true); 1265 ath9k_hw_start_nfcal(ah, true);
1266 if (AR_SREV_9271(ah))
1267 ar9002_hw_load_ani_reg(ah, chan);
1266 return 0; 1268 return 0;
1267 } 1269 }
1268 } 1270 }
@@ -1474,283 +1476,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1474} 1476}
1475EXPORT_SYMBOL(ath9k_hw_reset); 1477EXPORT_SYMBOL(ath9k_hw_reset);
1476 1478
1477/************************/
1478/* Key Cache Management */
1479/************************/
1480
1481bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
1482{
1483 u32 keyType;
1484
1485 if (entry >= ah->caps.keycache_size) {
1486 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
1487 "keycache entry %u out of range\n", entry);
1488 return false;
1489 }
1490
1491 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
1492
1493 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
1494 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
1495 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
1496 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
1497 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
1498 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
1499 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
1500 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
1501
1502 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
1503 u16 micentry = entry + 64;
1504
1505 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
1506 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
1507 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
1508 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
1509
1510 }
1511
1512 return true;
1513}
1514EXPORT_SYMBOL(ath9k_hw_keyreset);
1515
1516static bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
1517{
1518 u32 macHi, macLo;
1519 u32 unicast_flag = AR_KEYTABLE_VALID;
1520
1521 if (entry >= ah->caps.keycache_size) {
1522 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
1523 "keycache entry %u out of range\n", entry);
1524 return false;
1525 }
1526
1527 if (mac != NULL) {
1528 /*
1529 * AR_KEYTABLE_VALID indicates that the address is a unicast
1530 * address, which must match the transmitter address for
1531 * decrypting frames.
1532 * Not setting this bit allows the hardware to use the key
1533 * for multicast frame decryption.
1534 */
1535 if (mac[0] & 0x01)
1536 unicast_flag = 0;
1537
1538 macHi = (mac[5] << 8) | mac[4];
1539 macLo = (mac[3] << 24) |
1540 (mac[2] << 16) |
1541 (mac[1] << 8) |
1542 mac[0];
1543 macLo >>= 1;
1544 macLo |= (macHi & 1) << 31;
1545 macHi >>= 1;
1546 } else {
1547 macLo = macHi = 0;
1548 }
1549 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
1550 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);
1551
1552 return true;
1553}
1554
1555bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
1556 const struct ath9k_keyval *k,
1557 const u8 *mac)
1558{
1559 const struct ath9k_hw_capabilities *pCap = &ah->caps;
1560 struct ath_common *common = ath9k_hw_common(ah);
1561 u32 key0, key1, key2, key3, key4;
1562 u32 keyType;
1563
1564 if (entry >= pCap->keycache_size) {
1565 ath_print(common, ATH_DBG_FATAL,
1566 "keycache entry %u out of range\n", entry);
1567 return false;
1568 }
1569
1570 switch (k->kv_type) {
1571 case ATH9K_CIPHER_AES_OCB:
1572 keyType = AR_KEYTABLE_TYPE_AES;
1573 break;
1574 case ATH9K_CIPHER_AES_CCM:
1575 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
1576 ath_print(common, ATH_DBG_ANY,
1577 "AES-CCM not supported by mac rev 0x%x\n",
1578 ah->hw_version.macRev);
1579 return false;
1580 }
1581 keyType = AR_KEYTABLE_TYPE_CCM;
1582 break;
1583 case ATH9K_CIPHER_TKIP:
1584 keyType = AR_KEYTABLE_TYPE_TKIP;
1585 if (ATH9K_IS_MIC_ENABLED(ah)
1586 && entry + 64 >= pCap->keycache_size) {
1587 ath_print(common, ATH_DBG_ANY,
1588 "entry %u inappropriate for TKIP\n", entry);
1589 return false;
1590 }
1591 break;
1592 case ATH9K_CIPHER_WEP:
1593 if (k->kv_len < WLAN_KEY_LEN_WEP40) {
1594 ath_print(common, ATH_DBG_ANY,
1595 "WEP key length %u too small\n", k->kv_len);
1596 return false;
1597 }
1598 if (k->kv_len <= WLAN_KEY_LEN_WEP40)
1599 keyType = AR_KEYTABLE_TYPE_40;
1600 else if (k->kv_len <= WLAN_KEY_LEN_WEP104)
1601 keyType = AR_KEYTABLE_TYPE_104;
1602 else
1603 keyType = AR_KEYTABLE_TYPE_128;
1604 break;
1605 case ATH9K_CIPHER_CLR:
1606 keyType = AR_KEYTABLE_TYPE_CLR;
1607 break;
1608 default:
1609 ath_print(common, ATH_DBG_FATAL,
1610 "cipher %u not supported\n", k->kv_type);
1611 return false;
1612 }
1613
1614 key0 = get_unaligned_le32(k->kv_val + 0);
1615 key1 = get_unaligned_le16(k->kv_val + 4);
1616 key2 = get_unaligned_le32(k->kv_val + 6);
1617 key3 = get_unaligned_le16(k->kv_val + 10);
1618 key4 = get_unaligned_le32(k->kv_val + 12);
1619 if (k->kv_len <= WLAN_KEY_LEN_WEP104)
1620 key4 &= 0xff;
1621
1622 /*
1623 * Note: Key cache registers access special memory area that requires
1624 * two 32-bit writes to actually update the values in the internal
1625 * memory. Consequently, the exact order and pairs used here must be
1626 * maintained.
1627 */
1628
1629 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
1630 u16 micentry = entry + 64;
1631
1632 /*
1633 * Write inverted key[47:0] first to avoid Michael MIC errors
1634 * on frames that could be sent or received at the same time.
1635 * The correct key will be written in the end once everything
1636 * else is ready.
1637 */
1638 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
1639 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
1640
1641 /* Write key[95:48] */
1642 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
1643 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
1644
1645 /* Write key[127:96] and key type */
1646 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
1647 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
1648
1649 /* Write MAC address for the entry */
1650 (void) ath9k_hw_keysetmac(ah, entry, mac);
1651
1652 if (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) {
1653 /*
1654 * TKIP uses two key cache entries:
1655 * Michael MIC TX/RX keys in the same key cache entry
1656 * (idx = main index + 64):
1657 * key0 [31:0] = RX key [31:0]
1658 * key1 [15:0] = TX key [31:16]
1659 * key1 [31:16] = reserved
1660 * key2 [31:0] = RX key [63:32]
1661 * key3 [15:0] = TX key [15:0]
1662 * key3 [31:16] = reserved
1663 * key4 [31:0] = TX key [63:32]
1664 */
1665 u32 mic0, mic1, mic2, mic3, mic4;
1666
1667 mic0 = get_unaligned_le32(k->kv_mic + 0);
1668 mic2 = get_unaligned_le32(k->kv_mic + 4);
1669 mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
1670 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
1671 mic4 = get_unaligned_le32(k->kv_txmic + 4);
1672
1673 /* Write RX[31:0] and TX[31:16] */
1674 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
1675 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
1676
1677 /* Write RX[63:32] and TX[15:0] */
1678 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
1679 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
1680
1681 /* Write TX[63:32] and keyType(reserved) */
1682 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
1683 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
1684 AR_KEYTABLE_TYPE_CLR);
1685
1686 } else {
1687 /*
1688 * TKIP uses four key cache entries (two for group
1689 * keys):
1690 * Michael MIC TX/RX keys are in different key cache
1691 * entries (idx = main index + 64 for TX and
1692 * main index + 32 + 96 for RX):
1693 * key0 [31:0] = TX/RX MIC key [31:0]
1694 * key1 [31:0] = reserved
1695 * key2 [31:0] = TX/RX MIC key [63:32]
1696 * key3 [31:0] = reserved
1697 * key4 [31:0] = reserved
1698 *
1699 * Upper layer code will call this function separately
1700 * for TX and RX keys when these register offsets are
1701 * used.
1702 */
1703 u32 mic0, mic2;
1704
1705 mic0 = get_unaligned_le32(k->kv_mic + 0);
1706 mic2 = get_unaligned_le32(k->kv_mic + 4);
1707
1708 /* Write MIC key[31:0] */
1709 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
1710 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
1711
1712 /* Write MIC key[63:32] */
1713 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
1714 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
1715
1716 /* Write key4 (reserved) and key type */
1717 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
1718 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
1719 AR_KEYTABLE_TYPE_CLR);
1720 }
1721
1722 /* MAC address registers are reserved for the MIC entry */
1723 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
1724 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
1725
1726 /*
1727 * Write the correct (un-inverted) key[47:0] last to enable
1728 * TKIP now that all other registers are set with correct
1729 * values.
1730 */
1731 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
1732 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
1733 } else {
1734 /* Write key[47:0] */
1735 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
1736 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
1737
1738 /* Write key[95:48] */
1739 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
1740 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
1741
1742 /* Write key[127:96] and key type */
1743 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
1744 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
1745
1746 /* Write MAC address for the entry */
1747 (void) ath9k_hw_keysetmac(ah, entry, mac);
1748 }
1749
1750 return true;
1751}
1752EXPORT_SYMBOL(ath9k_hw_set_keycache_entry);
1753
1754/******************************/ 1479/******************************/
1755/* Power Management (Chipset) */ 1480/* Power Management (Chipset) */
1756/******************************/ 1481/******************************/
@@ -2056,6 +1781,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2056 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 1781 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
2057 1782
2058 u16 capField = 0, eeval; 1783 u16 capField = 0, eeval;
1784 u8 ant_div_ctl1;
2059 1785
2060 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0); 1786 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
2061 regulatory->current_rd = eeval; 1787 regulatory->current_rd = eeval;
@@ -2140,24 +1866,13 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2140 pCap->low_5ghz_chan = 4920; 1866 pCap->low_5ghz_chan = 4920;
2141 pCap->high_5ghz_chan = 6100; 1867 pCap->high_5ghz_chan = 6100;
2142 1868
2143 pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP; 1869 common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
2144 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP;
2145 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM;
2146
2147 pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP;
2148 pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP;
2149 pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM;
2150 1870
2151 if (ah->config.ht_enable) 1871 if (ah->config.ht_enable)
2152 pCap->hw_caps |= ATH9K_HW_CAP_HT; 1872 pCap->hw_caps |= ATH9K_HW_CAP_HT;
2153 else 1873 else
2154 pCap->hw_caps &= ~ATH9K_HW_CAP_HT; 1874 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
2155 1875
2156 pCap->hw_caps |= ATH9K_HW_CAP_GTT;
2157 pCap->hw_caps |= ATH9K_HW_CAP_VEOL;
2158 pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK;
2159 pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH;
2160
2161 if (capField & AR_EEPROM_EEPCAP_MAXQCU) 1876 if (capField & AR_EEPROM_EEPCAP_MAXQCU)
2162 pCap->total_queues = 1877 pCap->total_queues =
2163 MS(capField, AR_EEPROM_EEPCAP_MAXQCU); 1878 MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
@@ -2170,8 +1885,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2170 else 1885 else
2171 pCap->keycache_size = AR_KEYTABLE_SIZE; 1886 pCap->keycache_size = AR_KEYTABLE_SIZE;
2172 1887
2173 pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
2174
2175 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) 1888 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
2176 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1; 1889 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1;
2177 else 1890 else
@@ -2280,6 +1993,14 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2280 if (AR_SREV_9287_10_OR_LATER(ah) || AR_SREV_9271(ah)) 1993 if (AR_SREV_9287_10_OR_LATER(ah) || AR_SREV_9271(ah))
2281 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20; 1994 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
2282 1995
1996 if (AR_SREV_9285(ah))
1997 if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
1998 ant_div_ctl1 =
1999 ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
2000 if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
2001 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
2002 }
2003
2283 return 0; 2004 return 0;
2284} 2005}
2285 2006
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 399f7c1283cd..df47f792cf4e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -181,29 +181,19 @@ enum wireless_mode {
181}; 181};
182 182
183enum ath9k_hw_caps { 183enum ath9k_hw_caps {
184 ATH9K_HW_CAP_MIC_AESCCM = BIT(0), 184 ATH9K_HW_CAP_HT = BIT(0),
185 ATH9K_HW_CAP_MIC_CKIP = BIT(1), 185 ATH9K_HW_CAP_RFSILENT = BIT(1),
186 ATH9K_HW_CAP_MIC_TKIP = BIT(2), 186 ATH9K_HW_CAP_CST = BIT(2),
187 ATH9K_HW_CAP_CIPHER_AESCCM = BIT(3), 187 ATH9K_HW_CAP_ENHANCEDPM = BIT(3),
188 ATH9K_HW_CAP_CIPHER_CKIP = BIT(4), 188 ATH9K_HW_CAP_AUTOSLEEP = BIT(4),
189 ATH9K_HW_CAP_CIPHER_TKIP = BIT(5), 189 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5),
190 ATH9K_HW_CAP_VEOL = BIT(6), 190 ATH9K_HW_CAP_EDMA = BIT(6),
191 ATH9K_HW_CAP_BSSIDMASK = BIT(7), 191 ATH9K_HW_CAP_RAC_SUPPORTED = BIT(7),
192 ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(8), 192 ATH9K_HW_CAP_LDPC = BIT(8),
193 ATH9K_HW_CAP_HT = BIT(9), 193 ATH9K_HW_CAP_FASTCLOCK = BIT(9),
194 ATH9K_HW_CAP_GTT = BIT(10), 194 ATH9K_HW_CAP_SGI_20 = BIT(10),
195 ATH9K_HW_CAP_FASTCC = BIT(11), 195 ATH9K_HW_CAP_PAPRD = BIT(11),
196 ATH9K_HW_CAP_RFSILENT = BIT(12), 196 ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12),
197 ATH9K_HW_CAP_CST = BIT(13),
198 ATH9K_HW_CAP_ENHANCEDPM = BIT(14),
199 ATH9K_HW_CAP_AUTOSLEEP = BIT(15),
200 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16),
201 ATH9K_HW_CAP_EDMA = BIT(17),
202 ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18),
203 ATH9K_HW_CAP_LDPC = BIT(19),
204 ATH9K_HW_CAP_FASTCLOCK = BIT(20),
205 ATH9K_HW_CAP_SGI_20 = BIT(21),
206 ATH9K_HW_CAP_PAPRD = BIT(22),
207}; 197};
208 198
209struct ath9k_hw_capabilities { 199struct ath9k_hw_capabilities {
@@ -355,6 +345,7 @@ struct ath9k_hw_cal_data {
355 int16_t rawNoiseFloor; 345 int16_t rawNoiseFloor;
356 bool paprd_done; 346 bool paprd_done;
357 bool nfcal_pending; 347 bool nfcal_pending;
348 bool nfcal_interference;
358 u16 small_signal_gain[AR9300_MAX_CHAINS]; 349 u16 small_signal_gain[AR9300_MAX_CHAINS];
359 u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ]; 350 u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
360 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; 351 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
@@ -494,6 +485,12 @@ struct ath_gen_timer_table {
494 } timer_mask; 485 } timer_mask;
495}; 486};
496 487
488struct ath_hw_antcomb_conf {
489 u8 main_lna_conf;
490 u8 alt_lna_conf;
491 u8 fast_div_bias;
492};
493
497/** 494/**
498 * struct ath_hw_private_ops - callbacks used internally by hardware code 495 * struct ath_hw_private_ops - callbacks used internally by hardware code
499 * 496 *
@@ -873,12 +870,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
873int ath9k_hw_fill_cap_info(struct ath_hw *ah); 870int ath9k_hw_fill_cap_info(struct ath_hw *ah);
874u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan); 871u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
875 872
876/* Key Cache Management */
877bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
878bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
879 const struct ath9k_keyval *k,
880 const u8 *mac);
881
882/* GPIO / RFKILL / Antennae */ 873/* GPIO / RFKILL / Antennae */
883void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio); 874void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio);
884u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio); 875u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
@@ -887,6 +878,10 @@ void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
887void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val); 878void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
888u32 ath9k_hw_getdefantenna(struct ath_hw *ah); 879u32 ath9k_hw_getdefantenna(struct ath_hw *ah);
889void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); 880void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
881void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah,
882 struct ath_hw_antcomb_conf *antconf);
883void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
884 struct ath_hw_antcomb_conf *antconf);
890 885
891/* General Operation */ 886/* General Operation */
892bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout); 887bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
@@ -984,6 +979,7 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
984void ar9002_hw_attach_ops(struct ath_hw *ah); 979void ar9002_hw_attach_ops(struct ath_hw *ah);
985void ar9003_hw_attach_ops(struct ath_hw *ah); 980void ar9003_hw_attach_ops(struct ath_hw *ah);
986 981
982void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan);
987/* 983/*
988 * ANI work can be shared between all families but a next 984 * ANI work can be shared between all families but a next
989 * generation implementation of ANI will be used only for AR9003 only 985 * generation implementation of ANI will be used only for AR9003 only
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 243c1775f343..573899e27b3d 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -33,7 +33,7 @@ int modparam_nohwcrypt;
33module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); 33module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
34MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); 34MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
35 35
36int led_blink = 1; 36int led_blink;
37module_param_named(blink, led_blink, int, 0444); 37module_param_named(blink, led_blink, int, 0444);
38MODULE_PARM_DESC(blink, "Enable LED blink on activity"); 38MODULE_PARM_DESC(blink, "Enable LED blink on activity");
39 39
@@ -381,7 +381,7 @@ static void ath9k_init_crypto(struct ath_softc *sc)
381 * reset the contents on initial power up. 381 * reset the contents on initial power up.
382 */ 382 */
383 for (i = 0; i < common->keymax; i++) 383 for (i = 0; i < common->keymax; i++)
384 ath9k_hw_keyreset(sc->sc_ah, (u16) i); 384 ath_hw_keyreset(common, (u16) i);
385 385
386 /* 386 /*
387 * Check whether the separate key cache entries 387 * Check whether the separate key cache entries
@@ -389,8 +389,8 @@ static void ath9k_init_crypto(struct ath_softc *sc)
389 * With split mic keys the number of stations is limited 389 * With split mic keys the number of stations is limited
390 * to 27 otherwise 59. 390 * to 27 otherwise 59.
391 */ 391 */
392 if (!(sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)) 392 if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
393 common->splitmic = 1; 393 common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
394} 394}
395 395
396static int ath9k_init_btcoex(struct ath_softc *sc) 396static int ath9k_init_btcoex(struct ath_softc *sc)
@@ -522,8 +522,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
522 ath9k_hw_set_diversity(sc->sc_ah, true); 522 ath9k_hw_set_diversity(sc->sc_ah, true);
523 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah); 523 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
524 524
525 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 525 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
526 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
527 526
528 sc->beacon.slottime = ATH9K_SLOT_TIME_9; 527 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
529 528
@@ -531,6 +530,9 @@ static void ath9k_init_misc(struct ath_softc *sc)
531 sc->beacon.bslot[i] = NULL; 530 sc->beacon.bslot[i] = NULL;
532 sc->beacon.bslot_aphy[i] = NULL; 531 sc->beacon.bslot_aphy[i] = NULL;
533 } 532 }
533
534 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
535 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
534} 536}
535 537
536static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid, 538static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
@@ -641,7 +643,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
641 BIT(NL80211_IFTYPE_ADHOC) | 643 BIT(NL80211_IFTYPE_ADHOC) |
642 BIT(NL80211_IFTYPE_MESH_POINT); 644 BIT(NL80211_IFTYPE_MESH_POINT);
643 645
644 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 646 if (AR_SREV_5416(sc->sc_ah))
647 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
645 648
646 hw->queues = 4; 649 hw->queues = 4;
647 hw->max_rates = 4; 650 hw->max_rates = 4;
@@ -651,7 +654,9 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
651 hw->sta_data_size = sizeof(struct ath_node); 654 hw->sta_data_size = sizeof(struct ath_node);
652 hw->vif_data_size = sizeof(struct ath_vif); 655 hw->vif_data_size = sizeof(struct ath_vif);
653 656
657#ifdef CONFIG_ATH9K_RATE_CONTROL
654 hw->rate_control_algorithm = "ath9k_rate_control"; 658 hw->rate_control_algorithm = "ath9k_rate_control";
659#endif
655 660
656 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) 661 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
657 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 662 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index e955bb9d98cb..3efda8a8a3c1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -711,8 +711,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
711 rs->rs_phyerr = phyerr; 711 rs->rs_phyerr = phyerr;
712 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) 712 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
713 rs->rs_status |= ATH9K_RXERR_DECRYPT; 713 rs->rs_status |= ATH9K_RXERR_DECRYPT;
714 else if (ads.ds_rxstatus8 & AR_MichaelErr) 714 else if ((ads.ds_rxstatus8 & AR_MichaelErr) &&
715 rs->rs_keyix != ATH9K_RXKEYIX_INVALID)
715 rs->rs_status |= ATH9K_RXERR_MIC; 716 rs->rs_status |= ATH9K_RXERR_MIC;
717 else if (ads.ds_rxstatus8 & AR_KeyMiss)
718 rs->rs_status |= ATH9K_RXERR_DECRYPT;
716 } 719 }
717 720
718 return 0; 721 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 2633896d3998..7c1a34d64f6d 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -660,17 +660,6 @@ struct ath9k_11n_rate_series {
660 u32 RateFlags; 660 u32 RateFlags;
661}; 661};
662 662
663struct ath9k_keyval {
664 u8 kv_type;
665 u8 kv_pad;
666 u16 kv_len;
667 u8 kv_val[16]; /* TK */
668 u8 kv_mic[8]; /* Michael MIC key */
669 u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware
670 * supports both MIC keys in the same key cache entry;
671 * in that case, kv_mic is the RX key) */
672};
673
674enum ath9k_key_type { 663enum ath9k_key_type {
675 ATH9K_KEY_TYPE_CLEAR, 664 ATH9K_KEY_TYPE_CLEAR,
676 ATH9K_KEY_TYPE_WEP, 665 ATH9K_KEY_TYPE_WEP,
@@ -678,16 +667,6 @@ enum ath9k_key_type {
678 ATH9K_KEY_TYPE_TKIP, 667 ATH9K_KEY_TYPE_TKIP,
679}; 668};
680 669
681enum ath9k_cipher {
682 ATH9K_CIPHER_WEP = 0,
683 ATH9K_CIPHER_AES_OCB = 1,
684 ATH9K_CIPHER_AES_CCM = 2,
685 ATH9K_CIPHER_CKIP = 3,
686 ATH9K_CIPHER_TKIP = 4,
687 ATH9K_CIPHER_CLR = 5,
688 ATH9K_CIPHER_MIC = 127
689};
690
691struct ath_hw; 670struct ath_hw;
692struct ath9k_channel; 671struct ath9k_channel;
693 672
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3caa32316e7b..8b327bcad695 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -226,9 +226,10 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
226 caldata = &aphy->caldata; 226 caldata = &aphy->caldata;
227 227
228 ath_print(common, ATH_DBG_CONFIG, 228 ath_print(common, ATH_DBG_CONFIG,
229 "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n", 229 "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
230 sc->sc_ah->curchan->channel, 230 sc->sc_ah->curchan->channel,
231 channel->center_freq, conf_is_ht40(conf)); 231 channel->center_freq, conf_is_ht40(conf),
232 fastcc);
232 233
233 spin_lock_bh(&sc->sc_resetlock); 234 spin_lock_bh(&sc->sc_resetlock);
234 235
@@ -254,10 +255,10 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
254 ath_update_txpow(sc); 255 ath_update_txpow(sc);
255 ath9k_hw_set_interrupts(ah, ah->imask); 256 ath9k_hw_set_interrupts(ah, ah->imask);
256 257
257 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL | SC_OP_SCANNING))) { 258 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
258 ath_start_ani(common);
259 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
260 ath_beacon_config(sc, NULL); 259 ath_beacon_config(sc, NULL);
260 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
261 ath_start_ani(common);
261 } 262 }
262 263
263 ps_restore: 264 ps_restore:
@@ -395,7 +396,12 @@ void ath_ani_calibrate(unsigned long data)
395 bool shortcal = false; 396 bool shortcal = false;
396 bool aniflag = false; 397 bool aniflag = false;
397 unsigned int timestamp = jiffies_to_msecs(jiffies); 398 unsigned int timestamp = jiffies_to_msecs(jiffies);
398 u32 cal_interval, short_cal_interval; 399 u32 cal_interval, short_cal_interval, long_cal_interval;
400
401 if (ah->caldata && ah->caldata->nfcal_interference)
402 long_cal_interval = ATH_LONG_CALINTERVAL_INT;
403 else
404 long_cal_interval = ATH_LONG_CALINTERVAL;
399 405
400 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ? 406 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
401 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL; 407 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
@@ -407,7 +413,7 @@ void ath_ani_calibrate(unsigned long data)
407 ath9k_ps_wakeup(sc); 413 ath9k_ps_wakeup(sc);
408 414
409 /* Long calibration runs independently of short calibration. */ 415 /* Long calibration runs independently of short calibration. */
410 if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) { 416 if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
411 longcal = true; 417 longcal = true;
412 ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies); 418 ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
413 common->ani.longcal_timer = timestamp; 419 common->ani.longcal_timer = timestamp;
@@ -951,7 +957,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
951 957
952 ath_update_txpow(sc); 958 ath_update_txpow(sc);
953 959
954 if (sc->sc_flags & SC_OP_BEACONS) 960 if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
955 ath_beacon_config(sc, NULL); /* restart beacons */ 961 ath_beacon_config(sc, NULL); /* restart beacons */
956 962
957 ath9k_hw_set_interrupts(ah, ah->imask); 963 ath9k_hw_set_interrupts(ah, ah->imask);
@@ -1150,8 +1156,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
1150 else 1156 else
1151 ah->imask |= ATH9K_INT_RX; 1157 ah->imask |= ATH9K_INT_RX;
1152 1158
1153 if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT) 1159 ah->imask |= ATH9K_INT_GTT;
1154 ah->imask |= ATH9K_INT_GTT;
1155 1160
1156 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) 1161 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1157 ah->imask |= ATH9K_INT_CST; 1162 ah->imask |= ATH9K_INT_CST;
@@ -1373,12 +1378,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1373 1378
1374 mutex_lock(&sc->mutex); 1379 mutex_lock(&sc->mutex);
1375 1380
1376 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
1377 sc->nvifs > 0) {
1378 ret = -ENOBUFS;
1379 goto out;
1380 }
1381
1382 switch (vif->type) { 1381 switch (vif->type) {
1383 case NL80211_IFTYPE_STATION: 1382 case NL80211_IFTYPE_STATION:
1384 ic_opmode = NL80211_IFTYPE_STATION; 1383 ic_opmode = NL80211_IFTYPE_STATION;
@@ -1408,8 +1407,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1408 1407
1409 sc->nvifs++; 1408 sc->nvifs++;
1410 1409
1411 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 1410 ath9k_set_bssid_mask(hw, vif);
1412 ath9k_set_bssid_mask(hw);
1413 1411
1414 if (sc->nvifs > 1) 1412 if (sc->nvifs > 1)
1415 goto out; /* skip global settings for secondary vif */ 1413 goto out; /* skip global settings for secondary vif */
@@ -1556,6 +1554,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1556 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode. 1554 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
1557 */ 1555 */
1558 if (changed & IEEE80211_CONF_CHANGE_PS) { 1556 if (changed & IEEE80211_CONF_CHANGE_PS) {
1557 unsigned long flags;
1558 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1559 if (conf->flags & IEEE80211_CONF_PS) { 1559 if (conf->flags & IEEE80211_CONF_PS) {
1560 sc->ps_flags |= PS_ENABLED; 1560 sc->ps_flags |= PS_ENABLED;
1561 /* 1561 /*
@@ -1570,7 +1570,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1570 sc->ps_enabled = false; 1570 sc->ps_enabled = false;
1571 sc->ps_flags &= ~(PS_ENABLED | 1571 sc->ps_flags &= ~(PS_ENABLED |
1572 PS_NULLFUNC_COMPLETED); 1572 PS_NULLFUNC_COMPLETED);
1573 ath9k_setpower(sc, ATH9K_PM_AWAKE); 1573 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1574 if (!(ah->caps.hw_caps & 1574 if (!(ah->caps.hw_caps &
1575 ATH9K_HW_CAP_AUTOSLEEP)) { 1575 ATH9K_HW_CAP_AUTOSLEEP)) {
1576 ath9k_hw_setrxabort(sc->sc_ah, 0); 1576 ath9k_hw_setrxabort(sc->sc_ah, 0);
@@ -1585,6 +1585,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1585 } 1585 }
1586 } 1586 }
1587 } 1587 }
1588 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1588 } 1589 }
1589 1590
1590 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1591 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
@@ -1771,20 +1772,21 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1771 1772
1772 switch (cmd) { 1773 switch (cmd) {
1773 case SET_KEY: 1774 case SET_KEY:
1774 ret = ath9k_cmn_key_config(common, vif, sta, key); 1775 ret = ath_key_config(common, vif, sta, key);
1775 if (ret >= 0) { 1776 if (ret >= 0) {
1776 key->hw_key_idx = ret; 1777 key->hw_key_idx = ret;
1777 /* push IV and Michael MIC generation to stack */ 1778 /* push IV and Michael MIC generation to stack */
1778 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1779 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1779 if (key->alg == ALG_TKIP) 1780 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
1780 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 1781 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1781 if (sc->sc_ah->sw_mgmt_crypto && key->alg == ALG_CCMP) 1782 if (sc->sc_ah->sw_mgmt_crypto &&
1783 key->cipher == WLAN_CIPHER_SUITE_CCMP)
1782 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; 1784 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
1783 ret = 0; 1785 ret = 0;
1784 } 1786 }
1785 break; 1787 break;
1786 case DISABLE_KEY: 1788 case DISABLE_KEY:
1787 ath9k_cmn_key_delete(common, key); 1789 ath_key_delete(common, key);
1788 break; 1790 break;
1789 default: 1791 default:
1790 ret = -EINVAL; 1792 ret = -EINVAL;
@@ -1968,8 +1970,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1968 break; 1970 break;
1969 case IEEE80211_AMPDU_TX_START: 1971 case IEEE80211_AMPDU_TX_START:
1970 ath9k_ps_wakeup(sc); 1972 ath9k_ps_wakeup(sc);
1971 ath_tx_aggr_start(sc, sta, tid, ssn); 1973 ret = ath_tx_aggr_start(sc, sta, tid, ssn);
1972 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 1974 if (!ret)
1975 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1973 ath9k_ps_restore(sc); 1976 ath9k_ps_restore(sc);
1974 break; 1977 break;
1975 case IEEE80211_AMPDU_TX_STOP: 1978 case IEEE80211_AMPDU_TX_STOP:
@@ -2032,7 +2035,6 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
2032 2035
2033 aphy->state = ATH_WIPHY_SCAN; 2036 aphy->state = ATH_WIPHY_SCAN;
2034 ath9k_wiphy_pause_all_forced(sc, aphy); 2037 ath9k_wiphy_pause_all_forced(sc, aphy);
2035 sc->sc_flags |= SC_OP_SCANNING;
2036 mutex_unlock(&sc->mutex); 2038 mutex_unlock(&sc->mutex);
2037} 2039}
2038 2040
@@ -2047,7 +2049,6 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2047 2049
2048 mutex_lock(&sc->mutex); 2050 mutex_lock(&sc->mutex);
2049 aphy->state = ATH_WIPHY_ACTIVE; 2051 aphy->state = ATH_WIPHY_ACTIVE;
2050 sc->sc_flags &= ~SC_OP_SCANNING;
2051 mutex_unlock(&sc->mutex); 2052 mutex_unlock(&sc->mutex);
2052} 2053}
2053 2054
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index e724c2c1ae2a..17969af842f6 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -45,9 +45,6 @@
45 } \ 45 } \
46 } while (0) 46 } while (0)
47 47
48#define ATH9K_IS_MIC_ENABLED(ah) \
49 ((ah)->sta_id1_defaults & AR_STA_ID1_CRPT_MIC_ENABLE)
50
51#define ANTSWAP_AB 0x0001 48#define ANTSWAP_AB 0x0001
52#define REDUCE_CHAIN_0 0x00000050 49#define REDUCE_CHAIN_0 0x00000050
53#define REDUCE_CHAIN_1 0x00000051 50#define REDUCE_CHAIN_1 0x00000051
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index e49be733d546..ce1cd6d85847 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1320,6 +1320,22 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
1320 return caps; 1320 return caps;
1321} 1321}
1322 1322
1323static bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an,
1324 u8 tidno)
1325{
1326 struct ath_atx_tid *txtid;
1327
1328 if (!(sc->sc_flags & SC_OP_TXAGGR))
1329 return false;
1330
1331 txtid = ATH_AN_2_TID(an, tidno);
1332
1333 if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
1334 return true;
1335 return false;
1336}
1337
1338
1323/***********************************/ 1339/***********************************/
1324/* mac80211 Rate Control callbacks */ 1340/* mac80211 Rate Control callbacks */
1325/***********************************/ 1341/***********************************/
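
ath_tx_aggr_check() above reports whether a TID has neither completed nor started the ADDBA handshake, letting the rate-control status path kick off a block-ack session at the right moment. A hedged sketch of such a call site, assuming the two-argument ieee80211_start_tx_ba_session() of this era; the wrapper name is illustrative:

/* Sketch: once traffic to an HT peer is flowing, start a BA session
 * for the TID if no ADDBA is complete or in progress. */
static void example_check_aggr(struct ath_softc *sc,
			       struct ieee80211_sta *sta,
			       struct ath_node *an, u8 tidno)
{
	if (ath_tx_aggr_check(sc, an, tidno))
		ieee80211_start_tx_ba_session(sta, tidno);
}
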
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index dc1082654501..268072fd3c1c 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -224,7 +224,18 @@ enum ath9k_internal_frame_type {
224 ATH9K_IFT_UNPAUSE 224 ATH9K_IFT_UNPAUSE
225}; 225};
226 226
227#ifdef CONFIG_ATH9K_RATE_CONTROL
227int ath_rate_control_register(void); 228int ath_rate_control_register(void);
228void ath_rate_control_unregister(void); 229void ath_rate_control_unregister(void);
230#else
231static inline int ath_rate_control_register(void)
232{
233 return 0;
234}
235
236static inline void ath_rate_control_unregister(void)
237{
238}
239#endif
229 240
230#endif /* RC_H */ 241#endif /* RC_H */
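
The stub pair above is the usual kernel pattern for compiling out an optional subsystem: with CONFIG_ATH9K_RATE_CONTROL disabled, the static inlines keep call sites unconditional and the compiler drops them entirely. A minimal sketch of such a call site under that assumption (the function name is illustrative, not from the patch):

/* Works identically with the option on or off, since the disabled
 * ath_rate_control_register() is an inline no-op returning 0. */
static int __init example_module_init(void)
{
	int error;

	error = ath_rate_control_register();
	if (error)
		return error;

	/* ... register the rest of the driver ... */
	return 0;
}
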
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a3fc987ebab0..c5e7af4f51ab 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -19,6 +19,15 @@
19 19
20#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 20#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
21 21
22static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
23 int mindelta, int main_rssi_avg,
24 int alt_rssi_avg, int pkt_count)
25{
26 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
27 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
28 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
29}
30
22static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 31static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
23{ 32{
24 return sc->ps_enabled && 33 return sc->ps_enabled &&
@@ -110,8 +119,7 @@ static void ath_opmode_init(struct ath_softc *sc)
110 ath9k_hw_setrxfilter(ah, rfilt); 119 ath9k_hw_setrxfilter(ah, rfilt);
111 120
112 /* configure bssid mask */ 121 /* configure bssid mask */
113 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 122 ath_hw_setbssidmask(common);
114 ath_hw_setbssidmask(common);
115 123
116 /* configure operational mode */ 124 /* configure operational mode */
117 ath9k_hw_setopmode(ah); 125 ath9k_hw_setopmode(ah);
@@ -292,7 +300,7 @@ static void ath_edma_start_recv(struct ath_softc *sc)
292 300
293 ath_opmode_init(sc); 301 ath_opmode_init(sc);
294 302
295 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING)); 303 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
296} 304}
297 305
298static void ath_edma_stop_recv(struct ath_softc *sc) 306static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -440,6 +448,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
440 rfilt |= ATH9K_RX_FILTER_CONTROL; 448 rfilt |= ATH9K_RX_FILTER_CONTROL;
441 449
442 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && 450 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
451 (sc->nvifs <= 1) &&
443 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) 452 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
444 rfilt |= ATH9K_RX_FILTER_MYBEACON; 453 rfilt |= ATH9K_RX_FILTER_MYBEACON;
445 else 454 else
@@ -454,9 +463,8 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
454 if (conf_is_ht(&sc->hw->conf)) 463 if (conf_is_ht(&sc->hw->conf))
455 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 464 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
456 465
457 if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) { 466 if (sc->sec_wiphy || (sc->nvifs > 1) ||
458 /* TODO: only needed if more than one BSSID is in use in 467 (sc->rx.rxfilter & FIF_OTHER_BSS)) {
459 * station/adhoc mode */
460 /* The following may also be needed for other older chips */ 468 /* The following may also be needed for other older chips */
461 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) 469 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
462 rfilt |= ATH9K_RX_FILTER_PROM; 470 rfilt |= ATH9K_RX_FILTER_PROM;
@@ -498,7 +506,7 @@ int ath_startrecv(struct ath_softc *sc)
498start_recv: 506start_recv:
499 spin_unlock_bh(&sc->rx.rxbuflock); 507 spin_unlock_bh(&sc->rx.rxbuflock);
500 ath_opmode_init(sc); 508 ath_opmode_init(sc);
501 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING)); 509 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
502 510
503 return 0; 511 return 0;
504} 512}
@@ -631,7 +639,7 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
631 * No more broadcast/multicast frames to be received at this 639 * No more broadcast/multicast frames to be received at this
632 * point. 640 * point.
633 */ 641 */
634 sc->ps_flags &= ~PS_WAIT_FOR_CAB; 642 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
635 ath_print(common, ATH_DBG_PS, 643 ath_print(common, ATH_DBG_PS,
636 "All PS CAB frames received, back to sleep\n"); 644 "All PS CAB frames received, back to sleep\n");
637 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && 645 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
@@ -870,15 +878,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
870 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) { 878 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
871 *decrypt_error = true; 879 *decrypt_error = true;
872 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) { 880 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
873 if (ieee80211_is_ctl(fc)) 881 /*
874 /* 882 * The MIC error bit is only valid if the frame
875 * Sometimes, we get invalid 883 * is not a control frame or fragment, and it was
876 * MIC failures on valid control frames. 884 * decrypted using a valid TKIP key.
877 * Remove these mic errors. 885 */
878 */ 886 if (!ieee80211_is_ctl(fc) &&
879 rx_stats->rs_status &= ~ATH9K_RXERR_MIC; 887 !ieee80211_has_morefrags(fc) &&
880 else 888 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
889 test_bit(rx_stats->rs_keyix, common->tkip_keymap))
881 rxs->flag |= RX_FLAG_MMIC_ERROR; 890 rxs->flag |= RX_FLAG_MMIC_ERROR;
891 else
892 rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
882 } 893 }
883 /* 894 /*
884 * Reject error frames with the exception of 895 * Reject error frames with the exception of
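
The rewritten branch above only believes the hardware MIC-error bit when the frame is a non-control, non-fragmented frame that was decrypted with a key marked TKIP in the driver's keymap; everything else has the bit cleared instead of being reported to mac80211. A compact restatement of that predicate (types and the keymap are modelled in user space; only the logic mirrors the diff):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IEEE80211_SCTL_FRAG 0x000f	/* fragment-number bits of seq_ctrl */

struct rx_frame {
	bool is_ctl;		/* control frame? */
	bool more_frags;	/* "More Fragments" bit set? */
	uint16_t seq_ctrl;	/* sequence control, host byte order here */
	unsigned keyix;		/* hardware key index used for decryption */
};

/* Report a Michael MIC failure only when the error bit can be trusted. */
static bool mic_error_valid(const struct rx_frame *f, uint64_t tkip_keymap)
{
	return !f->is_ctl &&
	       !f->more_frags &&
	       !(f->seq_ctrl & IEEE80211_SCTL_FRAG) &&
	       (tkip_keymap & (1ULL << f->keyix));
}

int main(void)
{
	struct rx_frame f = { false, false, 0x0120, 2 };
	uint64_t tkip_keymap = 1ULL << 2;	/* key index 2 holds a TKIP key */

	printf("report MMIC error: %s\n",
	       mic_error_valid(&f, tkip_keymap) ? "yes" : "no");
	return 0;
}
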
@@ -1073,6 +1084,539 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
1073 rxs->flag &= ~RX_FLAG_DECRYPTED; 1084 rxs->flag &= ~RX_FLAG_DECRYPTED;
1074} 1085}
1075 1086
1087static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
1088 struct ath_hw_antcomb_conf ant_conf,
1089 int main_rssi_avg)
1090{
1091 antcomb->quick_scan_cnt = 0;
1092
1093 if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
1094 antcomb->rssi_lna2 = main_rssi_avg;
1095 else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
1096 antcomb->rssi_lna1 = main_rssi_avg;
1097
1098 switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
1099 case (0x10): /* LNA2 A-B */
1100 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1101 antcomb->first_quick_scan_conf =
1102 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1103 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1104 break;
1105 case (0x20): /* LNA1 A-B */
1106 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1107 antcomb->first_quick_scan_conf =
1108 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1109 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1110 break;
1111 case (0x21): /* LNA1 LNA2 */
1112 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
1113 antcomb->first_quick_scan_conf =
1114 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1115 antcomb->second_quick_scan_conf =
1116 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1117 break;
1118 case (0x12): /* LNA2 LNA1 */
1119 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
1120 antcomb->first_quick_scan_conf =
1121 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1122 antcomb->second_quick_scan_conf =
1123 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1124 break;
1125 case (0x13): /* LNA2 A+B */
1126 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1127 antcomb->first_quick_scan_conf =
1128 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1129 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1130 break;
1131 case (0x23): /* LNA1 A+B */
1132 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1133 antcomb->first_quick_scan_conf =
1134 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1135 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1136 break;
1137 default:
1138 break;
1139 }
1140}
1141
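ath_lnaconf_alt_good_scan() above dispatches on a single value built by shifting the main LNA configuration into the high nibble and OR-ing in the alternate configuration, so each (main, alt) pair becomes one readable case label such as 0x21 for "main LNA1, alt LNA2". The same trick in isolation (the values 1 for LNA2 and 2 for LNA1 match the case comments in the hunk, but are otherwise illustrative):

#include <stdio.h>

enum lna_conf { LNA1_MINUS_LNA2 = 0, LNA2 = 1, LNA1 = 2, LNA1_PLUS_LNA2 = 3 };

static const char *describe(enum lna_conf main_c, enum lna_conf alt_c)
{
	/* Pack two 2-bit configs into one switch key: main in bits 7..4. */
	switch ((main_c << 4) | alt_c) {
	case 0x21: return "main LNA1, alt LNA2";
	case 0x12: return "main LNA2, alt LNA1";
	case 0x13: return "main LNA2, alt A+B";
	default:   return "unhandled combination";
	}
}

int main(void)
{
	printf("%s\n", describe(LNA1, LNA2));	/* prints "main LNA1, alt LNA2" */
	return 0;
}
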
1142static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
1143 struct ath_hw_antcomb_conf *div_ant_conf,
1144 int main_rssi_avg, int alt_rssi_avg,
1145 int alt_ratio)
1146{
1147 /* alt_good */
1148 switch (antcomb->quick_scan_cnt) {
1149 case 0:
 1150		/* set main to the stored main conf, and alt to the first conf */
1151 div_ant_conf->main_lna_conf = antcomb->main_conf;
1152 div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
1153 break;
1154 case 1:
 1155		/* set main to the stored main conf, and alt to the second conf */
1156 div_ant_conf->main_lna_conf = antcomb->main_conf;
1157 div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
1158 antcomb->rssi_first = main_rssi_avg;
1159 antcomb->rssi_second = alt_rssi_avg;
1160
1161 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1162 /* main is LNA1 */
1163 if (ath_is_alt_ant_ratio_better(alt_ratio,
1164 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1165 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1166 main_rssi_avg, alt_rssi_avg,
1167 antcomb->total_pkt_count))
1168 antcomb->first_ratio = true;
1169 else
1170 antcomb->first_ratio = false;
1171 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1172 if (ath_is_alt_ant_ratio_better(alt_ratio,
1173 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1174 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1175 main_rssi_avg, alt_rssi_avg,
1176 antcomb->total_pkt_count))
1177 antcomb->first_ratio = true;
1178 else
1179 antcomb->first_ratio = false;
1180 } else {
1181 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1182 (alt_rssi_avg > main_rssi_avg +
1183 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1184 (alt_rssi_avg > main_rssi_avg)) &&
1185 (antcomb->total_pkt_count > 50))
1186 antcomb->first_ratio = true;
1187 else
1188 antcomb->first_ratio = false;
1189 }
1190 break;
1191 case 2:
1192 antcomb->alt_good = false;
1193 antcomb->scan_not_start = false;
1194 antcomb->scan = false;
1195 antcomb->rssi_first = main_rssi_avg;
1196 antcomb->rssi_third = alt_rssi_avg;
1197
1198 if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
1199 antcomb->rssi_lna1 = alt_rssi_avg;
1200 else if (antcomb->second_quick_scan_conf ==
1201 ATH_ANT_DIV_COMB_LNA2)
1202 antcomb->rssi_lna2 = alt_rssi_avg;
1203 else if (antcomb->second_quick_scan_conf ==
1204 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
1205 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
1206 antcomb->rssi_lna2 = main_rssi_avg;
1207 else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
1208 antcomb->rssi_lna1 = main_rssi_avg;
1209 }
1210
1211 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
1212 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
1213 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1214 else
1215 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
1216
1217 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1218 if (ath_is_alt_ant_ratio_better(alt_ratio,
1219 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1220 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1221 main_rssi_avg, alt_rssi_avg,
1222 antcomb->total_pkt_count))
1223 antcomb->second_ratio = true;
1224 else
1225 antcomb->second_ratio = false;
1226 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1227 if (ath_is_alt_ant_ratio_better(alt_ratio,
1228 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1229 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1230 main_rssi_avg, alt_rssi_avg,
1231 antcomb->total_pkt_count))
1232 antcomb->second_ratio = true;
1233 else
1234 antcomb->second_ratio = false;
1235 } else {
1236 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1237 (alt_rssi_avg > main_rssi_avg +
1238 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1239 (alt_rssi_avg > main_rssi_avg)) &&
1240 (antcomb->total_pkt_count > 50))
1241 antcomb->second_ratio = true;
1242 else
1243 antcomb->second_ratio = false;
1244 }
1245
 1246		/* set alt to the conf with maximum ratio */
1247 if (antcomb->first_ratio && antcomb->second_ratio) {
1248 if (antcomb->rssi_second > antcomb->rssi_third) {
1249 /* first alt*/
1250 if ((antcomb->first_quick_scan_conf ==
1251 ATH_ANT_DIV_COMB_LNA1) ||
1252 (antcomb->first_quick_scan_conf ==
1253 ATH_ANT_DIV_COMB_LNA2))
1254 /* Set alt LNA1 or LNA2*/
1255 if (div_ant_conf->main_lna_conf ==
1256 ATH_ANT_DIV_COMB_LNA2)
1257 div_ant_conf->alt_lna_conf =
1258 ATH_ANT_DIV_COMB_LNA1;
1259 else
1260 div_ant_conf->alt_lna_conf =
1261 ATH_ANT_DIV_COMB_LNA2;
1262 else
1263 /* Set alt to A+B or A-B */
1264 div_ant_conf->alt_lna_conf =
1265 antcomb->first_quick_scan_conf;
1266 } else if ((antcomb->second_quick_scan_conf ==
1267 ATH_ANT_DIV_COMB_LNA1) ||
1268 (antcomb->second_quick_scan_conf ==
1269 ATH_ANT_DIV_COMB_LNA2)) {
1270 /* Set alt LNA1 or LNA2 */
1271 if (div_ant_conf->main_lna_conf ==
1272 ATH_ANT_DIV_COMB_LNA2)
1273 div_ant_conf->alt_lna_conf =
1274 ATH_ANT_DIV_COMB_LNA1;
1275 else
1276 div_ant_conf->alt_lna_conf =
1277 ATH_ANT_DIV_COMB_LNA2;
1278 } else {
1279 /* Set alt to A+B or A-B */
1280 div_ant_conf->alt_lna_conf =
1281 antcomb->second_quick_scan_conf;
1282 }
1283 } else if (antcomb->first_ratio) {
1284 /* first alt */
1285 if ((antcomb->first_quick_scan_conf ==
1286 ATH_ANT_DIV_COMB_LNA1) ||
1287 (antcomb->first_quick_scan_conf ==
1288 ATH_ANT_DIV_COMB_LNA2))
1289 /* Set alt LNA1 or LNA2 */
1290 if (div_ant_conf->main_lna_conf ==
1291 ATH_ANT_DIV_COMB_LNA2)
1292 div_ant_conf->alt_lna_conf =
1293 ATH_ANT_DIV_COMB_LNA1;
1294 else
1295 div_ant_conf->alt_lna_conf =
1296 ATH_ANT_DIV_COMB_LNA2;
1297 else
1298 /* Set alt to A+B or A-B */
1299 div_ant_conf->alt_lna_conf =
1300 antcomb->first_quick_scan_conf;
1301 } else if (antcomb->second_ratio) {
1302 /* second alt */
1303 if ((antcomb->second_quick_scan_conf ==
1304 ATH_ANT_DIV_COMB_LNA1) ||
1305 (antcomb->second_quick_scan_conf ==
1306 ATH_ANT_DIV_COMB_LNA2))
1307 /* Set alt LNA1 or LNA2 */
1308 if (div_ant_conf->main_lna_conf ==
1309 ATH_ANT_DIV_COMB_LNA2)
1310 div_ant_conf->alt_lna_conf =
1311 ATH_ANT_DIV_COMB_LNA1;
1312 else
1313 div_ant_conf->alt_lna_conf =
1314 ATH_ANT_DIV_COMB_LNA2;
1315 else
1316 /* Set alt to A+B or A-B */
1317 div_ant_conf->alt_lna_conf =
1318 antcomb->second_quick_scan_conf;
1319 } else {
1320 /* main is largest */
1321 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
1322 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
1323 /* Set alt LNA1 or LNA2 */
1324 if (div_ant_conf->main_lna_conf ==
1325 ATH_ANT_DIV_COMB_LNA2)
1326 div_ant_conf->alt_lna_conf =
1327 ATH_ANT_DIV_COMB_LNA1;
1328 else
1329 div_ant_conf->alt_lna_conf =
1330 ATH_ANT_DIV_COMB_LNA2;
1331 else
1332 /* Set alt to A+B or A-B */
1333 div_ant_conf->alt_lna_conf = antcomb->main_conf;
1334 }
1335 break;
1336 default:
1337 break;
1338 }
1339}
1340
1341static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
1342{
1343 /* Adjust the fast_div_bias based on main and alt lna conf */
1344 switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
1345 case (0x01): /* A-B LNA2 */
1346 ant_conf->fast_div_bias = 0x3b;
1347 break;
1348 case (0x02): /* A-B LNA1 */
1349 ant_conf->fast_div_bias = 0x3d;
1350 break;
1351 case (0x03): /* A-B A+B */
1352 ant_conf->fast_div_bias = 0x1;
1353 break;
1354 case (0x10): /* LNA2 A-B */
1355 ant_conf->fast_div_bias = 0x7;
1356 break;
1357 case (0x12): /* LNA2 LNA1 */
1358 ant_conf->fast_div_bias = 0x2;
1359 break;
1360 case (0x13): /* LNA2 A+B */
1361 ant_conf->fast_div_bias = 0x7;
1362 break;
1363 case (0x20): /* LNA1 A-B */
1364 ant_conf->fast_div_bias = 0x6;
1365 break;
1366 case (0x21): /* LNA1 LNA2 */
1367 ant_conf->fast_div_bias = 0x0;
1368 break;
1369 case (0x23): /* LNA1 A+B */
1370 ant_conf->fast_div_bias = 0x6;
1371 break;
1372 case (0x30): /* A+B A-B */
1373 ant_conf->fast_div_bias = 0x1;
1374 break;
1375 case (0x31): /* A+B LNA2 */
1376 ant_conf->fast_div_bias = 0x3b;
1377 break;
1378 case (0x32): /* A+B LNA1 */
1379 ant_conf->fast_div_bias = 0x3d;
1380 break;
1381 default:
1382 break;
1383 }
1384}
1385
1386/* Antenna diversity and combining */
1387static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1388{
1389 struct ath_hw_antcomb_conf div_ant_conf;
1390 struct ath_ant_comb *antcomb = &sc->ant_comb;
1391 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
1392 int curr_main_set, curr_bias;
1393 int main_rssi = rs->rs_rssi_ctl0;
1394 int alt_rssi = rs->rs_rssi_ctl1;
1395 int rx_ant_conf, main_ant_conf;
1396 bool short_scan = false;
1397
1398 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
1399 ATH_ANT_RX_MASK;
1400 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
1401 ATH_ANT_RX_MASK;
1402
1403 /* Record packet only when alt_rssi is positive */
1404 if (alt_rssi > 0) {
1405 antcomb->total_pkt_count++;
1406 antcomb->main_total_rssi += main_rssi;
1407 antcomb->alt_total_rssi += alt_rssi;
1408 if (main_ant_conf == rx_ant_conf)
1409 antcomb->main_recv_cnt++;
1410 else
1411 antcomb->alt_recv_cnt++;
1412 }
1413
1414 /* Short scan check */
1415 if (antcomb->scan && antcomb->alt_good) {
1416 if (time_after(jiffies, antcomb->scan_start_time +
1417 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
1418 short_scan = true;
1419 else
1420 if (antcomb->total_pkt_count ==
1421 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
1422 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1423 antcomb->total_pkt_count);
1424 if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
1425 short_scan = true;
1426 }
1427 }
1428
1429 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
1430 rs->rs_moreaggr) && !short_scan)
1431 return;
1432
1433 if (antcomb->total_pkt_count) {
1434 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1435 antcomb->total_pkt_count);
1436 main_rssi_avg = (antcomb->main_total_rssi /
1437 antcomb->total_pkt_count);
1438 alt_rssi_avg = (antcomb->alt_total_rssi /
1439 antcomb->total_pkt_count);
1440 }
1441
1442
1443 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
1444 curr_alt_set = div_ant_conf.alt_lna_conf;
1445 curr_main_set = div_ant_conf.main_lna_conf;
1446 curr_bias = div_ant_conf.fast_div_bias;
1447
1448 antcomb->count++;
1449
1450 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
1451 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
1452 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
1453 main_rssi_avg);
1454 antcomb->alt_good = true;
1455 } else {
1456 antcomb->alt_good = false;
1457 }
1458
1459 antcomb->count = 0;
1460 antcomb->scan = true;
1461 antcomb->scan_not_start = true;
1462 }
1463
1464 if (!antcomb->scan) {
1465 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
1466 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
1467 /* Switch main and alt LNA */
1468 div_ant_conf.main_lna_conf =
1469 ATH_ANT_DIV_COMB_LNA2;
1470 div_ant_conf.alt_lna_conf =
1471 ATH_ANT_DIV_COMB_LNA1;
1472 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
1473 div_ant_conf.main_lna_conf =
1474 ATH_ANT_DIV_COMB_LNA1;
1475 div_ant_conf.alt_lna_conf =
1476 ATH_ANT_DIV_COMB_LNA2;
1477 }
1478
1479 goto div_comb_done;
1480 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
1481 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
1482 /* Set alt to another LNA */
1483 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
1484 div_ant_conf.alt_lna_conf =
1485 ATH_ANT_DIV_COMB_LNA1;
1486 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
1487 div_ant_conf.alt_lna_conf =
1488 ATH_ANT_DIV_COMB_LNA2;
1489
1490 goto div_comb_done;
1491 }
1492
1493 if ((alt_rssi_avg < (main_rssi_avg +
1494 ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA)))
1495 goto div_comb_done;
1496 }
1497
1498 if (!antcomb->scan_not_start) {
1499 switch (curr_alt_set) {
1500 case ATH_ANT_DIV_COMB_LNA2:
1501 antcomb->rssi_lna2 = alt_rssi_avg;
1502 antcomb->rssi_lna1 = main_rssi_avg;
1503 antcomb->scan = true;
1504 /* set to A+B */
1505 div_ant_conf.main_lna_conf =
1506 ATH_ANT_DIV_COMB_LNA1;
1507 div_ant_conf.alt_lna_conf =
1508 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1509 break;
1510 case ATH_ANT_DIV_COMB_LNA1:
1511 antcomb->rssi_lna1 = alt_rssi_avg;
1512 antcomb->rssi_lna2 = main_rssi_avg;
1513 antcomb->scan = true;
1514 /* set to A+B */
1515 div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1516 div_ant_conf.alt_lna_conf =
1517 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1518 break;
1519 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
1520 antcomb->rssi_add = alt_rssi_avg;
1521 antcomb->scan = true;
1522 /* set to A-B */
1523 div_ant_conf.alt_lna_conf =
1524 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1525 break;
1526 case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
1527 antcomb->rssi_sub = alt_rssi_avg;
1528 antcomb->scan = false;
1529 if (antcomb->rssi_lna2 >
1530 (antcomb->rssi_lna1 +
1531 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
1532 /* use LNA2 as main LNA */
1533 if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
1534 (antcomb->rssi_add > antcomb->rssi_sub)) {
1535 /* set to A+B */
1536 div_ant_conf.main_lna_conf =
1537 ATH_ANT_DIV_COMB_LNA2;
1538 div_ant_conf.alt_lna_conf =
1539 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1540 } else if (antcomb->rssi_sub >
1541 antcomb->rssi_lna1) {
1542 /* set to A-B */
1543 div_ant_conf.main_lna_conf =
1544 ATH_ANT_DIV_COMB_LNA2;
1545 div_ant_conf.alt_lna_conf =
1546 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1547 } else {
1548 /* set to LNA1 */
1549 div_ant_conf.main_lna_conf =
1550 ATH_ANT_DIV_COMB_LNA2;
1551 div_ant_conf.alt_lna_conf =
1552 ATH_ANT_DIV_COMB_LNA1;
1553 }
1554 } else {
1555 /* use LNA1 as main LNA */
1556 if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
1557 (antcomb->rssi_add > antcomb->rssi_sub)) {
1558 /* set to A+B */
1559 div_ant_conf.main_lna_conf =
1560 ATH_ANT_DIV_COMB_LNA1;
1561 div_ant_conf.alt_lna_conf =
1562 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1563 } else if (antcomb->rssi_sub >
1564 antcomb->rssi_lna1) {
1565 /* set to A-B */
1566 div_ant_conf.main_lna_conf =
1567 ATH_ANT_DIV_COMB_LNA1;
1568 div_ant_conf.alt_lna_conf =
1569 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1570 } else {
1571 /* set to LNA2 */
1572 div_ant_conf.main_lna_conf =
1573 ATH_ANT_DIV_COMB_LNA1;
1574 div_ant_conf.alt_lna_conf =
1575 ATH_ANT_DIV_COMB_LNA2;
1576 }
1577 }
1578 break;
1579 default:
1580 break;
1581 }
1582 } else {
1583 if (!antcomb->alt_good) {
1584 antcomb->scan_not_start = false;
1585 /* Set alt to another LNA */
1586 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
1587 div_ant_conf.main_lna_conf =
1588 ATH_ANT_DIV_COMB_LNA2;
1589 div_ant_conf.alt_lna_conf =
1590 ATH_ANT_DIV_COMB_LNA1;
1591 } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
1592 div_ant_conf.main_lna_conf =
1593 ATH_ANT_DIV_COMB_LNA1;
1594 div_ant_conf.alt_lna_conf =
1595 ATH_ANT_DIV_COMB_LNA2;
1596 }
1597 goto div_comb_done;
1598 }
1599 }
1600
1601 ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
1602 main_rssi_avg, alt_rssi_avg,
1603 alt_ratio);
1604
1605 antcomb->quick_scan_cnt++;
1606
1607div_comb_done:
1608 ath_ant_div_conf_fast_divbias(&div_ant_conf);
1609
1610 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
1611
1612 antcomb->scan_start_time = jiffies;
1613 antcomb->total_pkt_count = 0;
1614 antcomb->main_total_rssi = 0;
1615 antcomb->alt_total_rssi = 0;
1616 antcomb->main_recv_cnt = 0;
1617 antcomb->alt_recv_cnt = 0;
1618}
1619
1076int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1620int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1077{ 1621{
1078 struct ath_buf *bf; 1622 struct ath_buf *bf;
@@ -1096,6 +1640,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1096 u8 rx_status_len = ah->caps.rx_status_len; 1640 u8 rx_status_len = ah->caps.rx_status_len;
1097 u64 tsf = 0; 1641 u64 tsf = 0;
1098 u32 tsf_lower = 0; 1642 u32 tsf_lower = 0;
1643 unsigned long flags;
1099 1644
1100 if (edma) 1645 if (edma)
1101 dma_type = DMA_BIDIRECTIONAL; 1646 dma_type = DMA_BIDIRECTIONAL;
@@ -1204,11 +1749,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1204 sc->rx.rxotherant = 0; 1749 sc->rx.rxotherant = 0;
1205 } 1750 }
1206 1751
1752 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1207 if (unlikely(ath9k_check_auto_sleep(sc) || 1753 if (unlikely(ath9k_check_auto_sleep(sc) ||
1208 (sc->ps_flags & (PS_WAIT_FOR_BEACON | 1754 (sc->ps_flags & (PS_WAIT_FOR_BEACON |
1209 PS_WAIT_FOR_CAB | 1755 PS_WAIT_FOR_CAB |
1210 PS_WAIT_FOR_PSPOLL_DATA)))) 1756 PS_WAIT_FOR_PSPOLL_DATA))))
1211 ath_rx_ps(sc, skb); 1757 ath_rx_ps(sc, skb);
1758 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1759
1760 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
1761 ath_ant_comb_scan(sc, &rs);
1212 1762
1213 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 1763 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
1214 1764
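
ath_ant_comb_scan() accumulates per-chain statistics and, once enough frames have been seen, reduces them to three numbers: the percentage of frames received on the alternate antenna and the average RSSI of each chain. A worked version of that bookkeeping (the 30% threshold stands in for ATH_ANT_DIV_COMB_ALT_ANT_RATIO; the exact constants are not shown in the hunk):

#include <stdio.h>

struct ant_stats {
	int total_pkt_count;
	int main_total_rssi, alt_total_rssi;
	int main_recv_cnt, alt_recv_cnt;
};

int main(void)
{
	struct ant_stats s = { 512, 512 * 30, 512 * 35, 312, 200 };

	int alt_ratio     = s.alt_recv_cnt * 100 / s.total_pkt_count; /* 39 */
	int main_rssi_avg = s.main_total_rssi / s.total_pkt_count;    /* 30 */
	int alt_rssi_avg  = s.alt_total_rssi / s.total_pkt_count;     /* 35 */

	/* A high alt ratio plus a stronger alt chain favors swapping LNAs. */
	if (alt_ratio > 30 && alt_rssi_avg > main_rssi_avg)
		printf("switch main/alt (ratio %d%%, rssi %d vs %d)\n",
		       alt_ratio, alt_rssi_avg, main_rssi_avg);
	return 0;
}
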
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index fd20241f57d8..ec7cf5ee56bc 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -19,45 +19,36 @@
19#include "ath9k.h" 19#include "ath9k.h"
20 20
21struct ath9k_vif_iter_data { 21struct ath9k_vif_iter_data {
22 int count; 22 const u8 *hw_macaddr;
23 u8 *addr; 23 u8 mask[ETH_ALEN];
24}; 24};
25 25
26static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) 26static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
27{ 27{
28 struct ath9k_vif_iter_data *iter_data = data; 28 struct ath9k_vif_iter_data *iter_data = data;
29 u8 *nbuf; 29 int i;
30
31 nbuf = krealloc(iter_data->addr, (iter_data->count + 1) * ETH_ALEN,
32 GFP_ATOMIC);
33 if (nbuf == NULL)
34 return;
35 30
36 memcpy(nbuf + iter_data->count * ETH_ALEN, mac, ETH_ALEN); 31 for (i = 0; i < ETH_ALEN; i++)
37 iter_data->addr = nbuf; 32 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
38 iter_data->count++;
39} 33}
40 34
41void ath9k_set_bssid_mask(struct ieee80211_hw *hw) 35void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
42{ 36{
43 struct ath_wiphy *aphy = hw->priv; 37 struct ath_wiphy *aphy = hw->priv;
44 struct ath_softc *sc = aphy->sc; 38 struct ath_softc *sc = aphy->sc;
45 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 39 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
46 struct ath9k_vif_iter_data iter_data; 40 struct ath9k_vif_iter_data iter_data;
47 int i, j; 41 int i;
48 u8 mask[ETH_ALEN];
49 42
50 /* 43 /*
51 * Add primary MAC address even if it is not in active use since it 44 * Use the hardware MAC address as reference, the hardware uses it
52 * will be configured to the hardware as the starting point and the 45 * together with the BSSID mask when matching addresses.
53 * BSSID mask will need to be changed if another address is active.
54 */ 46 */
55 iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC); 47 iter_data.hw_macaddr = common->macaddr;
56 if (iter_data.addr) { 48 memset(&iter_data.mask, 0xff, ETH_ALEN);
57 memcpy(iter_data.addr, common->macaddr, ETH_ALEN); 49
58 iter_data.count = 1; 50 if (vif)
59 } else 51 ath9k_vif_iter(&iter_data, vif->addr, vif);
60 iter_data.count = 0;
61 52
62 /* Get list of all active MAC addresses */ 53 /* Get list of all active MAC addresses */
63 spin_lock_bh(&sc->wiphy_lock); 54 spin_lock_bh(&sc->wiphy_lock);
@@ -71,31 +62,7 @@ void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
71 } 62 }
72 spin_unlock_bh(&sc->wiphy_lock); 63 spin_unlock_bh(&sc->wiphy_lock);
73 64
74 /* Generate an address mask to cover all active addresses */ 65 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
75 memset(mask, 0, ETH_ALEN);
76 for (i = 0; i < iter_data.count; i++) {
77 u8 *a1 = iter_data.addr + i * ETH_ALEN;
78 for (j = i + 1; j < iter_data.count; j++) {
79 u8 *a2 = iter_data.addr + j * ETH_ALEN;
80 mask[0] |= a1[0] ^ a2[0];
81 mask[1] |= a1[1] ^ a2[1];
82 mask[2] |= a1[2] ^ a2[2];
83 mask[3] |= a1[3] ^ a2[3];
84 mask[4] |= a1[4] ^ a2[4];
85 mask[5] |= a1[5] ^ a2[5];
86 }
87 }
88
89 kfree(iter_data.addr);
90
91 /* Invert the mask and configure hardware */
92 common->bssidmask[0] = ~mask[0];
93 common->bssidmask[1] = ~mask[1];
94 common->bssidmask[2] = ~mask[2];
95 common->bssidmask[3] = ~mask[3];
96 common->bssidmask[4] = ~mask[4];
97 common->bssidmask[5] = ~mask[5];
98
99 ath_hw_setbssidmask(common); 66 ath_hw_setbssidmask(common);
100} 67}
101 68
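
The new iterator is a tidy simplification: starting from an all-ones mask, it clears every bit position in which some active interface address differs from the hardware MAC, so the final mask marks exactly the address bits the hardware must match. Worked in user space:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Clear mask bits wherever this interface address differs from the HW MAC. */
static void bssid_mask_step(unsigned char *mask, const unsigned char *hw,
			    const unsigned char *addr)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mask[i] &= ~(hw[i] ^ addr[i]);
}

int main(void)
{
	const unsigned char hw[ETH_ALEN]  = { 0x00, 0x03, 0x7f, 0x00, 0x00, 0x01 };
	const unsigned char vif[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x00, 0x00, 0x05 };
	unsigned char mask[ETH_ALEN];
	int i;

	memset(mask, 0xff, ETH_ALEN);
	bssid_mask_step(mask, hw, vif);	/* 0x01 ^ 0x05 = 0x04: clear that bit */

	for (i = 0; i < ETH_ALEN; i++)	/* prints ff:ff:ff:ff:ff:fb */
		printf("%02x%c", mask[i], i == ETH_ALEN - 1 ? '\n' : ':');
	return 0;
}
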
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 6260faa658a2..93a8bda09c25 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -85,6 +85,8 @@ static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
85 return "WMI_TGT_DETACH_CMDID"; 85 return "WMI_TGT_DETACH_CMDID";
86 case WMI_TGT_TXQ_ENABLE_CMDID: 86 case WMI_TGT_TXQ_ENABLE_CMDID:
87 return "WMI_TGT_TXQ_ENABLE_CMDID"; 87 return "WMI_TGT_TXQ_ENABLE_CMDID";
88 case WMI_AGGR_LIMIT_CMD:
89 return "WMI_AGGR_LIMIT_CMD";
88 } 90 }
89 91
90 return "Bogus"; 92 return "Bogus";
@@ -122,55 +124,11 @@ void ath9k_wmi_tasklet(unsigned long data)
122{ 124{
123 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; 125 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
124 struct ath_common *common = ath9k_hw_common(priv->ah); 126 struct ath_common *common = ath9k_hw_common(priv->ah);
125 struct wmi_cmd_hdr *hdr;
126 struct wmi_swba *swba_hdr;
127 enum wmi_event_id event;
128 struct sk_buff *skb;
129 void *wmi_event;
130 unsigned long flags;
131#ifdef CONFIG_ATH9K_HTC_DEBUGFS
132 __be32 txrate;
133#endif
134 127
135 spin_lock_irqsave(&priv->wmi->wmi_lock, flags); 128 ath_print(common, ATH_DBG_WMI, "SWBA Event received\n");
136 skb = priv->wmi->wmi_skb;
137 spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
138 129
139 hdr = (struct wmi_cmd_hdr *) skb->data; 130 ath9k_htc_swba(priv, priv->wmi->beacon_pending);
140 event = be16_to_cpu(hdr->command_id);
141 wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
142 131
143 ath_print(common, ATH_DBG_WMI,
144 "WMI Event: 0x%x\n", event);
145
146 switch (event) {
147 case WMI_TGT_RDY_EVENTID:
148 break;
149 case WMI_SWBA_EVENTID:
150 swba_hdr = (struct wmi_swba *) wmi_event;
151 ath9k_htc_swba(priv, swba_hdr->beacon_pending);
152 break;
153 case WMI_FATAL_EVENTID:
154 break;
155 case WMI_TXTO_EVENTID:
156 break;
157 case WMI_BMISS_EVENTID:
158 break;
159 case WMI_WLAN_TXCOMP_EVENTID:
160 break;
161 case WMI_DELBA_EVENTID:
162 break;
163 case WMI_TXRATE_EVENTID:
164#ifdef CONFIG_ATH9K_HTC_DEBUGFS
165 txrate = ((struct wmi_event_txrate *)wmi_event)->txrate;
166 priv->debug.txrate = be32_to_cpu(txrate);
167#endif
168 break;
169 default:
170 break;
171 }
172
173 kfree_skb(skb);
174} 132}
175 133
176static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb) 134static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
@@ -189,6 +147,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
189 struct wmi *wmi = (struct wmi *) priv; 147 struct wmi *wmi = (struct wmi *) priv;
190 struct wmi_cmd_hdr *hdr; 148 struct wmi_cmd_hdr *hdr;
191 u16 cmd_id; 149 u16 cmd_id;
150 void *wmi_event;
151#ifdef CONFIG_ATH9K_HTC_DEBUGFS
152 __be32 txrate;
153#endif
192 154
193 if (unlikely(wmi->stopped)) 155 if (unlikely(wmi->stopped))
194 goto free_skb; 156 goto free_skb;
@@ -197,10 +159,22 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
197 cmd_id = be16_to_cpu(hdr->command_id); 159 cmd_id = be16_to_cpu(hdr->command_id);
198 160
199 if (cmd_id & 0x1000) { 161 if (cmd_id & 0x1000) {
200 spin_lock(&wmi->wmi_lock); 162 wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
201 wmi->wmi_skb = skb; 163 switch (cmd_id) {
202 spin_unlock(&wmi->wmi_lock); 164 case WMI_SWBA_EVENTID:
203 tasklet_schedule(&wmi->drv_priv->wmi_tasklet); 165 wmi->beacon_pending = *(u8 *)wmi_event;
166 tasklet_schedule(&wmi->drv_priv->wmi_tasklet);
167 break;
168 case WMI_TXRATE_EVENTID:
169#ifdef CONFIG_ATH9K_HTC_DEBUGFS
170 txrate = ((struct wmi_event_txrate *)wmi_event)->txrate;
171 wmi->drv_priv->debug.txrate = be32_to_cpu(txrate);
172#endif
173 break;
174 default:
175 break;
176 }
177 kfree_skb(skb);
204 return; 178 return;
205 } 179 }
206 180
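
The restructured WMI path no longer hands a whole skb from the USB completion context to the tasklet; the only datum the deferred SWBA handler needs is one byte, which is now copied into wmi->beacon_pending before the tasklet is scheduled, and the skb is freed immediately. A sketch of that "copy the small payload, defer the work" pattern (the flag below stands in for tasklet_schedule(); this is user-space modelling, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct wmi_model {
	uint8_t beacon_pending;	/* tiny payload copied out of the event */
	bool work_scheduled;	/* stands in for tasklet_schedule() */
};

/* Completion-context path: stash one byte, then defer. */
static void ctrl_rx(struct wmi_model *w, const uint8_t *event)
{
	w->beacon_pending = event[0];
	w->work_scheduled = true;
}

/* Deferred path: nothing to parse or free, just the cached byte. */
static void wmi_work(struct wmi_model *w)
{
	if (w->work_scheduled)
		printf("SWBA: %u beacon(s) pending\n", w->beacon_pending);
	w->work_scheduled = false;
}

int main(void)
{
	struct wmi_model w = { 0, false };
	uint8_t event[1] = { 2 };

	ctrl_rx(&w, event);
	wmi_work(&w);
	return 0;
}
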
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index 765db5faa2d3..ac61074af8ac 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -31,10 +31,6 @@ struct wmi_cmd_hdr {
31 __be16 seq_no; 31 __be16 seq_no;
32} __packed; 32} __packed;
33 33
34struct wmi_swba {
35 u8 beacon_pending;
36} __packed;
37
38enum wmi_cmd_id { 34enum wmi_cmd_id {
39 WMI_ECHO_CMDID = 0x0001, 35 WMI_ECHO_CMDID = 0x0001,
40 WMI_ACCESS_MEMORY_CMDID, 36 WMI_ACCESS_MEMORY_CMDID,
@@ -71,6 +67,7 @@ enum wmi_cmd_id {
71 WMI_TX_AGGR_ENABLE_CMDID, 67 WMI_TX_AGGR_ENABLE_CMDID,
72 WMI_TGT_DETACH_CMDID, 68 WMI_TGT_DETACH_CMDID,
73 WMI_TGT_TXQ_ENABLE_CMDID, 69 WMI_TGT_TXQ_ENABLE_CMDID,
70 WMI_AGGR_LIMIT_CMD = 0x0026,
74}; 71};
75 72
76enum wmi_event_id { 73enum wmi_event_id {
@@ -103,7 +100,7 @@ struct wmi {
103 u32 cmd_rsp_len; 100 u32 cmd_rsp_len;
104 bool stopped; 101 bool stopped;
105 102
106 struct sk_buff *wmi_skb; 103 u8 beacon_pending;
107 spinlock_t wmi_lock; 104 spinlock_t wmi_lock;
108 105
109 atomic_t mwrite_cnt; 106 atomic_t mwrite_cnt;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 4dda14e36227..85a7323a04ef 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -61,6 +61,8 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
61 struct ath_tx_status *ts, int txok); 61 struct ath_tx_status *ts, int txok);
62static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, 62static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
63 int nbad, int txok, bool update_rc); 63 int nbad, int txok, bool update_rc);
64static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
65 int seqno);
64 66
65enum { 67enum {
66 MCS_HT20, 68 MCS_HT20,
@@ -143,18 +145,23 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
143 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 145 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
144 struct ath_buf *bf; 146 struct ath_buf *bf;
145 struct list_head bf_head; 147 struct list_head bf_head;
146 INIT_LIST_HEAD(&bf_head); 148 struct ath_tx_status ts;
147 149
148 WARN_ON(!tid->paused); 150 INIT_LIST_HEAD(&bf_head);
149 151
152 memset(&ts, 0, sizeof(ts));
150 spin_lock_bh(&txq->axq_lock); 153 spin_lock_bh(&txq->axq_lock);
151 tid->paused = false;
152 154
153 while (!list_empty(&tid->buf_q)) { 155 while (!list_empty(&tid->buf_q)) {
154 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 156 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
155 BUG_ON(bf_isretried(bf));
156 list_move_tail(&bf->list, &bf_head); 157 list_move_tail(&bf->list, &bf_head);
157 ath_tx_send_ht_normal(sc, txq, tid, &bf_head); 158
159 if (bf_isretried(bf)) {
160 ath_tx_update_baw(sc, tid, bf->bf_seqno);
161 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
162 } else {
163 ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
164 }
158 } 165 }
159 166
160 spin_unlock_bh(&txq->axq_lock); 167 spin_unlock_bh(&txq->axq_lock);
@@ -168,9 +175,9 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
168 index = ATH_BA_INDEX(tid->seq_start, seqno); 175 index = ATH_BA_INDEX(tid->seq_start, seqno);
169 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 176 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
170 177
171 tid->tx_buf[cindex] = NULL; 178 __clear_bit(cindex, tid->tx_buf);
172 179
173 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) { 180 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
174 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 181 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
175 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 182 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
176 } 183 }
@@ -186,9 +193,7 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
186 193
187 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); 194 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
188 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 195 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
189 196 __set_bit(cindex, tid->tx_buf);
190 BUG_ON(tid->tx_buf[cindex] != NULL);
191 tid->tx_buf[cindex] = bf;
192 197
193 if (index >= ((tid->baw_tail - tid->baw_head) & 198 if (index >= ((tid->baw_tail - tid->baw_head) &
194 (ATH_TID_MAX_BUFS - 1))) { 199 (ATH_TID_MAX_BUFS - 1))) {
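
With tx_buf converted from an array of buffer pointers to a bitmap, the block-ack window becomes pure index arithmetic: __set_bit() marks a subframe in flight, __clear_bit() retires it, and the window start slides forward over every already-cleared slot. A self-contained model of that sliding window (the tail handling is simplified relative to the driver):

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFS 64		/* stands in for ATH_TID_MAX_BUFS */
#define SEQ_MAX  4096		/* stands in for IEEE80211_SEQ_MAX */
#define INCR(v, m) ((v) = ((v) + 1) & ((m) - 1))

static bool in_flight[MAX_BUFS];	/* the tid->tx_buf bitmap */
static int baw_head, baw_tail, seq_start;

static void addto_baw(int index)
{
	int cindex = (baw_head + index) & (MAX_BUFS - 1);

	in_flight[cindex] = true;	/* __set_bit(cindex, tx_buf) */
	if (cindex == baw_tail)
		INCR(baw_tail, MAX_BUFS);
}

static void update_baw(int index)
{
	int cindex = (baw_head + index) & (MAX_BUFS - 1);

	in_flight[cindex] = false;	/* __clear_bit(cindex, tx_buf) */
	while (baw_head != baw_tail && !in_flight[baw_head]) {
		INCR(seq_start, SEQ_MAX);
		INCR(baw_head, MAX_BUFS);
	}
}

int main(void)
{
	addto_baw(0);
	addto_baw(1);
	update_baw(1);	/* out-of-order completion: window cannot move yet */
	printf("seq_start=%d\n", seq_start);	/* still 0 */
	update_baw(0);	/* both done: window slides past both slots */
	printf("seq_start=%d\n", seq_start);	/* now 2 */
	return 0;
}
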
@@ -431,7 +436,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
431 list_move_tail(&bf->list, &bf_head); 436 list_move_tail(&bf->list, &bf_head);
432 } 437 }
433 438
434 if (!txpending) { 439 if (!txpending || (tid->state & AGGR_CLEANUP)) {
435 /* 440 /*
436 * complete the acked-ones/xretried ones; update 441 * complete the acked-ones/xretried ones; update
437 * block-ack window 442 * block-ack window
@@ -510,15 +515,12 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
510 } 515 }
511 516
512 if (tid->state & AGGR_CLEANUP) { 517 if (tid->state & AGGR_CLEANUP) {
518 ath_tx_flush_tid(sc, tid);
519
513 if (tid->baw_head == tid->baw_tail) { 520 if (tid->baw_head == tid->baw_tail) {
514 tid->state &= ~AGGR_ADDBA_COMPLETE; 521 tid->state &= ~AGGR_ADDBA_COMPLETE;
515 tid->state &= ~AGGR_CLEANUP; 522 tid->state &= ~AGGR_CLEANUP;
516
517 /* send buffered frames as singles */
518 ath_tx_flush_tid(sc, tid);
519 } 523 }
520 rcu_read_unlock();
521 return;
522 } 524 }
523 525
524 rcu_read_unlock(); 526 rcu_read_unlock();
@@ -785,17 +787,23 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
785 status != ATH_AGGR_BAW_CLOSED); 787 status != ATH_AGGR_BAW_CLOSED);
786} 788}
787 789
788void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 790int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
789 u16 tid, u16 *ssn) 791 u16 tid, u16 *ssn)
790{ 792{
791 struct ath_atx_tid *txtid; 793 struct ath_atx_tid *txtid;
792 struct ath_node *an; 794 struct ath_node *an;
793 795
794 an = (struct ath_node *)sta->drv_priv; 796 an = (struct ath_node *)sta->drv_priv;
795 txtid = ATH_AN_2_TID(an, tid); 797 txtid = ATH_AN_2_TID(an, tid);
798
799 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
800 return -EAGAIN;
801
796 txtid->state |= AGGR_ADDBA_PROGRESS; 802 txtid->state |= AGGR_ADDBA_PROGRESS;
797 txtid->paused = true; 803 txtid->paused = true;
798 *ssn = txtid->seq_start; 804 *ssn = txtid->seq_start;
805
806 return 0;
799} 807}
800 808
801void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 809void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
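
ath_tx_aggr_start() now refuses to (re)open a session while the TID is still in cleanup or already has a completed ADDBA, returning -EAGAIN so the caller can retry once teardown finishes. A caller-side sketch of how such a guard is consumed (the flag values are assumptions for illustration):

#include <errno.h>
#include <stdio.h>

#define AGGR_ADDBA_COMPLETE 0x1
#define AGGR_ADDBA_PROGRESS 0x2
#define AGGR_CLEANUP        0x4

static int tx_aggr_start(unsigned *state, unsigned short *ssn)
{
	if (*state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
		return -EAGAIN;	/* previous session not fully torn down */
	*state |= AGGR_ADDBA_PROGRESS;
	*ssn = 0x120;		/* would be txtid->seq_start */
	return 0;
}

int main(void)
{
	unsigned state = AGGR_CLEANUP;
	unsigned short ssn;

	if (tx_aggr_start(&state, &ssn) == -EAGAIN)
		printf("ADDBA deferred, retry later\n");

	state = 0;
	if (tx_aggr_start(&state, &ssn) == 0)
		printf("ADDBA started, ssn=0x%x\n", ssn);
	return 0;
}
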
@@ -803,12 +811,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
803 struct ath_node *an = (struct ath_node *)sta->drv_priv; 811 struct ath_node *an = (struct ath_node *)sta->drv_priv;
804 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 812 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
805 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; 813 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
806 struct ath_tx_status ts;
807 struct ath_buf *bf;
808 struct list_head bf_head;
809
810 memset(&ts, 0, sizeof(ts));
811 INIT_LIST_HEAD(&bf_head);
812 814
813 if (txtid->state & AGGR_CLEANUP) 815 if (txtid->state & AGGR_CLEANUP)
814 return; 816 return;
@@ -818,31 +820,22 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
818 return; 820 return;
819 } 821 }
820 822
821 /* drop all software retried frames and mark this TID */
822 spin_lock_bh(&txq->axq_lock); 823 spin_lock_bh(&txq->axq_lock);
823 txtid->paused = true; 824 txtid->paused = true;
824 while (!list_empty(&txtid->buf_q)) {
825 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
826 if (!bf_isretried(bf)) {
827 /*
828 * NB: it's based on the assumption that
829 * software retried frame will always stay
830 * at the head of software queue.
831 */
832 break;
833 }
834 list_move_tail(&bf->list, &bf_head);
835 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
836 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
837 }
838 spin_unlock_bh(&txq->axq_lock);
839 825
840 if (txtid->baw_head != txtid->baw_tail) { 826 /*
827 * If frames are still being transmitted for this TID, they will be
828 * cleaned up during tx completion. To prevent race conditions, this
829 * TID can only be reused after all in-progress subframes have been
830 * completed.
831 */
832 if (txtid->baw_head != txtid->baw_tail)
841 txtid->state |= AGGR_CLEANUP; 833 txtid->state |= AGGR_CLEANUP;
842 } else { 834 else
843 txtid->state &= ~AGGR_ADDBA_COMPLETE; 835 txtid->state &= ~AGGR_ADDBA_COMPLETE;
844 ath_tx_flush_tid(sc, txtid); 836 spin_unlock_bh(&txq->axq_lock);
845 } 837
838 ath_tx_flush_tid(sc, txtid);
846} 839}
847 840
848void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 841void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
@@ -862,20 +855,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
862 } 855 }
863} 856}
864 857
865bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
866{
867 struct ath_atx_tid *txtid;
868
869 if (!(sc->sc_flags & SC_OP_TXAGGR))
870 return false;
871
872 txtid = ATH_AN_2_TID(an, tidno);
873
874 if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
875 return true;
876 return false;
877}
878
879/********************/ 858/********************/
880/* Queue Management */ 859/* Queue Management */
881/********************/ 860/********************/
@@ -1407,22 +1386,6 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1407 return htype; 1386 return htype;
1408} 1387}
1409 1388
1410static int get_hw_crypto_keytype(struct sk_buff *skb)
1411{
1412 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1413
1414 if (tx_info->control.hw_key) {
1415 if (tx_info->control.hw_key->alg == ALG_WEP)
1416 return ATH9K_KEY_TYPE_WEP;
1417 else if (tx_info->control.hw_key->alg == ALG_TKIP)
1418 return ATH9K_KEY_TYPE_TKIP;
1419 else if (tx_info->control.hw_key->alg == ALG_CCMP)
1420 return ATH9K_KEY_TYPE_AES;
1421 }
1422
1423 return ATH9K_KEY_TYPE_CLEAR;
1424}
1425
1426static void assign_aggr_tid_seqno(struct sk_buff *skb, 1389static void assign_aggr_tid_seqno(struct sk_buff *skb,
1427 struct ath_buf *bf) 1390 struct ath_buf *bf)
1428{ 1391{
@@ -1661,7 +1624,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1661 bf->bf_state.bfs_paprd_timestamp = jiffies; 1624 bf->bf_state.bfs_paprd_timestamp = jiffies;
1662 bf->bf_flags = setup_tx_flags(skb, use_ldpc); 1625 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
1663 1626
1664 bf->bf_keytype = get_hw_crypto_keytype(skb); 1627 bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
1665 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) { 1628 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1666 bf->bf_frmlen += tx_info->control.hw_key->icv_len; 1629 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1667 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx; 1630 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
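
The deleted get_hw_crypto_keytype() survives as ath9k_cmn_get_hw_crypto_keytype() in the driver's shared code; the mapping itself is a straight cipher-to-hardware-key-type translation. Sketched standalone (enum values are illustrative, not the kernel's):

#include <stdio.h>

enum alg     { ALG_NONE, ALG_WEP, ALG_TKIP, ALG_CCMP };
enum keytype { KEY_TYPE_CLEAR, KEY_TYPE_WEP, KEY_TYPE_TKIP, KEY_TYPE_AES };

/* NULL hw_key means the frame is unencrypted or uses software crypto. */
static enum keytype hw_crypto_keytype(const enum alg *hw_key)
{
	if (hw_key) {
		switch (*hw_key) {
		case ALG_WEP:	return KEY_TYPE_WEP;
		case ALG_TKIP:	return KEY_TYPE_TKIP;
		case ALG_CCMP:	return KEY_TYPE_AES;
		default:	break;
		}
	}
	return KEY_TYPE_CLEAR;
}

int main(void)
{
	enum alg a = ALG_CCMP;

	printf("keytype=%d\n", hw_crypto_keytype(&a));	/* KEY_TYPE_AES */
	printf("keytype=%d\n", hw_crypto_keytype(NULL));	/* KEY_TYPE_CLEAR */
	return 0;
}
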
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
new file mode 100644
index 000000000000..c5d3a3f2e55b
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -0,0 +1,41 @@
1config CARL9170
2 tristate "Linux Community AR9170 802.11n USB support"
3 depends on USB && MAC80211 && EXPERIMENTAL
4 select FW_LOADER
5 select CRC32
6 help
7 This is another driver for the Atheros "otus" 802.11n USB devices.
8
9 This driver provides more features than the original,
10 but it needs a special firmware (carl9170-1.fw) to do that.
11
12 The firmware can be downloaded from our wiki here:
13 http://wireless.kernel.org/en/users/Drivers/carl9170
14
15 If you choose to build a module, it'll be called carl9170.
16
17config CARL9170_LEDS
18 bool "SoftLED Support"
19 depends on CARL9170
20 select MAC80211_LEDS
21 select LEDS_CLASS
22 select NEW_LEDS
23 default y
24 help
 25	  This option is necessary if you want your device's LEDs to blink.
26
27 Say Y, unless you need the LEDs for firmware debugging.
28
29config CARL9170_DEBUGFS
30 bool "DebugFS Support"
31 depends on CARL9170 && DEBUG_FS && MAC80211_DEBUGFS
32 default n
33 help
34 Export several driver and device internals to user space.
35
36 Say N.
37
38config CARL9170_WPC
39 bool
40 depends on CARL9170 && (INPUT = y || INPUT = CARL9170)
41 default y
diff --git a/drivers/net/wireless/ath/carl9170/Makefile b/drivers/net/wireless/ath/carl9170/Makefile
new file mode 100644
index 000000000000..f64ed76af8ad
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/Makefile
@@ -0,0 +1,4 @@
1carl9170-objs := main.o usb.o cmd.o mac.o phy.o led.o fw.o tx.o rx.o
2carl9170-$(CONFIG_CARL9170_DEBUGFS) += debug.o
3
4obj-$(CONFIG_CARL9170) += carl9170.o
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
new file mode 100644
index 000000000000..d7c5482d74ce
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -0,0 +1,627 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * Driver specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39#ifndef __CARL9170_H
40#define __CARL9170_H
41
42#include <linux/kernel.h>
43#include <linux/firmware.h>
44#include <linux/completion.h>
45#include <linux/spinlock.h>
46#include <net/cfg80211.h>
47#include <net/mac80211.h>
48#include <linux/usb.h>
49#ifdef CONFIG_CARL9170_LEDS
50#include <linux/leds.h>
 51#endif /* CONFIG_CARL9170_LEDS */
52#ifdef CONFIG_CARL9170_WPC
53#include <linux/input.h>
54#endif /* CONFIG_CARL9170_WPC */
55#include "eeprom.h"
56#include "wlan.h"
57#include "hw.h"
58#include "fwdesc.h"
59#include "fwcmd.h"
60#include "../regd.h"
61
62#ifdef CONFIG_CARL9170_DEBUGFS
63#include "debug.h"
64#endif /* CONFIG_CARL9170_DEBUGFS */
65
66#define CARL9170FW_NAME "carl9170-1.fw"
67
68#define PAYLOAD_MAX (CARL9170_MAX_CMD_LEN / 4 - 1)
69
70enum carl9170_rf_init_mode {
71 CARL9170_RFI_NONE,
72 CARL9170_RFI_WARM,
73 CARL9170_RFI_COLD,
74};
75
76#define CARL9170_MAX_RX_BUFFER_SIZE 8192
77
78enum carl9170_device_state {
79 CARL9170_UNKNOWN_STATE,
80 CARL9170_STOPPED,
81 CARL9170_IDLE,
82 CARL9170_STARTED,
83};
84
85#define CARL9170_NUM_TID 16
86#define WME_BA_BMP_SIZE 64
87#define CARL9170_TX_USER_RATE_TRIES 3
88
89#define WME_AC_BE 2
90#define WME_AC_BK 3
91#define WME_AC_VI 1
92#define WME_AC_VO 0
93
94#define TID_TO_WME_AC(_tid) \
95 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
96 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
97 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
98 WME_AC_VO)
99
100#define SEQ_DIFF(_start, _seq) \
101 (((_start) - (_seq)) & 0x0fff)
102#define SEQ_PREV(_seq) \
103 (((_seq) - 1) & 0x0fff)
104#define SEQ_NEXT(_seq) \
105 (((_seq) + 1) & 0x0fff)
106#define BAW_WITHIN(_start, _bawsz, _seqno) \
107 ((((_seqno) - (_start)) & 0xfff) < (_bawsz))
108
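/*
 * Worked example of the wrap-around arithmetic: sequence numbers are
 * 12 bit, so the macros above reduce everything modulo 4096. With
 * _start = 4090 and _bawsz = 64, BAW_WITHIN evaluates to:
 *   _seqno = 4095: (4095 - 4090) & 0xfff = 5    -> inside the window
 *   _seqno = 10:   (10 - 4090) & 0xfff   = 16   -> inside (wrapped)
 *   _seqno = 4089: (4089 - 4090) & 0xfff = 4095 -> outside
 */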
109enum carl9170_tid_state {
110 CARL9170_TID_STATE_INVALID,
111 CARL9170_TID_STATE_KILLED,
112 CARL9170_TID_STATE_SHUTDOWN,
113 CARL9170_TID_STATE_SUSPEND,
114 CARL9170_TID_STATE_PROGRESS,
115 CARL9170_TID_STATE_IDLE,
116 CARL9170_TID_STATE_XMIT,
117};
118
119#define CARL9170_BAW_BITS (2 * WME_BA_BMP_SIZE)
120#define CARL9170_BAW_SIZE (BITS_TO_LONGS(CARL9170_BAW_BITS))
121#define CARL9170_BAW_LEN (DIV_ROUND_UP(CARL9170_BAW_BITS, BITS_PER_BYTE))
122
123struct carl9170_sta_tid {
124 /* must be the first entry! */
125 struct list_head list;
126
127 /* temporary list for RCU unlink procedure */
128 struct list_head tmp_list;
129
130 /* lock for the following data structures */
131 spinlock_t lock;
132
133 unsigned int counter;
134 enum carl9170_tid_state state;
135 u8 tid; /* TID number ( 0 - 15 ) */
136 u16 max; /* max. AMPDU size */
137
138 u16 snx; /* awaiting _next_ frame */
139 u16 hsn; /* highest _queued_ sequence */
140 u16 bsn; /* base of the tx/agg bitmap */
141 unsigned long bitmap[CARL9170_BAW_SIZE];
142
143 /* Preaggregation reorder queue */
144 struct sk_buff_head queue;
145};
146
147#define CARL9170_QUEUE_TIMEOUT 256
148#define CARL9170_BUMP_QUEUE 1000
149#define CARL9170_TX_TIMEOUT 2500
150#define CARL9170_JANITOR_DELAY 128
151#define CARL9170_QUEUE_STUCK_TIMEOUT 5500
152
153#define CARL9170_NUM_TX_AGG_MAX 30
154
155/*
156 * Tradeoff between stability/latency and speed.
157 *
 158 * AR9170_TXQ_DEPTH is derived by dividing the number of available
 159 * tx buffers by the size of a full ethernet frame + overhead.
160 *
161 * Naturally: The higher the limit, the faster the device CAN send.
 162 * However, even a slight over-commitment at the wrong time leaves the
 163 * hardware doomed to send all already-queued frames at suboptimal
 164 * rates. This in turn leads to an enormous amount of unsuccessful
165 * retries => Latency goes up, whereas the throughput goes down. CRASH!
166 */
167#define CARL9170_NUM_TX_LIMIT_HARD ((AR9170_TXQ_DEPTH * 3) / 2)
168#define CARL9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH)
169
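/*
 * Worked example (AR9170_TXQ_DEPTH is defined elsewhere; assume 32
 * purely for illustration): the soft limit would then be 32 queued
 * frames and the hard limit (32 * 3) / 2 = 48, i.e. 50% of headroom
 * for over-commitment before the driver refuses frames outright.
 */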
170struct carl9170_tx_queue_stats {
171 unsigned int count;
172 unsigned int limit;
173 unsigned int len;
174};
175
176struct carl9170_vif {
177 unsigned int id;
178 struct ieee80211_vif *vif;
179};
180
181struct carl9170_vif_info {
182 struct list_head list;
183 bool active;
184 unsigned int id;
185 struct sk_buff *beacon;
186 bool enable_beacon;
187};
188
189#define AR9170_NUM_RX_URBS 16
190#define AR9170_NUM_RX_URBS_MUL 2
191#define AR9170_NUM_TX_URBS 8
192#define AR9170_NUM_RX_URBS_POOL (AR9170_NUM_RX_URBS_MUL * AR9170_NUM_RX_URBS)
193
194enum carl9170_device_features {
195 CARL9170_WPS_BUTTON = BIT(0),
196 CARL9170_ONE_LED = BIT(1),
197};
198
199#ifdef CONFIG_CARL9170_LEDS
200struct ar9170;
201
202struct carl9170_led {
203 struct ar9170 *ar;
204 struct led_classdev l;
205 char name[32];
206 unsigned int toggled;
207 bool last_state;
208 bool registered;
209};
210#endif /* CONFIG_CARL9170_LEDS */
211
212enum carl9170_restart_reasons {
213 CARL9170_RR_NO_REASON = 0,
214 CARL9170_RR_FATAL_FIRMWARE_ERROR,
215 CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS,
216 CARL9170_RR_WATCHDOG,
217 CARL9170_RR_STUCK_TX,
218 CARL9170_RR_SLOW_SYSTEM,
219 CARL9170_RR_COMMAND_TIMEOUT,
220 CARL9170_RR_TOO_MANY_PHY_ERRORS,
221 CARL9170_RR_LOST_RSP,
222 CARL9170_RR_INVALID_RSP,
223 CARL9170_RR_USER_REQUEST,
224
225 __CARL9170_RR_LAST,
226};
227
228enum carl9170_erp_modes {
229 CARL9170_ERP_INVALID,
230 CARL9170_ERP_AUTO,
231 CARL9170_ERP_MAC80211,
232 CARL9170_ERP_OFF,
233 CARL9170_ERP_CTS,
234 CARL9170_ERP_RTS,
235 __CARL9170_ERP_NUM,
236};
237
238struct ar9170 {
239 struct ath_common common;
240 struct ieee80211_hw *hw;
241 struct mutex mutex;
242 enum carl9170_device_state state;
243 spinlock_t state_lock;
244 enum carl9170_restart_reasons last_reason;
245 bool registered;
246
247 /* USB */
248 struct usb_device *udev;
249 struct usb_interface *intf;
250 struct usb_anchor rx_anch;
251 struct usb_anchor rx_work;
252 struct usb_anchor rx_pool;
253 struct usb_anchor tx_wait;
254 struct usb_anchor tx_anch;
255 struct usb_anchor tx_cmd;
256 struct usb_anchor tx_err;
257 struct tasklet_struct usb_tasklet;
258 atomic_t tx_cmd_urbs;
259 atomic_t tx_anch_urbs;
260 atomic_t rx_anch_urbs;
261 atomic_t rx_work_urbs;
262 atomic_t rx_pool_urbs;
263 kernel_ulong_t features;
264
265 /* firmware settings */
266 struct completion fw_load_wait;
267 struct completion fw_boot_wait;
268 struct {
269 const struct carl9170fw_desc_head *desc;
270 const struct firmware *fw;
271 unsigned int offset;
272 unsigned int address;
273 unsigned int cmd_bufs;
274 unsigned int api_version;
275 unsigned int vif_num;
276 unsigned int err_counter;
277 unsigned int bug_counter;
278 u32 beacon_addr;
279 unsigned int beacon_max_len;
280 bool rx_stream;
281 bool tx_stream;
282 unsigned int mem_blocks;
283 unsigned int mem_block_size;
284 unsigned int rx_size;
285 } fw;
286
287 /* reset / stuck frames/queue detection */
288 struct work_struct restart_work;
289 unsigned int restart_counter;
290 unsigned long queue_stop_timeout[__AR9170_NUM_TXQ];
291 unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ];
292 bool needs_full_reset;
293 atomic_t pending_restarts;
294
295 /* interface mode settings */
296 struct list_head vif_list;
297 unsigned long vif_bitmap;
298 unsigned int vifs;
299 struct carl9170_vif vif_priv[AR9170_MAX_VIRTUAL_MAC];
300
301 /* beaconing */
302 spinlock_t beacon_lock;
303 unsigned int global_pretbtt;
304 unsigned int global_beacon_int;
305 struct carl9170_vif_info *beacon_iter;
306 unsigned int beacon_enabled;
307
308 /* cryptographic engine */
309 u64 usedkeys;
310 bool rx_software_decryption;
311 bool disable_offload;
312
313 /* filter settings */
314 u64 cur_mc_hash;
315 u32 cur_filter;
316 unsigned int filter_state;
317 bool sniffer_enabled;
318
319 /* MAC */
320 enum carl9170_erp_modes erp_mode;
321
322 /* PHY */
323 struct ieee80211_channel *channel;
324 int noise[4];
325 unsigned int chan_fail;
326 unsigned int total_chan_fail;
327 u8 heavy_clip;
328 u8 ht_settings;
329
330 /* power calibration data */
331 u8 power_5G_leg[4];
332 u8 power_2G_cck[4];
333 u8 power_2G_ofdm[4];
334 u8 power_5G_ht20[8];
335 u8 power_5G_ht40[8];
336 u8 power_2G_ht20[8];
337 u8 power_2G_ht40[8];
338
339#ifdef CONFIG_CARL9170_LEDS
340 /* LED */
341 struct delayed_work led_work;
342 struct carl9170_led leds[AR9170_NUM_LEDS];
343#endif /* CONFIG_CARL9170_LEDS */
344
345 /* qos queue settings */
346 spinlock_t tx_stats_lock;
347 struct carl9170_tx_queue_stats tx_stats[__AR9170_NUM_TXQ];
348 struct ieee80211_tx_queue_params edcf[5];
349 struct completion tx_flush;
350
351 /* CMD */
352 int cmd_seq;
353 int readlen;
354 u8 *readbuf;
355 spinlock_t cmd_lock;
356 struct completion cmd_wait;
357 union {
358 __le32 cmd_buf[PAYLOAD_MAX + 1];
359 struct carl9170_cmd cmd;
360 struct carl9170_rsp rsp;
361 };
362
363 /* statistics */
364 unsigned int tx_dropped;
365 unsigned int tx_ack_failures;
366 unsigned int tx_fcs_errors;
367 unsigned int tx_ampdu_timeout;
368 unsigned int rx_dropped;
369
370 /* EEPROM */
371 struct ar9170_eeprom eeprom;
372
373 /* tx queuing */
374 struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
375 struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
376 struct delayed_work tx_janitor;
377 unsigned long tx_janitor_last_run;
378 bool tx_schedule;
379
380 /* tx ampdu */
381 struct work_struct ampdu_work;
382 spinlock_t tx_ampdu_list_lock;
383 struct carl9170_sta_tid *tx_ampdu_iter;
384 struct list_head tx_ampdu_list;
385 atomic_t tx_ampdu_upload;
386 atomic_t tx_ampdu_scheduler;
387 atomic_t tx_total_pending;
388 atomic_t tx_total_queued;
389 unsigned int tx_ampdu_list_len;
390 int current_density;
391 int current_factor;
392 bool tx_ampdu_schedule;
393
394 /* internal memory management */
395 spinlock_t mem_lock;
396 unsigned long *mem_bitmap;
397 atomic_t mem_free_blocks;
398 atomic_t mem_allocs;
399
400 /* rxstream mpdu merge */
401 struct ar9170_rx_head rx_plcp;
402 bool rx_has_plcp;
403 struct sk_buff *rx_failover;
404 int rx_failover_missing;
405
406#ifdef CONFIG_CARL9170_WPC
407 struct {
408 bool pbc_state;
409 struct input_dev *pbc;
410 char name[32];
411 char phys[32];
412 } wps;
413#endif /* CONFIG_CARL9170_WPC */
414
415#ifdef CONFIG_CARL9170_DEBUGFS
416 struct carl9170_debug debug;
417 struct dentry *debug_dir;
418#endif /* CONFIG_CARL9170_DEBUGFS */
419
420 /* PSM */
421 struct work_struct ps_work;
422 struct {
423 unsigned int dtim_counter;
424 unsigned long last_beacon;
425 unsigned long last_action;
426 unsigned long last_slept;
427 unsigned int sleep_ms;
428 unsigned int off_override;
429 bool state;
430 } ps;
431};
432
433enum carl9170_ps_off_override_reasons {
434 PS_OFF_VIF = BIT(0),
435 PS_OFF_BCN = BIT(1),
436 PS_OFF_5GHZ = BIT(2),
437};
438
439struct carl9170_ba_stats {
440 u8 ampdu_len;
441 u8 ampdu_ack_len;
442 bool clear;
443};
444
445struct carl9170_sta_info {
446 bool ht_sta;
447 unsigned int ampdu_max_len;
448 struct carl9170_sta_tid *agg[CARL9170_NUM_TID];
449 struct carl9170_ba_stats stats[CARL9170_NUM_TID];
450};
451
452struct carl9170_tx_info {
453 unsigned long timeout;
454 struct ar9170 *ar;
455 struct kref ref;
456};
457
458#define CHK_DEV_STATE(a, s) (((struct ar9170 *)a)->state >= (s))
459#define IS_INITIALIZED(a) (CHK_DEV_STATE(a, CARL9170_STOPPED))
460#define IS_ACCEPTING_CMD(a) (CHK_DEV_STATE(a, CARL9170_IDLE))
461#define IS_STARTED(a) (CHK_DEV_STATE(a, CARL9170_STARTED))
462
463static inline void __carl9170_set_state(struct ar9170 *ar,
464 enum carl9170_device_state newstate)
465{
466 ar->state = newstate;
467}
468
469static inline void carl9170_set_state(struct ar9170 *ar,
470 enum carl9170_device_state newstate)
471{
472 unsigned long flags;
473
474 spin_lock_irqsave(&ar->state_lock, flags);
475 __carl9170_set_state(ar, newstate);
476 spin_unlock_irqrestore(&ar->state_lock, flags);
477}
478
479static inline void carl9170_set_state_when(struct ar9170 *ar,
480 enum carl9170_device_state min, enum carl9170_device_state newstate)
481{
482 unsigned long flags;
483
484 spin_lock_irqsave(&ar->state_lock, flags);
485 if (CHK_DEV_STATE(ar, min))
486 __carl9170_set_state(ar, newstate);
487 spin_unlock_irqrestore(&ar->state_lock, flags);
488}
489
490/* exported interface */
491void *carl9170_alloc(size_t priv_size);
492int carl9170_register(struct ar9170 *ar);
493void carl9170_unregister(struct ar9170 *ar);
494void carl9170_free(struct ar9170 *ar);
495void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r);
496void carl9170_ps_check(struct ar9170 *ar);
497
498/* USB back-end */
499int carl9170_usb_open(struct ar9170 *ar);
500void carl9170_usb_stop(struct ar9170 *ar);
501void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb);
502void carl9170_usb_handle_tx_err(struct ar9170 *ar);
503int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
504 u32 plen, void *payload, u32 rlen, void *resp);
505int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
506 const bool free_buf);
507int carl9170_usb_restart(struct ar9170 *ar);
508void carl9170_usb_reset(struct ar9170 *ar);
509
510/* MAC */
511int carl9170_init_mac(struct ar9170 *ar);
512int carl9170_set_qos(struct ar9170 *ar);
513int carl9170_update_multicast(struct ar9170 *ar, const u64 mc_hash);
514int carl9170_mod_virtual_mac(struct ar9170 *ar, const unsigned int id,
515 const u8 *mac);
516int carl9170_set_operating_mode(struct ar9170 *ar);
517int carl9170_set_beacon_timers(struct ar9170 *ar);
518int carl9170_set_dyn_sifs_ack(struct ar9170 *ar);
519int carl9170_set_rts_cts_rate(struct ar9170 *ar);
520int carl9170_set_ampdu_settings(struct ar9170 *ar);
521int carl9170_set_slot_time(struct ar9170 *ar);
522int carl9170_set_mac_rates(struct ar9170 *ar);
523int carl9170_set_hwretry_limit(struct ar9170 *ar, const u32 max_retry);
524int carl9170_update_beacon(struct ar9170 *ar, const bool submit);
525int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
526 const u8 ktype, const u8 keyidx, const u8 *keydata, const int keylen);
527int carl9170_disable_key(struct ar9170 *ar, const u8 id);
528
529/* RX */
530void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
531void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
532
533/* TX */
534int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
535void carl9170_tx_janitor(struct work_struct *work);
536void carl9170_tx_process_status(struct ar9170 *ar,
537 const struct carl9170_rsp *cmd);
538void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
539 const bool success);
540void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb);
541void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb);
542void carl9170_tx_scheduler(struct ar9170 *ar);
543void carl9170_tx_get_skb(struct sk_buff *skb);
544int carl9170_tx_put_skb(struct sk_buff *skb);
545
546/* LEDs */
547#ifdef CONFIG_CARL9170_LEDS
548int carl9170_led_register(struct ar9170 *ar);
549void carl9170_led_unregister(struct ar9170 *ar);
550#endif /* CONFIG_CARL9170_LEDS */
551int carl9170_led_init(struct ar9170 *ar);
552int carl9170_led_set_state(struct ar9170 *ar, const u32 led_state);
553
554/* PHY / RF */
555int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
556 enum nl80211_channel_type bw, enum carl9170_rf_init_mode rfi);
557int carl9170_get_noisefloor(struct ar9170 *ar);
558
559/* FW */
560int carl9170_parse_firmware(struct ar9170 *ar);
561int carl9170_fw_fix_eeprom(struct ar9170 *ar);
562
563extern struct ieee80211_rate __carl9170_ratetable[];
564extern int modparam_noht;
565
566static inline struct ar9170 *carl9170_get_priv(struct carl9170_vif *carl_vif)
567{
568 return container_of(carl_vif, struct ar9170,
569 vif_priv[carl_vif->id]);
570}
571
572static inline struct ieee80211_hdr *carl9170_get_hdr(struct sk_buff *skb)
573{
574 return (void *)((struct _carl9170_tx_superframe *)
575 skb->data)->frame_data;
576}
577
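/*
 * IEEE 802.11 seq_ctrl layout: the low 4 bits hold the fragment
 * number, the upper 12 bits the sequence number.
 */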
578static inline u16 get_seq_h(struct ieee80211_hdr *hdr)
579{
580 return le16_to_cpu(hdr->seq_ctrl) >> 4;
581}
582
583static inline u16 carl9170_get_seq(struct sk_buff *skb)
584{
585 return get_seq_h(carl9170_get_hdr(skb));
586}
587
588static inline u16 get_tid_h(struct ieee80211_hdr *hdr)
589{
590 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
591}
592
593static inline u16 carl9170_get_tid(struct sk_buff *skb)
594{
595 return get_tid_h(carl9170_get_hdr(skb));
596}
597
598static inline struct ieee80211_vif *
599carl9170_get_vif(struct carl9170_vif_info *priv)
600{
601 return container_of((void *)priv, struct ieee80211_vif, drv_priv);
602}
603
604/* Protected by ar->mutex or RCU */
605static inline struct ieee80211_vif *carl9170_get_main_vif(struct ar9170 *ar)
606{
607 struct carl9170_vif_info *cvif;
608
609 list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
610 if (cvif->active)
611 return carl9170_get_vif(cvif);
612 }
613
614 return NULL;
615}
616
617static inline bool is_main_vif(struct ar9170 *ar, struct ieee80211_vif *vif)
618{
619 bool ret;
620
621 rcu_read_lock();
622 ret = (carl9170_get_main_vif(ar) == vif);
623 rcu_read_unlock();
624 return ret;
625}
626
627#endif /* __CARL9170_H */
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
new file mode 100644
index 000000000000..c21f3364bfec
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -0,0 +1,188 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * Basic HW register/memory/command access functions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include "carl9170.h"
40#include "cmd.h"
41
42int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
43{
44 __le32 buf[2] = {
45 cpu_to_le32(reg),
46 cpu_to_le32(val),
47 };
48 int err;
49
50 err = carl9170_exec_cmd(ar, CARL9170_CMD_WREG, sizeof(buf),
51 (u8 *) buf, 0, NULL);
52 if (err) {
53 if (net_ratelimit()) {
54 wiphy_err(ar->hw->wiphy, "writing reg %#x "
55 "(val %#x) failed (%d)\n", reg, val, err);
56 }
57 }
58 return err;
59}
60
61int carl9170_read_mreg(struct ar9170 *ar, const int nregs,
62 const u32 *regs, u32 *out)
63{
64 int i, err;
65 __le32 *offs, *res;
66
67 /* abuse "out" for the register offsets, must be same length */
68 offs = (__le32 *)out;
69 for (i = 0; i < nregs; i++)
70 offs[i] = cpu_to_le32(regs[i]);
71
72 /* also use the same buffer for the input */
73 res = (__le32 *)out;
74
75 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
76 4 * nregs, (u8 *)offs,
77 4 * nregs, (u8 *)res);
78 if (err) {
79 if (net_ratelimit()) {
80 wiphy_err(ar->hw->wiphy, "reading regs failed (%d)\n",
81 err);
82 }
83 return err;
84 }
85
86 /* convert result to cpu endian */
87 for (i = 0; i < nregs; i++)
88 out[i] = le32_to_cpu(res[i]);
89
90 return 0;
91}
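
/*
 * Usage sketch (illustrative, not part of this patch): fetch two MAC
 * registers in a single USB round trip. The register names are assumed
 * from the driver's hw.h; the choice of registers is arbitrary.
 *
 *	u32 regs[2] = { AR9170_MAC_REG_DMA_STATUS,
 *			AR9170_MAC_REG_DMA_TRIGGER };
 *	u32 vals[2];
 *	int err = carl9170_read_mreg(ar, 2, regs, vals);
 *	if (!err)
 *		wiphy_info(ar->hw->wiphy, "dma status:%x trigger:%x\n",
 *			   vals[0], vals[1]);
 */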
92
93int carl9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val)
94{
95 return carl9170_read_mreg(ar, 1, &reg, val);
96}
97
98int carl9170_echo_test(struct ar9170 *ar, const u32 v)
99{
100 u32 echores;
101 int err;
102
103 err = carl9170_exec_cmd(ar, CARL9170_CMD_ECHO,
104 4, (u8 *)&v,
105 4, (u8 *)&echores);
106 if (err)
107 return err;
108
109 if (v != echores) {
110 wiphy_info(ar->hw->wiphy, "wrong echo %x != %x\n", v, echores);
111 return -EINVAL;
112 }
113
114 return 0;
115}
116
117struct carl9170_cmd *carl9170_cmd_buf(struct ar9170 *ar,
118 const enum carl9170_cmd_oids cmd, const unsigned int len)
119{
120 struct carl9170_cmd *tmp;
121
122 tmp = kzalloc(sizeof(struct carl9170_cmd_head) + len, GFP_ATOMIC);
123 if (tmp) {
124 tmp->hdr.cmd = cmd;
125 tmp->hdr.len = len;
126 }
127
128 return tmp;
129}
130
131int carl9170_reboot(struct ar9170 *ar)
132{
133 struct carl9170_cmd *cmd;
134 int err;
135
136 cmd = carl9170_cmd_buf(ar, CARL9170_CMD_REBOOT_ASYNC, 0);
137 if (!cmd)
138 return -ENOMEM;
139
140 err = __carl9170_exec_cmd(ar, cmd, true);
141 return err;
142}
143
144int carl9170_mac_reset(struct ar9170 *ar)
145{
146 return carl9170_exec_cmd(ar, CARL9170_CMD_SWRST,
147 0, NULL, 0, NULL);
148}
149
150int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id,
151 const u32 mode, const u32 addr, const u32 len)
152{
153 struct carl9170_cmd *cmd;
154
155 cmd = carl9170_cmd_buf(ar, CARL9170_CMD_BCN_CTRL_ASYNC,
156 sizeof(struct carl9170_bcn_ctrl_cmd));
157 if (!cmd)
158 return -ENOMEM;
159
160 cmd->bcn_ctrl.vif_id = cpu_to_le32(vif_id);
161 cmd->bcn_ctrl.mode = cpu_to_le32(mode);
162 cmd->bcn_ctrl.bcn_addr = cpu_to_le32(addr);
163 cmd->bcn_ctrl.bcn_len = cpu_to_le32(len);
164
165 return __carl9170_exec_cmd(ar, cmd, true);
166}
167
168int carl9170_powersave(struct ar9170 *ar, const bool ps)
169{
170 struct carl9170_cmd *cmd;
171 u32 state;
172
173 cmd = carl9170_cmd_buf(ar, CARL9170_CMD_PSM_ASYNC,
174 sizeof(struct carl9170_psm));
175 if (!cmd)
176 return -ENOMEM;
177
178 if (ps) {
179 /* Sleep until next TBTT */
180 state = CARL9170_PSM_SLEEP | 1;
181 } else {
182 /* wake up immediately */
183 state = 1;
184 }
185
186 cmd->psm.state = cpu_to_le32(state);
187 return __carl9170_exec_cmd(ar, cmd, true);
188}
diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h
new file mode 100644
index 000000000000..0fc83d2336fd
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/cmd.h
@@ -0,0 +1,158 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * Basic HW register/memory/command access functions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39#ifndef __CMD_H
40#define __CMD_H
41
42#include "carl9170.h"
43
44/* basic HW access */
45int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
46int carl9170_read_reg(struct ar9170 *ar, const u32 reg, u32 *val);
47int carl9170_read_mreg(struct ar9170 *ar, const int nregs,
48 const u32 *regs, u32 *out);
49int carl9170_echo_test(struct ar9170 *ar, u32 v);
50int carl9170_reboot(struct ar9170 *ar);
51int carl9170_mac_reset(struct ar9170 *ar);
52int carl9170_powersave(struct ar9170 *ar, const bool power_on);
53int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id,
54 const u32 mode, const u32 addr, const u32 len);
55
56static inline int carl9170_flush_cab(struct ar9170 *ar,
57 const unsigned int vif_id)
58{
59 return carl9170_bcn_ctrl(ar, vif_id, CARL9170_BCN_CTRL_DRAIN, 0, 0);
60}
61
62struct carl9170_cmd *carl9170_cmd_buf(struct ar9170 *ar,
63 const enum carl9170_cmd_oids cmd, const unsigned int len);
64
65/*
66 * Macros to facilitate writing multiple registers in a single
67 * write-combining USB command. Note that if one group of writes
68 * fails, none of the remaining groups is attempted, and there is
69 * no way to tell which write within the failed group was at fault.
70 */
71#define carl9170_regwrite_begin(ar) \
72do { \
73 int __nreg = 0, __err = 0; \
74 struct ar9170 *__ar = ar;
75
76#define carl9170_regwrite(r, v) do { \
77 __ar->cmd_buf[2 * __nreg + 1] = cpu_to_le32(r); \
78 __ar->cmd_buf[2 * __nreg + 2] = cpu_to_le32(v); \
79 __nreg++; \
80 if (__nreg >= PAYLOAD_MAX / 2) { \
81 if (IS_ACCEPTING_CMD(__ar)) \
82 __err = carl9170_exec_cmd(__ar, \
83 CARL9170_CMD_WREG, 8 * __nreg, \
84 (u8 *) &__ar->cmd_buf[1], 0, NULL); \
85 else \
86 goto __regwrite_out; \
87 \
88 __nreg = 0; \
89 if (__err) \
90 goto __regwrite_out; \
91 } \
92} while (0)
93
94#define carl9170_regwrite_finish() \
95__regwrite_out: \
96 if (__err == 0 && __nreg) { \
97 if (IS_ACCEPTING_CMD(__ar)) \
98 __err = carl9170_exec_cmd(__ar, \
99 CARL9170_CMD_WREG, 8 * __nreg, \
100 (u8 *) &__ar->cmd_buf[1], 0, NULL); \
101 __nreg = 0; \
102 }
103
104#define carl9170_regwrite_result() \
105 __err; \
106} while (0);
107
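/*
 * Usage sketch (illustrative, not part of this patch). The macros above
 * open and close a single do { } while (0) block, so all four must
 * appear together, in this order, inside one function. The register
 * names are assumed from the driver's hw.h:
 *
 *	int err;
 *
 *	carl9170_regwrite_begin(ar);
 *	carl9170_regwrite(AR9170_MAC_REG_BCN_ADDR, addr);
 *	carl9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, len);
 *	carl9170_regwrite_finish();
 *	err = carl9170_regwrite_result();
 */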
108
109#define carl9170_async_get_buf() \
110do { \
111 __cmd = carl9170_cmd_buf(__carl, CARL9170_CMD_WREG_ASYNC, \
112 CARL9170_MAX_CMD_PAYLOAD_LEN); \
113 if (__cmd == NULL) { \
114 __err = -ENOMEM; \
115 goto __async_regwrite_out; \
116 } \
117} while (0);
118
119#define carl9170_async_regwrite_begin(carl) \
120do { \
121 int __nreg = 0, __err = 0; \
122 struct ar9170 *__carl = carl; \
123 struct carl9170_cmd *__cmd; \
124 carl9170_async_get_buf(); \
125
126#define carl9170_async_regwrite(r, v) do { \
127 __cmd->wreg.regs[__nreg].addr = cpu_to_le32(r); \
128 __cmd->wreg.regs[__nreg].val = cpu_to_le32(v); \
129 __nreg++; \
130 if (__nreg >= PAYLOAD_MAX / 2) { \
131 if (IS_ACCEPTING_CMD(__carl)) { \
132 __cmd->hdr.len = 8 * __nreg; \
133 __err = __carl9170_exec_cmd(__carl, __cmd, true);\
134 __cmd = NULL; \
135 carl9170_async_get_buf(); \
136 } else { \
137 goto __async_regwrite_out; \
138 } \
139 __nreg = 0; \
140 if (__err) \
141 goto __async_regwrite_out; \
142 } \
143} while (0)
144
145#define carl9170_async_regwrite_finish() \
146__async_regwrite_out: \
147 if (__err == 0 && __nreg) { \
148 __cmd->hdr.len = 8 * __nreg; \
149 if (IS_ACCEPTING_CMD(__carl)) \
150 __err = __carl9170_exec_cmd(__carl, __cmd, true);\
151 __nreg = 0; \
152 }
153
154#define carl9170_async_regwrite_result() \
155 __err; \
156} while (0);
157
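/*
 * The asynchronous variant follows the same begin/write/finish/result
 * pattern, but batches the writes into CARL9170_CMD_WREG_ASYNC command
 * buffers that are fired off without waiting for a response:
 *
 *	carl9170_async_regwrite_begin(ar);
 *	carl9170_async_regwrite(reg, val);
 *	carl9170_async_regwrite_finish();
 *	err = carl9170_async_regwrite_result();
 */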
158#endif /* __CMD_H */
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
new file mode 100644
index 000000000000..19b48369ffed
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -0,0 +1,906 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * debug(fs) probing
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2008-2009 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/seq_file.h>
44#include <linux/vmalloc.h>
45#include "carl9170.h"
46#include "cmd.h"
47
48#define ADD(buf, off, max, fmt, args...) \
49 do { off += snprintf(&buf[off], max - off, fmt, ##args); } while (0)
50
51static int carl9170_debugfs_open(struct inode *inode, struct file *file)
52{
53 file->private_data = inode->i_private;
54 return 0;
55}
56
57struct carl9170_debugfs_fops {
58 unsigned int read_bufsize;
59 mode_t attr;
60 char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
61 ssize_t *len);
62 ssize_t (*write)(struct ar9170 *ar, const char *buf, size_t size);
63 const struct file_operations fops;
64
65 enum carl9170_device_state req_dev_state;
66};
67
68static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf,
69 size_t count, loff_t *ppos)
70{
71 struct carl9170_debugfs_fops *dfops;
72 struct ar9170 *ar;
73 char *buf = NULL, *res_buf = NULL;
74 ssize_t ret = 0;
75 int err = 0;
76
77 if (!count)
78 return 0;
79
80 ar = file->private_data;
81
82 if (!ar)
83 return -ENODEV;
84 dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);
85
86 if (!dfops->read)
87 return -ENOSYS;
88
89 if (dfops->read_bufsize) {
90 buf = vmalloc(dfops->read_bufsize);
91 if (!buf)
92 return -ENOMEM;
93 }
94
95 mutex_lock(&ar->mutex);
96 if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) {
97 err = -ENODEV;
98 res_buf = buf;
99 goto out_free;
100 }
101
102 res_buf = dfops->read(ar, buf, dfops->read_bufsize, &ret);
103
104 if (ret > 0)
105 err = simple_read_from_buffer(userbuf, count, ppos,
106 res_buf, ret);
107 else
108 err = ret;
109
110 WARN_ON_ONCE(dfops->read_bufsize && (res_buf != buf));
111
112out_free:
113 vfree(res_buf);
114 mutex_unlock(&ar->mutex);
115 return err;
116}
117
118static ssize_t carl9170_debugfs_write(struct file *file,
119 const char __user *userbuf, size_t count, loff_t *ppos)
120{
121 struct carl9170_debugfs_fops *dfops;
122 struct ar9170 *ar;
123 char *buf = NULL;
124 int err = 0;
125
126 if (!count)
127 return 0;
128
129 if (count > PAGE_SIZE)
130 return -E2BIG;
131
132 ar = file->private_data;
133
134 if (!ar)
135 return -ENODEV;
136 dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);
137
138 if (!dfops->write)
139 return -ENOSYS;
140
141 buf = vmalloc(count + 1);
142 if (!buf)
143 return -ENOMEM;
144
145 if (copy_from_user(buf, userbuf, count)) {
146 err = -EFAULT;
147 goto out_free;
148 }
149 buf[count] = '\0'; /* terminate for the sscanf()-based write handlers */
150
150 if (mutex_trylock(&ar->mutex) == 0) {
151 err = -EAGAIN;
152 goto out_free;
153 }
154
155 if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) {
156 err = -ENODEV;
157 goto out_unlock;
158 }
159
160 err = dfops->write(ar, buf, count);
161
164out_unlock:
165 mutex_unlock(&ar->mutex);
166
167out_free:
168 vfree(buf);
169 return err;
170}
171
172#define __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \
173 _attr, _dstate) \
174static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\
175 .read_bufsize = _read_bufsize, \
176 .read = _read, \
177 .write = _write, \
178 .attr = _attr, \
179 .req_dev_state = _dstate, \
180 .fops = { \
181 .open = carl9170_debugfs_open, \
182 .read = carl9170_debugfs_read, \
183 .write = carl9170_debugfs_write, \
184 .owner = THIS_MODULE \
185 }, \
186}
187
188#define DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, _attr) \
189 __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \
190 _attr, CARL9170_STARTED) \
191
192#define DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize) \
193 DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
194 NULL, _read_bufsize, S_IRUSR)
195
196#define DEBUGFS_DECLARE_WO_FILE(name) \
197 DEBUGFS_DECLARE_FILE(name, NULL, carl9170_debugfs_##name ##_write,\
198 0, S_IWUSR)
199
200#define DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize) \
201 DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
202 carl9170_debugfs_##name ##_write, \
203 _read_bufsize, S_IRUSR | S_IWUSR)
204
205#define __DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize, _dstate) \
206 __DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
207 carl9170_debugfs_##name ##_write, \
208 _read_bufsize, S_IRUSR | S_IWUSR, _dstate)
209
210#define DEBUGFS_READONLY_FILE(name, _read_bufsize, fmt, value...) \
211static char *carl9170_debugfs_ ##name ## _read(struct ar9170 *ar, \
212 char *buf, size_t buf_size,\
213 ssize_t *len) \
214{ \
215 ADD(buf, *len, buf_size, fmt "\n", ##value); \
216 return buf; \
217} \
218DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize)
219
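/*
 * Illustrative declaration (not part of this patch): a read-only file
 * exposing the current device state through the helpers above,
 *
 *	DEBUGFS_READONLY_FILE(dev_state, 20, "%d", ar->state);
 *
 * paired with a matching DEBUGFS_ADD(dev_state) call in
 * carl9170_debugfs_register().
 */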
220static char *carl9170_debugfs_mem_usage_read(struct ar9170 *ar, char *buf,
221 size_t bufsize, ssize_t *len)
222{
223 ADD(buf, *len, bufsize, "jar: [");
224
225 spin_lock_bh(&ar->mem_lock);
226
227 *len += bitmap_scnprintf(&buf[*len], bufsize - *len,
228 ar->mem_bitmap, ar->fw.mem_blocks);
229
230 ADD(buf, *len, bufsize, "]\n");
231
232 ADD(buf, *len, bufsize, "cookies: used:%3d / total:%3d, allocs:%d\n",
233 bitmap_weight(ar->mem_bitmap, ar->fw.mem_blocks),
234 ar->fw.mem_blocks, atomic_read(&ar->mem_allocs));
235
236 ADD(buf, *len, bufsize, "memory: free:%3d (%3d KiB) / total:%3d KiB)\n",
237 atomic_read(&ar->mem_free_blocks),
238 (atomic_read(&ar->mem_free_blocks) * ar->fw.mem_block_size) / 1024,
239 (ar->fw.mem_blocks * ar->fw.mem_block_size) / 1024);
240
241 spin_unlock_bh(&ar->mem_lock);
242
243 return buf;
244}
245DEBUGFS_DECLARE_RO_FILE(mem_usage, 512);
246
247static char *carl9170_debugfs_qos_stat_read(struct ar9170 *ar, char *buf,
248 size_t bufsize, ssize_t *len)
249{
250 ADD(buf, *len, bufsize, "%s QoS AC\n", modparam_noht ? "Hardware" :
251 "Software");
252
253 ADD(buf, *len, bufsize, "[ VO VI "
254 " BE BK ]\n");
255
256 spin_lock_bh(&ar->tx_stats_lock);
257 ADD(buf, *len, bufsize, "[length/limit length/limit "
258 "length/limit length/limit ]\n"
259 "[ %3d/%3d %3d/%3d "
260 " %3d/%3d %3d/%3d ]\n\n",
261 ar->tx_stats[0].len, ar->tx_stats[0].limit,
262 ar->tx_stats[1].len, ar->tx_stats[1].limit,
263 ar->tx_stats[2].len, ar->tx_stats[2].limit,
264 ar->tx_stats[3].len, ar->tx_stats[3].limit);
265
266 ADD(buf, *len, bufsize, "[ total total "
267 " total total ]\n"
268 "[%10d %10d %10d %10d ]\n\n",
269 ar->tx_stats[0].count, ar->tx_stats[1].count,
270 ar->tx_stats[2].count, ar->tx_stats[3].count);
271
272 spin_unlock_bh(&ar->tx_stats_lock);
273
274 ADD(buf, *len, bufsize, "[ pend/waittx pend/waittx "
275 " pend/waittx pend/waittx]\n"
276 "[ %3d/%3d %3d/%3d "
277 " %3d/%3d %3d/%3d ]\n\n",
278 skb_queue_len(&ar->tx_pending[0]),
279 skb_queue_len(&ar->tx_status[0]),
280 skb_queue_len(&ar->tx_pending[1]),
281 skb_queue_len(&ar->tx_status[1]),
282 skb_queue_len(&ar->tx_pending[2]),
283 skb_queue_len(&ar->tx_status[2]),
284 skb_queue_len(&ar->tx_pending[3]),
285 skb_queue_len(&ar->tx_status[3]));
286
287 return buf;
288}
289DEBUGFS_DECLARE_RO_FILE(qos_stat, 512);
290
291static void carl9170_debugfs_format_frame(struct ar9170 *ar,
292 struct sk_buff *skb, const char *prefix, char *buf,
293 ssize_t *off, ssize_t bufsize)
294{
295 struct _carl9170_tx_superframe *txc = (void *) skb->data;
296 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
297 struct carl9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
298 struct ieee80211_hdr *hdr = (void *) txc->frame_data;
299
300 ADD(buf, *off, bufsize, "%s %p, c:%2x, DA:%pM, sq:%4d, mc:%.4x, "
301 "pc:%.8x, to:%d ms\n", prefix, skb, txc->s.cookie,
302 ieee80211_get_DA(hdr), get_seq_h(hdr),
303 le16_to_cpu(txc->f.mac_control), le32_to_cpu(txc->f.phy_control),
304 jiffies_to_msecs(jiffies - arinfo->timeout));
305}
306
307
308static char *carl9170_debugfs_ampdu_state_read(struct ar9170 *ar, char *buf,
309 size_t bufsize, ssize_t *len)
310{
311 struct carl9170_sta_tid *iter;
312 struct sk_buff *skb;
313 int cnt = 0, fc;
314 int offset;
315
316 rcu_read_lock();
317 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
318
319 spin_lock_bh(&iter->lock);
320 ADD(buf, *len, bufsize, "Entry: #%2d TID:%1d, BSN:%4d, "
321 "SNX:%4d, HSN:%4d, BAW:%2d, state:%1d, toggles:%d\n",
322 cnt, iter->tid, iter->bsn, iter->snx, iter->hsn,
323 iter->max, iter->state, iter->counter);
324
325 ADD(buf, *len, bufsize, "\tWindow: [");
326
327 *len += bitmap_scnprintf(&buf[*len], bufsize - *len,
328 iter->bitmap, CARL9170_BAW_BITS);
329
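/*
 * BM_STR_OFF maps a bit index in the BAW window to its character column
 * in the bitmap_scnprintf() output above: one hex digit covers four
 * bits and a comma separator follows every 32-bit chunk (interpretation
 * assumed from the bitmap_scnprintf() output format).
 */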
330#define BM_STR_OFF(offset) \
331 ((CARL9170_BAW_BITS - (offset) - 1) / 4 + \
332 (CARL9170_BAW_BITS - (offset) - 1) / 32 + 1)
333
334 ADD(buf, *len, bufsize, ",W]\n");
335
336 offset = BM_STR_OFF(0);
337 ADD(buf, *len, bufsize, "\tBase Seq: %*s\n", offset, "T");
338
339 offset = BM_STR_OFF(SEQ_DIFF(iter->snx, iter->bsn));
340 ADD(buf, *len, bufsize, "\tNext Seq: %*s\n", offset, "W");
341
342 offset = BM_STR_OFF(((int)iter->hsn - (int)iter->bsn) %
343 CARL9170_BAW_BITS);
344 ADD(buf, *len, bufsize, "\tLast Seq: %*s\n", offset, "N");
345
346 ADD(buf, *len, bufsize, "\tPre-Aggregation reorder buffer: "
347 " currently queued:%d\n", skb_queue_len(&iter->queue));
348
349 fc = 0;
350 skb_queue_walk(&iter->queue, skb) {
351 char prefix[32];
352
353 snprintf(prefix, sizeof(prefix), "\t\t%3d :", fc);
354 carl9170_debugfs_format_frame(ar, skb, prefix, buf,
355 len, bufsize);
356
357 fc++;
358 }
359 spin_unlock_bh(&iter->lock);
360 cnt++;
361 }
362 rcu_read_unlock();
363
364 return buf;
365}
366DEBUGFS_DECLARE_RO_FILE(ampdu_state, 8000);
367
368static void carl9170_debugfs_queue_dump(struct ar9170 *ar, char *buf,
369 ssize_t *len, size_t bufsize, struct sk_buff_head *queue)
370{
371 struct sk_buff *skb;
372 char prefix[16];
373 int fc = 0;
374
375 spin_lock_bh(&queue->lock);
376 skb_queue_walk(queue, skb) {
377 snprintf(prefix, sizeof(prefix), "%3d :", fc);
378 carl9170_debugfs_format_frame(ar, skb, prefix, buf,
379 len, bufsize);
380 fc++;
381 }
382 spin_unlock_bh(&queue->lock);
383}
384
385#define DEBUGFS_QUEUE_DUMP(q, qi) \
386static char *carl9170_debugfs_##q ##_##qi ##_read(struct ar9170 *ar, \
387 char *buf, size_t bufsize, ssize_t *len) \
388{ \
389 carl9170_debugfs_queue_dump(ar, buf, len, bufsize, &ar->q[qi]); \
390 return buf; \
391} \
392DEBUGFS_DECLARE_RO_FILE(q##_##qi, 8000);
393
394static char *carl9170_debugfs_sta_psm_read(struct ar9170 *ar, char *buf,
395 size_t bufsize, ssize_t *len)
396{
397 ADD(buf, *len, bufsize, "psm state: %s\n", (ar->ps.off_override ?
398 "FORCE CAM" : (ar->ps.state ? "PSM" : "CAM")));
399
400 ADD(buf, *len, bufsize, "sleep duration: %d ms.\n", ar->ps.sleep_ms);
401 ADD(buf, *len, bufsize, "last power-state transition: %d ms ago.\n",
402 jiffies_to_msecs(jiffies - ar->ps.last_action));
403 ADD(buf, *len, bufsize, "last CAM->PSM transition: %d ms ago.\n",
404 jiffies_to_msecs(jiffies - ar->ps.last_slept));
405
406 return buf;
407}
408DEBUGFS_DECLARE_RO_FILE(sta_psm, 160);
409
410static char *carl9170_debugfs_tx_stuck_read(struct ar9170 *ar, char *buf,
411 size_t bufsize, ssize_t *len)
412{
413 int i;
414
415 for (i = 0; i < ar->hw->queues; i++) {
416 ADD(buf, *len, bufsize, "TX queue [%d]: %10d max:%10d ms.\n",
417 i, ieee80211_queue_stopped(ar->hw, i) ?
418 jiffies_to_msecs(jiffies - ar->queue_stop_timeout[i]) : 0,
419 jiffies_to_msecs(ar->max_queue_stop_timeout[i]));
420
421 ar->max_queue_stop_timeout[i] = 0;
422 }
423
424 return buf;
425}
426DEBUGFS_DECLARE_RO_FILE(tx_stuck, 180);
427
428static char *carl9170_debugfs_phy_noise_read(struct ar9170 *ar, char *buf,
429 size_t bufsize, ssize_t *len)
430{
431 int err;
432
433 err = carl9170_get_noisefloor(ar);
434 if (err) {
435 *len = err;
436 return buf;
437 }
438
439 ADD(buf, *len, bufsize, "Chain 0: %10d dBm, ext. chan.:%10d dBm\n",
440 ar->noise[0], ar->noise[2]);
441 ADD(buf, *len, bufsize, "Chain 2: %10d dBm, ext. chan.:%10d dBm\n",
442 ar->noise[1], ar->noise[3]);
443
444 return buf;
445}
446DEBUGFS_DECLARE_RO_FILE(phy_noise, 180);
447
448static char *carl9170_debugfs_vif_dump_read(struct ar9170 *ar, char *buf,
449 size_t bufsize, ssize_t *len)
450{
451 struct carl9170_vif_info *iter;
452 int i = 0;
453
454 ADD(buf, *len, bufsize, "registered VIFs:%d \\ %d\n",
455 ar->vifs, ar->fw.vif_num);
456
457 ADD(buf, *len, bufsize, "VIF bitmap: [");
458
459 *len += bitmap_scnprintf(&buf[*len], bufsize - *len,
460 &ar->vif_bitmap, ar->fw.vif_num);
461
462 ADD(buf, *len, bufsize, "]\n");
463
464 rcu_read_lock();
465 list_for_each_entry_rcu(iter, &ar->vif_list, list) {
466 struct ieee80211_vif *vif = carl9170_get_vif(iter);
467 ADD(buf, *len, bufsize, "\t%d = [%s VIF, id:%d, type:%x "
468 " mac:%pM %s]\n", i, (carl9170_get_main_vif(ar) == vif ?
469 "Master" : " Slave"), iter->id, vif->type, vif->addr,
470 iter->enable_beacon ? "beaconing " : "");
471 i++;
472 }
473 rcu_read_unlock();
474
475 return buf;
476}
477DEBUGFS_DECLARE_RO_FILE(vif_dump, 8000);
478
479#define UPDATE_COUNTER(ar, name) ({ \
480 u32 __tmp[ARRAY_SIZE(name##_regs)]; \
481 unsigned int __i, __err = -ENODEV; \
482 \
483 for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) { \
484 __tmp[__i] = name##_regs[__i].reg; \
485 ar->debug.stats.name##_counter[__i] = 0; \
486 } \
487 \
488 if (IS_STARTED(ar)) \
489 __err = carl9170_read_mreg(ar, ARRAY_SIZE(name##_regs), \
490 __tmp, ar->debug.stats.name##_counter); \
491 (__err); })
492
493#define TALLY_SUM_UP(ar, name) do { \
494 unsigned int __i; \
495 \
496 for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) { \
497 ar->debug.stats.name##_sum[__i] += \
498 ar->debug.stats.name##_counter[__i]; \
499 } \
500} while (0)
501
502#define DEBUGFS_HW_TALLY_FILE(name, f) \
503static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \
504 char *dum, size_t bufsize, ssize_t *ret) \
505{ \
506 char *buf; \
507 int i, max_len, err; \
508 \
509 max_len = ARRAY_SIZE(name##_regs) * 80; \
510 buf = vmalloc(max_len); \
511 if (!buf) \
512 return NULL; \
513 \
514 err = UPDATE_COUNTER(ar, name); \
515 if (err) { \
516 *ret = err; \
517 return buf; \
518 } \
519 \
520 TALLY_SUM_UP(ar, name); \
521 \
522 for (i = 0; i < ARRAY_SIZE(name##_regs); i++) { \
523 ADD(buf, *ret, max_len, "%22s = %" f "[+%" f "]\n", \
524 name##_regs[i].nreg, ar->debug.stats.name ##_sum[i],\
525 ar->debug.stats.name ##_counter[i]); \
526 } \
527 \
528 return buf; \
529} \
530DEBUGFS_DECLARE_RO_FILE(name, 0);
531
532#define DEBUGFS_HW_REG_FILE(name, f) \
533static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \
534 char *dum, size_t bufsize, ssize_t *ret) \
535{ \
536 char *buf; \
537 int i, max_len, err; \
538 \
539 max_len = ARRAY_SIZE(name##_regs) * 80; \
540 buf = vmalloc(max_len); \
541 if (!buf) \
542 return NULL; \
543 \
544 err = UPDATE_COUNTER(ar, name); \
545 if (err) { \
546 *ret = err; \
547 return buf; \
548 } \
549 \
550 for (i = 0; i < ARRAY_SIZE(name##_regs); i++) { \
551 ADD(buf, *ret, max_len, "%22s = %" f "\n", \
552 name##_regs[i].nreg, \
553 ar->debug.stats.name##_counter[i]); \
554 } \
555 \
556 return buf; \
557} \
558DEBUGFS_DECLARE_RO_FILE(name, 0);
559
560static ssize_t carl9170_debugfs_hw_ioread32_write(struct ar9170 *ar,
561 const char *buf, size_t count)
562{
563 int err = 0, i, n = 0, max_len = 32, res;
564 unsigned int reg, tmp;
565
566 if (!count)
567 return 0;
568
569 if (count > max_len)
570 return -E2BIG;
571
572 res = sscanf(buf, "0x%X %d", &reg, &n);
573 if (res < 1) {
574 err = -EINVAL;
575 goto out;
576 }
577
578 if (res == 1)
579 n = 1;
580
581 if (n > 15) {
582 err = -EMSGSIZE;
583 goto out;
584 }
585
586 if ((reg >= 0x280000) || ((reg + (n << 2)) >= 0x280000)) {
587 err = -EADDRNOTAVAIL;
588 goto out;
589 }
590
591 if (reg & 3) {
592 err = -EINVAL;
593 goto out;
594 }
595
596 for (i = 0; i < n; i++) {
597 err = carl9170_read_reg(ar, reg + (i << 2), &tmp);
598 if (err)
599 goto out;
600
601 ar->debug.ring[ar->debug.ring_tail].reg = reg + (i << 2);
602 ar->debug.ring[ar->debug.ring_tail].value = tmp;
603 ar->debug.ring_tail++;
604 ar->debug.ring_tail %= CARL9170_DEBUG_RING_SIZE;
605 }
606
607out:
608 return err ? err : count;
609}
610
611static char *carl9170_debugfs_hw_ioread32_read(struct ar9170 *ar, char *buf,
612 size_t bufsize, ssize_t *ret)
613{
614 int i = 0;
615
616 while (ar->debug.ring_head != ar->debug.ring_tail) {
617 ADD(buf, *ret, bufsize, "%.8x = %.8x\n",
618 ar->debug.ring[ar->debug.ring_head].reg,
619 ar->debug.ring[ar->debug.ring_head].value);
620
621 ar->debug.ring_head++;
622 ar->debug.ring_head %= CARL9170_DEBUG_RING_SIZE;
623
624 if (i++ == 64)
625 break;
626 }
627 ar->debug.ring_head = ar->debug.ring_tail;
628 return buf;
629}
630DEBUGFS_DECLARE_RW_FILE(hw_ioread32, CARL9170_DEBUG_RING_SIZE * 40);
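
/*
 * Illustrative shell usage (the path is assumed from the wiphy debugfs
 * layout set up in carl9170_debugfs_register()):
 *
 *	# queue a read of four consecutive words starting at 0x1c3d30
 *	echo "0x1c3d30 4" > /sys/kernel/debug/ieee80211/phy0/carl9170/hw_ioread32
 *	cat /sys/kernel/debug/ieee80211/phy0/carl9170/hw_ioread32
 */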
631
632static ssize_t carl9170_debugfs_bug_write(struct ar9170 *ar, const char *buf,
633 size_t count)
634{
635 int err;
636
637 if (count < 1)
638 return -EINVAL;
639
640 switch (buf[0]) {
641 case 'F':
642 ar->needs_full_reset = true;
643 break;
644
645 case 'R':
646 if (!IS_STARTED(ar)) {
647 return -EAGAIN;
649 }
650
651 ar->needs_full_reset = false;
652 break;
653
654 case 'M':
655 err = carl9170_mac_reset(ar);
656 if (err < 0)
657 count = err;
658
659 goto out;
660
661 case 'P':
662 err = carl9170_set_channel(ar, ar->hw->conf.channel,
663 ar->hw->conf.channel_type, CARL9170_RFI_COLD);
664 if (err < 0)
665 count = err;
666
667 goto out;
668
669 default:
670 return -EINVAL;
671 }
672
673 carl9170_restart(ar, CARL9170_RR_USER_REQUEST);
674
675out:
676 return count;
677}
678
679static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf,
680 size_t bufsize, ssize_t *ret)
681{
682 ADD(buf, *ret, bufsize, "[P]hy reinit, [R]estart, [F]ull usb reset, "
683 "[M]ac reset\n");
684 ADD(buf, *ret, bufsize, "firmware restarts:%d, last reason:%d\n",
685 ar->restart_counter, ar->last_reason);
686 ADD(buf, *ret, bufsize, "phy reinit errors:%d (%d)\n",
687 ar->total_chan_fail, ar->chan_fail);
688 ADD(buf, *ret, bufsize, "reported firmware errors:%d\n",
689 ar->fw.err_counter);
690 ADD(buf, *ret, bufsize, "reported firmware BUGs:%d\n",
691 ar->fw.bug_counter);
692 ADD(buf, *ret, bufsize, "pending restart requests:%d\n",
693 atomic_read(&ar->pending_restarts));
694 return buf;
695}
696__DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED);
697
698static const char *erp_modes[] = {
699 [CARL9170_ERP_INVALID] = "INVALID",
700 [CARL9170_ERP_AUTO] = "Automatic",
701 [CARL9170_ERP_MAC80211] = "Set by MAC80211",
702 [CARL9170_ERP_OFF] = "Force Off",
703 [CARL9170_ERP_RTS] = "Force RTS",
704 [CARL9170_ERP_CTS] = "Force CTS"
705};
706
707static char *carl9170_debugfs_erp_read(struct ar9170 *ar, char *buf,
708 size_t bufsize, ssize_t *ret)
709{
710 ADD(buf, *ret, bufsize, "ERP Setting: (%d) -> %s\n", ar->erp_mode,
711 erp_modes[ar->erp_mode]);
712 return buf;
713}
714
715static ssize_t carl9170_debugfs_erp_write(struct ar9170 *ar, const char *buf,
716 size_t count)
717{
718 int res, val;
719
720 if (count < 1)
721 return -EINVAL;
722
723 res = sscanf(buf, "%d", &val);
724 if (res != 1)
725 return -EINVAL;
726
727 if (!((val > CARL9170_ERP_INVALID) &&
728 (val < __CARL9170_ERP_NUM)))
729 return -EINVAL;
730
731 ar->erp_mode = val;
732 return count;
733}
734
735DEBUGFS_DECLARE_RW_FILE(erp, 80);
736
737static ssize_t carl9170_debugfs_hw_iowrite32_write(struct ar9170 *ar,
738 const char *buf, size_t count)
739{
740 int err = 0, max_len = 22, res;
741 u32 reg, val;
742
743 if (!count)
744 return 0;
745
746 if (count > max_len)
747 return -E2BIG;
748
749 res = sscanf(buf, "0x%X 0x%X", &reg, &val);
750 if (res != 2) {
751 err = -EINVAL;
752 goto out;
753 }
754
755 if (reg <= 0x100000 || reg >= 0x280000) {
756 err = -EADDRNOTAVAIL;
757 goto out;
758 }
759
760 if (reg & 3) {
761 err = -EINVAL;
762 goto out;
763 }
764
765 err = carl9170_write_reg(ar, reg, val);
766 if (err)
767 goto out;
768
769out:
770 return err ? err : count;
771}
772DEBUGFS_DECLARE_WO_FILE(hw_iowrite32);
773
774DEBUGFS_HW_TALLY_FILE(hw_tx_tally, "u");
775DEBUGFS_HW_TALLY_FILE(hw_rx_tally, "u");
776DEBUGFS_HW_TALLY_FILE(hw_phy_errors, "u");
777DEBUGFS_HW_REG_FILE(hw_wlan_queue, ".8x");
778DEBUGFS_HW_REG_FILE(hw_pta_queue, ".8x");
779DEBUGFS_HW_REG_FILE(hw_ampdu_info, ".8x");
780DEBUGFS_QUEUE_DUMP(tx_status, 0);
781DEBUGFS_QUEUE_DUMP(tx_status, 1);
782DEBUGFS_QUEUE_DUMP(tx_status, 2);
783DEBUGFS_QUEUE_DUMP(tx_status, 3);
784DEBUGFS_QUEUE_DUMP(tx_pending, 0);
785DEBUGFS_QUEUE_DUMP(tx_pending, 1);
786DEBUGFS_QUEUE_DUMP(tx_pending, 2);
787DEBUGFS_QUEUE_DUMP(tx_pending, 3);
788DEBUGFS_READONLY_FILE(usb_tx_anch_urbs, 20, "%d",
789 atomic_read(&ar->tx_anch_urbs));
790DEBUGFS_READONLY_FILE(usb_rx_anch_urbs, 20, "%d",
791 atomic_read(&ar->rx_anch_urbs));
792DEBUGFS_READONLY_FILE(usb_rx_work_urbs, 20, "%d",
793 atomic_read(&ar->rx_work_urbs));
794DEBUGFS_READONLY_FILE(usb_rx_pool_urbs, 20, "%d",
795 atomic_read(&ar->rx_pool_urbs));
796
797DEBUGFS_READONLY_FILE(tx_total_queued, 20, "%d",
798 atomic_read(&ar->tx_total_queued));
799DEBUGFS_READONLY_FILE(tx_ampdu_scheduler, 20, "%d",
800 atomic_read(&ar->tx_ampdu_scheduler));
801DEBUGFS_READONLY_FILE(tx_ampdu_timeout, 20, "%d",
802 ar->tx_ampdu_timeout);
803
804DEBUGFS_READONLY_FILE(tx_total_pending, 20, "%d",
805 atomic_read(&ar->tx_total_pending));
806
807DEBUGFS_READONLY_FILE(tx_ampdu_list_len, 20, "%d",
808 ar->tx_ampdu_list_len);
809
810DEBUGFS_READONLY_FILE(tx_ampdu_upload, 20, "%d",
811 atomic_read(&ar->tx_ampdu_upload));
812
813DEBUGFS_READONLY_FILE(tx_janitor_last_run, 64, "last run:%d ms ago",
814 jiffies_to_msecs(jiffies - ar->tx_janitor_last_run));
815
816DEBUGFS_READONLY_FILE(tx_dropped, 20, "%d", ar->tx_dropped);
817
818DEBUGFS_READONLY_FILE(rx_dropped, 20, "%d", ar->rx_dropped);
819
820DEBUGFS_READONLY_FILE(sniffer_enabled, 20, "%d", ar->sniffer_enabled);
821DEBUGFS_READONLY_FILE(rx_software_decryption, 20, "%d",
822 ar->rx_software_decryption);
823DEBUGFS_READONLY_FILE(ampdu_factor, 20, "%d",
824 ar->current_factor);
825DEBUGFS_READONLY_FILE(ampdu_density, 20, "%d",
826 ar->current_density);
827
828DEBUGFS_READONLY_FILE(beacon_int, 20, "%d TU", ar->global_beacon_int);
829DEBUGFS_READONLY_FILE(pretbtt, 20, "%d TU", ar->global_pretbtt);
830
831void carl9170_debugfs_register(struct ar9170 *ar)
832{
833 ar->debug_dir = debugfs_create_dir(KBUILD_MODNAME,
834 ar->hw->wiphy->debugfsdir);
835
836#define DEBUGFS_ADD(name) \
837 debugfs_create_file(#name, carl_debugfs_##name ##_ops.attr, \
838 ar->debug_dir, ar, \
839 &carl_debugfs_##name ## _ops.fops);
840
841 DEBUGFS_ADD(usb_tx_anch_urbs);
842 DEBUGFS_ADD(usb_rx_pool_urbs);
843 DEBUGFS_ADD(usb_rx_anch_urbs);
844 DEBUGFS_ADD(usb_rx_work_urbs);
845
846 DEBUGFS_ADD(tx_total_queued);
847 DEBUGFS_ADD(tx_total_pending);
848 DEBUGFS_ADD(tx_dropped);
849 DEBUGFS_ADD(tx_stuck);
850 DEBUGFS_ADD(tx_ampdu_upload);
851 DEBUGFS_ADD(tx_ampdu_scheduler);
852 DEBUGFS_ADD(tx_ampdu_list_len);
853
854 DEBUGFS_ADD(rx_dropped);
855 DEBUGFS_ADD(sniffer_enabled);
856 DEBUGFS_ADD(rx_software_decryption);
857
858 DEBUGFS_ADD(mem_usage);
859 DEBUGFS_ADD(qos_stat);
860 DEBUGFS_ADD(sta_psm);
861 DEBUGFS_ADD(ampdu_state);
862
863 DEBUGFS_ADD(hw_tx_tally);
864 DEBUGFS_ADD(hw_rx_tally);
865 DEBUGFS_ADD(hw_phy_errors);
866 DEBUGFS_ADD(phy_noise);
867
868 DEBUGFS_ADD(hw_wlan_queue);
869 DEBUGFS_ADD(hw_pta_queue);
870 DEBUGFS_ADD(hw_ampdu_info);
871
872 DEBUGFS_ADD(ampdu_density);
873 DEBUGFS_ADD(ampdu_factor);
874
875 DEBUGFS_ADD(tx_ampdu_timeout);
876
877 DEBUGFS_ADD(tx_janitor_last_run);
878
879 DEBUGFS_ADD(tx_status_0);
880 DEBUGFS_ADD(tx_status_1);
881 DEBUGFS_ADD(tx_status_2);
882 DEBUGFS_ADD(tx_status_3);
883
884 DEBUGFS_ADD(tx_pending_0);
885 DEBUGFS_ADD(tx_pending_1);
886 DEBUGFS_ADD(tx_pending_2);
887 DEBUGFS_ADD(tx_pending_3);
888
889 DEBUGFS_ADD(hw_ioread32);
890 DEBUGFS_ADD(hw_iowrite32);
891 DEBUGFS_ADD(bug);
892
893 DEBUGFS_ADD(erp);
894
895 DEBUGFS_ADD(vif_dump);
896
897 DEBUGFS_ADD(beacon_int);
898 DEBUGFS_ADD(pretbtt);
899
900#undef DEBUGFS_ADD
901}
902
903void carl9170_debugfs_unregister(struct ar9170 *ar)
904{
905 debugfs_remove_recursive(ar->debug_dir);
906}
diff --git a/drivers/net/wireless/ath/carl9170/debug.h b/drivers/net/wireless/ath/carl9170/debug.h
new file mode 100644
index 000000000000..ea4b97524122
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/debug.h
@@ -0,0 +1,134 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * debug header
5 *
6 * Copyright 2010, Christian Lamparter <chunkeey@googlemail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __DEBUG_H
39#define __DEBUG_H
40
41#include "eeprom.h"
42#include "wlan.h"
43#include "hw.h"
44#include "fwdesc.h"
45#include "fwcmd.h"
46#include "../regd.h"
47
48struct hw_stat_reg_entry {
49 u32 reg;
50 char nreg[32];
51};
52
53#define STAT_MAC_REG(reg) \
54 { (AR9170_MAC_REG_##reg), #reg }
55
56#define STAT_PTA_REG(reg) \
57 { (AR9170_PTA_REG_##reg), #reg }
58
59#define STAT_USB_REG(reg) \
60 { (AR9170_USB_REG_##reg), #reg }
61
62static const struct hw_stat_reg_entry hw_rx_tally_regs[] = {
63 STAT_MAC_REG(RX_CRC32), STAT_MAC_REG(RX_CRC16),
64 STAT_MAC_REG(RX_TIMEOUT_COUNT), STAT_MAC_REG(RX_ERR_DECRYPTION_UNI),
65 STAT_MAC_REG(RX_ERR_DECRYPTION_MUL), STAT_MAC_REG(RX_MPDU),
66 STAT_MAC_REG(RX_DROPPED_MPDU), STAT_MAC_REG(RX_DEL_MPDU),
67};
68
69static const struct hw_stat_reg_entry hw_phy_errors_regs[] = {
70 STAT_MAC_REG(RX_PHY_MISC_ERROR), STAT_MAC_REG(RX_PHY_XR_ERROR),
71 STAT_MAC_REG(RX_PHY_OFDM_ERROR), STAT_MAC_REG(RX_PHY_CCK_ERROR),
72 STAT_MAC_REG(RX_PHY_HT_ERROR), STAT_MAC_REG(RX_PHY_TOTAL),
73};
74
75static const struct hw_stat_reg_entry hw_tx_tally_regs[] = {
76 STAT_MAC_REG(TX_TOTAL), STAT_MAC_REG(TX_UNDERRUN),
77 STAT_MAC_REG(TX_RETRY),
78};
79
80static const struct hw_stat_reg_entry hw_wlan_queue_regs[] = {
81 STAT_MAC_REG(DMA_STATUS), STAT_MAC_REG(DMA_TRIGGER),
82 STAT_MAC_REG(DMA_TXQ0_ADDR), STAT_MAC_REG(DMA_TXQ0_CURR_ADDR),
83 STAT_MAC_REG(DMA_TXQ1_ADDR), STAT_MAC_REG(DMA_TXQ1_CURR_ADDR),
84 STAT_MAC_REG(DMA_TXQ2_ADDR), STAT_MAC_REG(DMA_TXQ2_CURR_ADDR),
85 STAT_MAC_REG(DMA_TXQ3_ADDR), STAT_MAC_REG(DMA_TXQ3_CURR_ADDR),
86 STAT_MAC_REG(DMA_RXQ_ADDR), STAT_MAC_REG(DMA_RXQ_CURR_ADDR),
87};
88
89static const struct hw_stat_reg_entry hw_ampdu_info_regs[] = {
90 STAT_MAC_REG(AMPDU_DENSITY), STAT_MAC_REG(AMPDU_FACTOR),
91};
92
93static const struct hw_stat_reg_entry hw_pta_queue_regs[] = {
94 STAT_PTA_REG(DN_CURR_ADDRH), STAT_PTA_REG(DN_CURR_ADDRL),
95 STAT_PTA_REG(UP_CURR_ADDRH), STAT_PTA_REG(UP_CURR_ADDRL),
96 STAT_PTA_REG(DMA_STATUS), STAT_PTA_REG(DMA_MODE_CTRL),
97};
98
99#define DEFINE_TALLY(name) \
100 u32 name##_sum[ARRAY_SIZE(name##_regs)], \
101 name##_counter[ARRAY_SIZE(name##_regs)] \
102
103#define DEFINE_STAT(name) \
104 u32 name##_counter[ARRAY_SIZE(name##_regs)] \
105
106struct ath_stats {
107 DEFINE_TALLY(hw_tx_tally);
108 DEFINE_TALLY(hw_rx_tally);
109 DEFINE_TALLY(hw_phy_errors);
110 DEFINE_STAT(hw_wlan_queue);
111 DEFINE_STAT(hw_pta_queue);
112 DEFINE_STAT(hw_ampdu_info);
113};
114
115struct carl9170_debug_mem_rbe {
116 u32 reg;
117 u32 value;
118};
119
120#define CARL9170_DEBUG_RING_SIZE 64
121
122struct carl9170_debug {
123 struct ath_stats stats;
124 struct carl9170_debug_mem_rbe ring[CARL9170_DEBUG_RING_SIZE];
125 struct mutex ring_lock;
126 unsigned int ring_head, ring_tail;
127 struct delayed_work update_tally;
128};
129
130struct ar9170;
131
132void carl9170_debugfs_register(struct ar9170 *ar);
133void carl9170_debugfs_unregister(struct ar9170 *ar);
134#endif /* __DEBUG_H */
diff --git a/drivers/net/wireless/ath/carl9170/eeprom.h b/drivers/net/wireless/ath/carl9170/eeprom.h
new file mode 100644
index 000000000000..7cff40ac7759
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/eeprom.h
@@ -0,0 +1,216 @@
1/*
2 * Shared Atheros AR9170 Header
3 *
4 * EEPROM layout
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __CARL9170_SHARED_EEPROM_H
39#define __CARL9170_SHARED_EEPROM_H
40
41#define AR9170_EEPROM_START 0x1600
42
43#define AR5416_MAX_CHAINS 2
44#define AR5416_MODAL_SPURS 5
45
46struct ar9170_eeprom_modal {
47 __le32 antCtrlChain[AR5416_MAX_CHAINS];
48 __le32 antCtrlCommon;
49 s8 antennaGainCh[AR5416_MAX_CHAINS];
50 u8 switchSettling;
51 u8 txRxAttenCh[AR5416_MAX_CHAINS];
52 u8 rxTxMarginCh[AR5416_MAX_CHAINS];
53 s8 adcDesiredSize;
54 s8 pgaDesiredSize;
55 u8 xlnaGainCh[AR5416_MAX_CHAINS];
56 u8 txEndToXpaOff;
57 u8 txEndToRxOn;
58 u8 txFrameToXpaOn;
59 u8 thresh62;
60 s8 noiseFloorThreshCh[AR5416_MAX_CHAINS];
61 u8 xpdGain;
62 u8 xpd;
63 s8 iqCalICh[AR5416_MAX_CHAINS];
64 s8 iqCalQCh[AR5416_MAX_CHAINS];
65 u8 pdGainOverlap;
66 u8 ob;
67 u8 db;
68 u8 xpaBiasLvl;
69 u8 pwrDecreaseFor2Chain;
70 u8 pwrDecreaseFor3Chain;
71 u8 txFrameToDataStart;
72 u8 txFrameToPaOn;
73 u8 ht40PowerIncForPdadc;
74 u8 bswAtten[AR5416_MAX_CHAINS];
75 u8 bswMargin[AR5416_MAX_CHAINS];
76 u8 swSettleHt40;
77 u8 reserved[22];
78 struct spur_channel {
79 __le16 spurChan;
80 u8 spurRangeLow;
81 u8 spurRangeHigh;
82 } __packed spur_channels[AR5416_MODAL_SPURS];
83} __packed;
84
85#define AR5416_NUM_PD_GAINS 4
86#define AR5416_PD_GAIN_ICEPTS 5
87
88struct ar9170_calibration_data_per_freq {
89 u8 pwr_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
90 u8 vpd_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
91} __packed;
92
93#define AR5416_NUM_5G_CAL_PIERS 8
94#define AR5416_NUM_2G_CAL_PIERS 4
95
96#define AR5416_NUM_5G_TARGET_PWRS 8
97#define AR5416_NUM_2G_CCK_TARGET_PWRS 3
98#define AR5416_NUM_2G_OFDM_TARGET_PWRS 4
99#define AR5416_MAX_NUM_TGT_PWRS 8
100
101struct ar9170_calibration_target_power_legacy {
102 u8 freq;
103 u8 power[4];
104} __packed;
105
106struct ar9170_calibration_target_power_ht {
107 u8 freq;
108 u8 power[8];
109} __packed;
110
111#define AR5416_NUM_CTLS 24
112
113struct ar9170_calctl_edges {
114 u8 channel;
115#define AR9170_CALCTL_EDGE_FLAGS 0xC0
116 u8 power_flags;
117} __packed;
118
119#define AR5416_NUM_BAND_EDGES 8
120
121struct ar9170_calctl_data {
122 struct ar9170_calctl_edges
123 control_edges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
124} __packed;
125
126struct ar9170_eeprom {
127 __le16 length;
128 __le16 checksum;
129 __le16 version;
130 u8 operating_flags;
131#define AR9170_OPFLAG_5GHZ 1
132#define AR9170_OPFLAG_2GHZ 2
133 u8 misc;
134 __le16 reg_domain[2];
135 u8 mac_address[6];
136 u8 rx_mask;
137 u8 tx_mask;
138 __le16 rf_silent;
139 __le16 bluetooth_options;
140 __le16 device_capabilities;
141 __le32 build_number;
142 u8 deviceType;
143 u8 reserved[33];
144
145 u8 customer_data[64];
146
147 struct ar9170_eeprom_modal
148 modal_header[2];
149
150 u8 cal_freq_pier_5G[AR5416_NUM_5G_CAL_PIERS];
151 u8 cal_freq_pier_2G[AR5416_NUM_2G_CAL_PIERS];
152
153 struct ar9170_calibration_data_per_freq
154 cal_pier_data_5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS],
155 cal_pier_data_2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];
156
157 /* power calibration data */
158 struct ar9170_calibration_target_power_legacy
159 cal_tgt_pwr_5G[AR5416_NUM_5G_TARGET_PWRS];
160 struct ar9170_calibration_target_power_ht
161 cal_tgt_pwr_5G_ht20[AR5416_NUM_5G_TARGET_PWRS],
162 cal_tgt_pwr_5G_ht40[AR5416_NUM_5G_TARGET_PWRS];
163
164 struct ar9170_calibration_target_power_legacy
165 cal_tgt_pwr_2G_cck[AR5416_NUM_2G_CCK_TARGET_PWRS],
166 cal_tgt_pwr_2G_ofdm[AR5416_NUM_2G_OFDM_TARGET_PWRS];
167 struct ar9170_calibration_target_power_ht
168 cal_tgt_pwr_2G_ht20[AR5416_NUM_2G_OFDM_TARGET_PWRS],
169 cal_tgt_pwr_2G_ht40[AR5416_NUM_2G_OFDM_TARGET_PWRS];
170
171 /* conformance testing limits */
172 u8 ctl_index[AR5416_NUM_CTLS];
173 struct ar9170_calctl_data
174 ctl_data[AR5416_NUM_CTLS];
175
176 u8 pad;
177 __le16 subsystem_id;
178} __packed;
179
180#define AR9170_LED_MODE_POWER_ON 0x0001
181#define AR9170_LED_MODE_RESERVED 0x0002
182#define AR9170_LED_MODE_DISABLE_STATE 0x0004
183#define AR9170_LED_MODE_OFF_IN_PSM 0x0008
184
185/* when the AR9170_LED_MODE_MODE bit is set */
186#define AR9170_LED_MODE_FREQUENCY_S 4
187#define AR9170_LED_MODE_FREQUENCY 0x0030
188#define AR9170_LED_MODE_FREQUENCY_1HZ 0x0000
189#define AR9170_LED_MODE_FREQUENCY_0_5HZ 0x0010
190#define AR9170_LED_MODE_FREQUENCY_0_25HZ 0x0020
191#define AR9170_LED_MODE_FREQUENCY_0_125HZ 0x0030
192
193/* when the AR9170_LED_MODE_MODE bit is not set */
194#define AR9170_LED_MODE_CONN_STATE_S 4
195#define AR9170_LED_MODE_CONN_STATE 0x0030
196#define AR9170_LED_MODE_CONN_STATE_FORCE_OFF 0x0000
197#define AR9170_LED_MODE_CONN_STATE_FORCE_ON 0x0010
198/* Idle off / Active on */
199#define AR9170_LED_MODE_CONN_STATE_IOFF_AON 0x0020
200/* Idle on / Active off */
201#define AR9170_LED_MODE_CONN_STATE_ION_AOFF 0x0010
202
203#define AR9170_LED_MODE_MODE 0x0040
204#define AR9170_LED_MODE_RESERVED2 0x0080
205
206#define AR9170_LED_MODE_TON_SCAN_S 8
207#define AR9170_LED_MODE_TON_SCAN 0x0f00
208
209#define AR9170_LED_MODE_TOFF_SCAN_S 12
210#define AR9170_LED_MODE_TOFF_SCAN 0xf000
211
212struct ar9170_led_mode {
213 __le16 led;
214};
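
/*
 * Illustrative composition (not part of this patch; the role of the
 * AR9170_LED_MODE_MODE bit is assumed from the comments above): LED on
 * at power-on, off while in PSM, blinking at 0.5 Hz:
 *
 *	struct ar9170_led_mode mode;
 *
 *	mode.led = cpu_to_le16(AR9170_LED_MODE_POWER_ON |
 *			       AR9170_LED_MODE_OFF_IN_PSM |
 *			       AR9170_LED_MODE_MODE |
 *			       AR9170_LED_MODE_FREQUENCY_0_5HZ);
 */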
215
216#endif /* __CARL9170_SHARED_EEPROM_H */
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
new file mode 100644
index 000000000000..36615462b87a
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -0,0 +1,395 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * firmware parser
5 *
6 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 */
22
23#include <linux/kernel.h>
24#include <linux/firmware.h>
25#include <linux/crc32.h>
26#include "carl9170.h"
27#include "fwcmd.h"
28#include "version.h"
29
30#define MAKE_STR(symbol) #symbol
31#define TO_STR(symbol) MAKE_STR(symbol)
32#define CARL9170FW_API_VER_STR TO_STR(CARL9170FW_API_MAX_VER)
33MODULE_VERSION(CARL9170FW_API_VER_STR ":" CARL9170FW_VERSION_GIT);
34
35static const u8 otus_magic[4] = { OTUS_MAGIC };
36
37static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4],
38 const unsigned int len, const u8 compatible_revision)
39{
40 const struct carl9170fw_desc_head *iter;
41
42 carl9170fw_for_each_hdr(iter, ar->fw.desc) {
43 if (carl9170fw_desc_cmp(iter, descid, len,
44 compatible_revision))
45 return (void *)iter;
46 }
47
48 /* the loop above stops short of the LAST descriptor, so check it explicitly */
49 if (carl9170fw_desc_cmp(iter, descid, len,
50 compatible_revision))
51 return (void *)iter;
52
53 return NULL;
54}
55
56static int carl9170_fw_verify_descs(struct ar9170 *ar,
57 const struct carl9170fw_desc_head *head, unsigned int max_len)
58{
59 const struct carl9170fw_desc_head *pos;
60 unsigned long pos_addr, end_addr;
61 unsigned int pos_length;
62
63 if (max_len < sizeof(*pos))
64 return -ENODATA;
65
66 max_len = min_t(unsigned int, CARL9170FW_DESC_MAX_LENGTH, max_len);
67
68 pos = head;
69 pos_addr = (unsigned long) pos;
70 end_addr = pos_addr + max_len;
71
72 while (pos_addr < end_addr) {
73 if (pos_addr + sizeof(*head) > end_addr)
74 return -E2BIG;
75
76 pos_length = le16_to_cpu(pos->length);
77
78 if (pos_length < sizeof(*head))
79 return -EBADMSG;
80
81 if (pos_length > max_len)
82 return -EOVERFLOW;
83
84 if (pos_addr + pos_length > end_addr)
85 return -EMSGSIZE;
86
87 if (carl9170fw_desc_cmp(pos, LAST_MAGIC,
88 CARL9170FW_LAST_DESC_SIZE,
89 CARL9170FW_LAST_DESC_CUR_VER))
90 return 0;
91
92 pos_addr += pos_length;
93 pos = (void *)pos_addr;
94 max_len -= pos_length;
95 }
96 return -EINVAL;
97}
98
99static void carl9170_fw_info(struct ar9170 *ar)
100{
101 const struct carl9170fw_motd_desc *motd_desc;
102 unsigned int str_ver_len;
103 u32 fw_date;
104
105 dev_info(&ar->udev->dev, "driver API: %s 2%03d-%02d-%02d [%d-%d]\n",
106 CARL9170FW_VERSION_GIT, CARL9170FW_VERSION_YEAR,
107 CARL9170FW_VERSION_MONTH, CARL9170FW_VERSION_DAY,
108 CARL9170FW_API_MIN_VER, CARL9170FW_API_MAX_VER);
109
110 motd_desc = carl9170_fw_find_desc(ar, MOTD_MAGIC,
111 sizeof(*motd_desc), CARL9170FW_MOTD_DESC_CUR_VER);
112
113 if (motd_desc) {
114 str_ver_len = strnlen(motd_desc->release,
115 CARL9170FW_MOTD_RELEASE_LEN);
116
117 fw_date = le32_to_cpu(motd_desc->fw_year_month_day);
118
119 dev_info(&ar->udev->dev, "firmware API: %.*s 2%03d-%02d-%02d\n",
120 str_ver_len, motd_desc->release,
121 CARL9170FW_GET_YEAR(fw_date),
122 CARL9170FW_GET_MONTH(fw_date),
123 CARL9170FW_GET_DAY(fw_date));
124
125 strlcpy(ar->hw->wiphy->fw_version, motd_desc->release,
126 sizeof(ar->hw->wiphy->fw_version));
127 }
128}
129
130static bool valid_dma_addr(const u32 address)
131{
132 if (address >= AR9170_SRAM_OFFSET &&
133 address < (AR9170_SRAM_OFFSET + AR9170_SRAM_SIZE))
134 return true;
135
136 return false;
137}
138
139static bool valid_cpu_addr(const u32 address)
140{
141 if (valid_dma_addr(address) || (address >= AR9170_PRAM_OFFSET &&
142 address < (AR9170_PRAM_OFFSET + AR9170_PRAM_SIZE)))
143 return true;
144
145 return false;
146}
147
148static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
149{
150 const struct carl9170fw_otus_desc *otus_desc;
151 const struct carl9170fw_chk_desc *chk_desc;
152 const struct carl9170fw_last_desc *last_desc;
153
154 last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
155 sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
156 if (!last_desc)
157 return -EINVAL;
158
159 otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC,
160 sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER);
161 if (!otus_desc) {
162 dev_err(&ar->udev->dev, "failed to find compatible firmware "
163 "descriptor.\n");
164 return -ENODATA;
165 }
166
167 chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC,
168 sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER);
169
170 if (chk_desc) {
171 unsigned long fin, diff;
172 unsigned int dsc_len;
173 u32 crc32;
174
175 dsc_len = min_t(unsigned int, len,
176 (unsigned long)chk_desc - (unsigned long)otus_desc);
177
178 fin = (unsigned long) last_desc + sizeof(*last_desc);
179 diff = fin - (unsigned long) otus_desc;
180
181 if (diff < len)
182 len -= diff;
183
184 if (len < 256)
185 return -EIO;
186
187 crc32 = crc32_le(~0, data, len);
188 if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
189 dev_err(&ar->udev->dev, "fw checksum test failed.\n");
190 return -ENOEXEC;
191 }
192
193 crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
194 if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
195 dev_err(&ar->udev->dev, "descriptor check failed.\n");
196 return -EINVAL;
197 }
198 } else {
199 dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
200 }
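/*
 * For reference, a sketch (an assumption, not part of this patch) of how
 * a build tool would produce the two checksums verified above: fw_crc32
 * covers the firmware image body, and hdr_crc32 is chained on top of it,
 * covering the descriptor block:
 *
 *	u32 fw_crc = crc32_le(~0, image, image_len);
 *	u32 hdr_crc = crc32_le(fw_crc, desc_block, desc_len);
 *	chk->fw_crc32 = cpu_to_le32(fw_crc);
 *	chk->hdr_crc32 = cpu_to_le32(hdr_crc);
 *
 * (image, image_len, desc_block, desc_len and chk are placeholders.)
 */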
201
202#define SUPP(feat) \
203 (carl9170fw_supports(otus_desc->feature_set, feat))
204
205 if (!SUPP(CARL9170FW_DUMMY_FEATURE)) {
206 dev_err(&ar->udev->dev, "invalid firmware descriptor "
207 "format detected.\n");
208 return -EINVAL;
209 }
210
211 ar->fw.api_version = otus_desc->api_ver;
212
213 if (ar->fw.api_version < CARL9170FW_API_MIN_VER ||
214 ar->fw.api_version > CARL9170FW_API_MAX_VER) {
215 dev_err(&ar->udev->dev, "unsupported firmware api version.\n");
216 return -EINVAL;
217 }
218
219 if (!SUPP(CARL9170FW_COMMAND_PHY) || SUPP(CARL9170FW_UNUSABLE) ||
220 !SUPP(CARL9170FW_HANDLE_BACK_REQ)) {
221		dev_err(&ar->udev->dev, "firmware does not support "
222			"mandatory features.\n");
223 return -ECANCELED;
224 }
225
226 if (ilog2(le32_to_cpu(otus_desc->feature_set)) >=
227 __CARL9170FW_FEATURE_NUM) {
228 dev_warn(&ar->udev->dev, "driver does not support all "
229 "firmware features.\n");
230 }
231
232 if (!SUPP(CARL9170FW_COMMAND_CAM)) {
233 dev_info(&ar->udev->dev, "crypto offloading is disabled "
234 "by firmware.\n");
235 ar->disable_offload = true;
236 }
237
238 if (SUPP(CARL9170FW_PSM))
239 ar->hw->flags |= IEEE80211_HW_SUPPORTS_PS;
240
241 if (!SUPP(CARL9170FW_USB_INIT_FIRMWARE)) {
242 dev_err(&ar->udev->dev, "firmware does not provide "
243 "mandatory interfaces.\n");
244 return -EINVAL;
245 }
246
247 if (SUPP(CARL9170FW_MINIBOOT))
248 ar->fw.offset = le16_to_cpu(otus_desc->miniboot_size);
249 else
250 ar->fw.offset = 0;
251
252 if (SUPP(CARL9170FW_USB_DOWN_STREAM)) {
253 ar->hw->extra_tx_headroom += sizeof(struct ar9170_stream);
254 ar->fw.tx_stream = true;
255 }
256
257 if (SUPP(CARL9170FW_USB_UP_STREAM))
258 ar->fw.rx_stream = true;
259
260 ar->fw.vif_num = otus_desc->vif_num;
261 ar->fw.cmd_bufs = otus_desc->cmd_bufs;
262 ar->fw.address = le32_to_cpu(otus_desc->fw_address);
263 ar->fw.rx_size = le16_to_cpu(otus_desc->rx_max_frame_len);
264 ar->fw.mem_blocks = min_t(unsigned int, otus_desc->tx_descs, 0xfe);
265 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
266 ar->fw.mem_block_size = le16_to_cpu(otus_desc->tx_frag_len);
267
268 if (ar->fw.vif_num >= AR9170_MAX_VIRTUAL_MAC || !ar->fw.vif_num ||
269 ar->fw.mem_blocks < 16 || !ar->fw.cmd_bufs ||
270 ar->fw.mem_block_size < 64 || ar->fw.mem_block_size > 512 ||
271 ar->fw.rx_size > 32768 || ar->fw.rx_size < 4096 ||
272 !valid_cpu_addr(ar->fw.address)) {
273 dev_err(&ar->udev->dev, "firmware shows obvious signs of "
274 "malicious tampering.\n");
275 return -EINVAL;
276 }
277
278 ar->fw.beacon_addr = le32_to_cpu(otus_desc->bcn_addr);
279 ar->fw.beacon_max_len = le16_to_cpu(otus_desc->bcn_len);
280
281 if (valid_dma_addr(ar->fw.beacon_addr) && ar->fw.beacon_max_len >=
282 AR9170_MAC_BCN_LENGTH_MAX) {
283 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
284
285 if (SUPP(CARL9170FW_WLANTX_CAB)) {
286 ar->hw->wiphy->interface_modes |=
287 BIT(NL80211_IFTYPE_AP);
288 }
289 }
290
291#undef SUPP
292 return 0;
293}
294
295static struct carl9170fw_desc_head *
296carl9170_find_fw_desc(struct ar9170 *ar, const __u8 *fw_data, const size_t len)
297
298{
299 int scan = 0, found = 0;
300
301 if (!carl9170fw_size_check(len)) {
302		dev_err(&ar->udev->dev, "firmware size is out of bounds.\n");
303 return NULL;
304 }
305
306 while (scan < len - sizeof(struct carl9170fw_desc_head)) {
307 if (fw_data[scan++] == otus_magic[found])
308 found++;
309 else
310 found = 0;
311
312 if (scan >= len)
313 break;
314
315 if (found == sizeof(otus_magic))
316 break;
317 }
318
319 if (found != sizeof(otus_magic))
320 return NULL;
321
322 return (void *)&fw_data[scan - found];
323}
324
325int carl9170_fw_fix_eeprom(struct ar9170 *ar)
326{
327 const struct carl9170fw_fix_desc *fix_desc = NULL;
328 unsigned int i, n, off;
329 u32 *data = (void *)&ar->eeprom;
330
331 fix_desc = carl9170_fw_find_desc(ar, FIX_MAGIC,
332 sizeof(*fix_desc), CARL9170FW_FIX_DESC_CUR_VER);
333
334 if (!fix_desc)
335 return 0;
336
337 n = (le16_to_cpu(fix_desc->head.length) - sizeof(*fix_desc)) /
338 sizeof(struct carl9170fw_fix_entry);
339
340 for (i = 0; i < n; i++) {
341 off = le32_to_cpu(fix_desc->data[i].address) -
342 AR9170_EEPROM_START;
343
344 if (off >= sizeof(struct ar9170_eeprom) || (off & 3)) {
345 dev_err(&ar->udev->dev, "Skip invalid entry %d\n", i);
346 continue;
347 }
348
349 data[off / sizeof(*data)] &=
350 le32_to_cpu(fix_desc->data[i].mask);
351 data[off / sizeof(*data)] |=
352 le32_to_cpu(fix_desc->data[i].value);
353 }
354
355 return 0;
356}
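/*
 * Worked example for the patch loop above (illustrative values only): an
 * entry with address = AR9170_EEPROM_START + 0x10, mask = 0xffff0000 and
 * value = 0x00005397 lands in data[4] and rewrites, say, 0x12345678 to
 * (0x12345678 & 0xffff0000) | 0x00005397 == 0x12345397.
 */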
357
358int carl9170_parse_firmware(struct ar9170 *ar)
359{
360 const struct carl9170fw_desc_head *fw_desc = NULL;
361 const struct firmware *fw = ar->fw.fw;
362 unsigned long header_offset = 0;
363 int err;
364
365 if (WARN_ON(!fw))
366 return -EINVAL;
367
368 fw_desc = carl9170_find_fw_desc(ar, fw->data, fw->size);
369
370 if (!fw_desc) {
371 dev_err(&ar->udev->dev, "unsupported firmware.\n");
372 return -ENODATA;
373 }
374
375 header_offset = (unsigned long)fw_desc - (unsigned long)fw->data;
376
377 err = carl9170_fw_verify_descs(ar, fw_desc, fw->size - header_offset);
378 if (err) {
379 dev_err(&ar->udev->dev, "damaged firmware (%d).\n", err);
380 return err;
381 }
382
383 ar->fw.desc = fw_desc;
384
385 carl9170_fw_info(ar);
386
387 err = carl9170_fw(ar, fw->data, fw->size);
388 if (err) {
389 dev_err(&ar->udev->dev, "failed to parse firmware (%d).\n",
390 err);
391 return err;
392 }
393
394 return 0;
395}
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
new file mode 100644
index 000000000000..d4a4e1dbef06
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -0,0 +1,268 @@
1/*
2 * Shared Atheros AR9170 Header
3 *
4 * Firmware command interface definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#ifndef __CARL9170_SHARED_FWCMD_H
40#define __CARL9170_SHARED_FWCMD_H
41
42#define CARL9170_MAX_CMD_LEN 64
43#define CARL9170_MAX_CMD_PAYLOAD_LEN 60
44
45#define CARL9170FW_API_MIN_VER 1
46#define CARL9170FW_API_MAX_VER 1
47
48enum carl9170_cmd_oids {
49 CARL9170_CMD_RREG = 0x00,
50 CARL9170_CMD_WREG = 0x01,
51 CARL9170_CMD_ECHO = 0x02,
52 CARL9170_CMD_SWRST = 0x03,
53 CARL9170_CMD_REBOOT = 0x04,
54 CARL9170_CMD_BCN_CTRL = 0x05,
55 CARL9170_CMD_READ_TSF = 0x06,
56
57 /* CAM */
58 CARL9170_CMD_EKEY = 0x10,
59 CARL9170_CMD_DKEY = 0x11,
60
61 /* RF / PHY */
62 CARL9170_CMD_FREQUENCY = 0x20,
63 CARL9170_CMD_RF_INIT = 0x21,
64 CARL9170_CMD_SYNTH = 0x22,
65 CARL9170_CMD_FREQ_START = 0x23,
66 CARL9170_CMD_PSM = 0x24,
67
 68	/* Asynchronous command flag */
69 CARL9170_CMD_ASYNC_FLAG = 0x40,
70 CARL9170_CMD_WREG_ASYNC = (CARL9170_CMD_WREG |
71 CARL9170_CMD_ASYNC_FLAG),
72 CARL9170_CMD_REBOOT_ASYNC = (CARL9170_CMD_REBOOT |
73 CARL9170_CMD_ASYNC_FLAG),
74 CARL9170_CMD_BCN_CTRL_ASYNC = (CARL9170_CMD_BCN_CTRL |
75 CARL9170_CMD_ASYNC_FLAG),
76 CARL9170_CMD_PSM_ASYNC = (CARL9170_CMD_PSM |
77 CARL9170_CMD_ASYNC_FLAG),
78
79 /* responses and traps */
80 CARL9170_RSP_FLAG = 0xc0,
81 CARL9170_RSP_PRETBTT = 0xc0,
82 CARL9170_RSP_TXCOMP = 0xc1,
83 CARL9170_RSP_BEACON_CONFIG = 0xc2,
84 CARL9170_RSP_ATIM = 0xc3,
85 CARL9170_RSP_WATCHDOG = 0xc6,
86 CARL9170_RSP_TEXT = 0xca,
87 CARL9170_RSP_HEXDUMP = 0xcc,
88 CARL9170_RSP_RADAR = 0xcd,
89 CARL9170_RSP_GPIO = 0xce,
90 CARL9170_RSP_BOOT = 0xcf,
91};
92
93struct carl9170_set_key_cmd {
94 __le16 user;
95 __le16 keyId;
96 __le16 type;
97 u8 macAddr[6];
98 u32 key[4];
99} __packed;
100#define CARL9170_SET_KEY_CMD_SIZE 28
101
102struct carl9170_disable_key_cmd {
103 __le16 user;
104 __le16 padding;
105} __packed;
106#define CARL9170_DISABLE_KEY_CMD_SIZE 4
107
108struct carl9170_u32_list {
109 u32 vals[0];
110} __packed;
111
112struct carl9170_reg_list {
113 __le32 regs[0];
114} __packed;
115
116struct carl9170_write_reg {
117 struct {
118 __le32 addr;
119 __le32 val;
120 } regs[0] __packed;
121} __packed;
122
123#define CARL9170FW_PHY_HT_ENABLE 0x4
124#define CARL9170FW_PHY_HT_DYN2040 0x8
125#define CARL9170FW_PHY_HT_EXT_CHAN_OFF 0x3
126#define CARL9170FW_PHY_HT_EXT_CHAN_OFF_S 2
127
128struct carl9170_rf_init {
129 __le32 freq;
130 u8 ht_settings;
131 u8 padding2[3];
132 __le32 delta_slope_coeff_exp;
133 __le32 delta_slope_coeff_man;
134 __le32 delta_slope_coeff_exp_shgi;
135 __le32 delta_slope_coeff_man_shgi;
136 __le32 finiteLoopCount;
137} __packed;
138#define CARL9170_RF_INIT_SIZE 28
139
140struct carl9170_rf_init_result {
141 __le32 ret; /* AR9170_PHY_REG_AGC_CONTROL */
142} __packed;
143#define CARL9170_RF_INIT_RESULT_SIZE 4
144
145#define CARL9170_PSM_SLEEP 0x1000
146#define CARL9170_PSM_SOFTWARE 0
147#define CARL9170_PSM_WAKE 0 /* internally used. */
148#define CARL9170_PSM_COUNTER 0xfff
149#define CARL9170_PSM_COUNTER_S 0
150
151struct carl9170_psm {
152 __le32 state;
153} __packed;
154#define CARL9170_PSM_SIZE 4
155
156struct carl9170_bcn_ctrl_cmd {
157 __le32 vif_id;
158 __le32 mode;
159 __le32 bcn_addr;
160 __le32 bcn_len;
161} __packed;
162#define CARL9170_BCN_CTRL_CMD_SIZE 16
163
164#define CARL9170_BCN_CTRL_DRAIN 0
165#define CARL9170_BCN_CTRL_CAB_TRIGGER 1
166
167struct carl9170_cmd_head {
168 union {
169 struct {
170 u8 len;
171 u8 cmd;
172 u8 seq;
173 u8 ext;
174 } __packed;
175
176 u32 hdr_data;
177 } __packed;
178} __packed;
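/*
 * The union above lets the four header bytes be accessed either field
 * by field or as one 32-bit word (hdr_data). A minimal sketch of
 * filling in a header; that len counts only the payload (without the
 * header) and the seq handling are assumptions here, and cmd is one of
 * enum carl9170_cmd_oids:
 *
 *	static inline void carl9170_cmd_hdr_fill(struct carl9170_cmd_head *hdr,
 *						 u8 cmd, u8 plen, u8 seq)
 *	{
 *		hdr->len = plen;
 *		hdr->cmd = cmd;
 *		hdr->seq = seq;
 *		hdr->ext = 0;
 *	}
 */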
179
180struct carl9170_cmd {
181 struct carl9170_cmd_head hdr;
182 union {
183 struct carl9170_set_key_cmd setkey;
184 struct carl9170_disable_key_cmd disablekey;
185 struct carl9170_u32_list echo;
186 struct carl9170_reg_list rreg;
187 struct carl9170_write_reg wreg;
188 struct carl9170_rf_init rf_init;
189 struct carl9170_psm psm;
190 struct carl9170_bcn_ctrl_cmd bcn_ctrl;
191 u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
192 } __packed;
193} __packed;
194
195#define CARL9170_TX_STATUS_QUEUE 3
196#define CARL9170_TX_STATUS_QUEUE_S 0
197#define CARL9170_TX_STATUS_RIX_S 2
198#define CARL9170_TX_STATUS_RIX (3 << CARL9170_TX_STATUS_RIX_S)
199#define CARL9170_TX_STATUS_TRIES_S 4
200#define CARL9170_TX_STATUS_TRIES (7 << CARL9170_TX_STATUS_TRIES_S)
201#define CARL9170_TX_STATUS_SUCCESS 0x80
202
203/*
204 * NOTE:
205 * Both structs [carl9170_tx_status and _carl9170_tx_status]
206 * need to be "bit for bit" in sync.
207 */
208struct carl9170_tx_status {
209 /*
210 * Beware of compiler bugs in all gcc pre 4.4!
211 */
212
213 u8 cookie;
214 u8 queue:2;
215 u8 rix:2;
216 u8 tries:3;
217 u8 success:1;
218} __packed;
219struct _carl9170_tx_status {
220 /*
221 * This version should be immune to all alignment bugs.
222 */
223
224 u8 cookie;
225 u8 info;
226} __packed;
227#define CARL9170_TX_STATUS_SIZE 2
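/*
 * A compile-time guard (a sketch, not part of the original interface)
 * that would enforce the "bit for bit" note above:
 *
 *	static inline void __carl9170_tx_status_check(void)
 *	{
 *		BUILD_BUG_ON(sizeof(struct carl9170_tx_status) !=
 *			     sizeof(struct _carl9170_tx_status));
 *		BUILD_BUG_ON(sizeof(struct carl9170_tx_status) !=
 *			     CARL9170_TX_STATUS_SIZE);
 *	}
 */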
228
229#define CARL9170_RSP_TX_STATUS_NUM (CARL9170_MAX_CMD_PAYLOAD_LEN / \
230 sizeof(struct _carl9170_tx_status))
231
232#define CARL9170_TX_MAX_RATE_TRIES 7
233
234#define CARL9170_TX_MAX_RATES 4
235#define CARL9170_TX_MAX_RETRY_RATES (CARL9170_TX_MAX_RATES - 1)
236#define CARL9170_ERR_MAGIC "ERR:"
237#define CARL9170_BUG_MAGIC "BUG:"
238
239struct carl9170_gpio {
240 __le32 gpio;
241} __packed;
242#define CARL9170_GPIO_SIZE 4
243
244struct carl9170_tsf_rsp {
245 union {
246 __le32 tsf[2];
247 __le64 tsf_64;
248 } __packed;
249} __packed;
250#define CARL9170_TSF_RSP_SIZE 8
251
252struct carl9170_rsp {
253 struct carl9170_cmd_head hdr;
254
255 union {
256 struct carl9170_rf_init_result rf_init_res;
257 struct carl9170_u32_list rreg_res;
258 struct carl9170_u32_list echo;
259 struct carl9170_tx_status tx_status[0];
260 struct _carl9170_tx_status _tx_status[0];
261 struct carl9170_gpio gpio;
262 struct carl9170_tsf_rsp tsf;
263 struct carl9170_psm psm;
264 u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
265 } __packed;
266} __packed;
267
268#endif /* __CARL9170_SHARED_FWCMD_H */
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
new file mode 100644
index 000000000000..7cd811708fe5
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -0,0 +1,237 @@
1/*
2 * Shared CARL9170 Header
3 *
4 * Firmware descriptor format
5 *
6 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING. If not, see
19 * http://www.gnu.org/licenses/.
20 */
21
22#ifndef __CARL9170_SHARED_FWDESC_H
23#define __CARL9170_SHARED_FWDESC_H
24
25/* NOTE: Don't mess with the order of the flags! */
26enum carl9170fw_feature_list {
27 /* Always set */
28 CARL9170FW_DUMMY_FEATURE,
29
30 /*
 31	 * Indicates that this image has a special boot block which
 32	 * prevents legacy drivers from driving the firmware.
33 */
34 CARL9170FW_MINIBOOT,
35
36 /* usb registers are initialized by the firmware */
37 CARL9170FW_USB_INIT_FIRMWARE,
38
 39	/* command traps & notifications are sent through EP2 */
40 CARL9170FW_USB_RESP_EP2,
41
42 /* usb download (app -> fw) stream */
43 CARL9170FW_USB_DOWN_STREAM,
44
45 /* usb upload (fw -> app) stream */
46 CARL9170FW_USB_UP_STREAM,
47
 48	/* unusable - reserved to flag non-functional debug firmware images */
49 CARL9170FW_UNUSABLE,
50
51 /* AR9170_CMD_RF_INIT, AR9170_CMD_FREQ_START, AR9170_CMD_FREQUENCY */
52 CARL9170FW_COMMAND_PHY,
53
54 /* AR9170_CMD_EKEY, AR9170_CMD_DKEY */
55 CARL9170FW_COMMAND_CAM,
56
57 /* Firmware has a software Content After Beacon Queueing mechanism */
58 CARL9170FW_WLANTX_CAB,
59
60 /* The firmware is capable of responding to incoming BAR frames */
61 CARL9170FW_HANDLE_BACK_REQ,
62
63 /* GPIO Interrupt | CARL9170_RSP_GPIO */
64 CARL9170FW_GPIO_INTERRUPT,
65
66 /* Firmware PSM support | CARL9170_CMD_PSM */
67 CARL9170FW_PSM,
68
69 /* KEEP LAST */
70 __CARL9170FW_FEATURE_NUM
71};
72
73#define OTUS_MAGIC "OTAR"
74#define MOTD_MAGIC "MOTD"
75#define FIX_MAGIC "FIX\0"
76#define DBG_MAGIC "DBG\0"
77#define CHK_MAGIC "CHK\0"
78#define LAST_MAGIC "LAST"
79
80#define CARL9170FW_SET_DAY(d) (((d) - 1) % 31)
81#define CARL9170FW_SET_MONTH(m) ((((m) - 1) % 12) * 31)
82#define CARL9170FW_SET_YEAR(y) (((y) - 10) * 372)
83
84#define CARL9170FW_GET_DAY(d) (((d) % 31) + 1)
85#define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1)
86#define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10)
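/*
 * Worked example for the date packing above; the year is carried as an
 * offset from 2000 (fw.c prints it with "2%03d"), so 2010-09-14 packs as
 * SET_YEAR(10) + SET_MONTH(9) + SET_DAY(14) = 0 + 248 + 13 = 261 and
 * unpacks via GET_YEAR(261) = 10, GET_MONTH(261) = 9, GET_DAY(261) = 14.
 */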
87
88struct carl9170fw_desc_head {
89 u8 magic[4];
90 __le16 length;
91 u8 min_ver;
92 u8 cur_ver;
93} __packed;
94#define CARL9170FW_DESC_HEAD_SIZE \
95 (sizeof(struct carl9170fw_desc_head))
96
97#define CARL9170FW_OTUS_DESC_MIN_VER 6
98#define CARL9170FW_OTUS_DESC_CUR_VER 6
99struct carl9170fw_otus_desc {
100 struct carl9170fw_desc_head head;
101 __le32 feature_set;
102 __le32 fw_address;
103 __le32 bcn_addr;
104 __le16 bcn_len;
105 __le16 miniboot_size;
106 __le16 tx_frag_len;
107 __le16 rx_max_frame_len;
108 u8 tx_descs;
109 u8 cmd_bufs;
110 u8 api_ver;
111 u8 vif_num;
112} __packed;
113#define CARL9170FW_OTUS_DESC_SIZE \
114 (sizeof(struct carl9170fw_otus_desc))
115
116#define CARL9170FW_MOTD_STRING_LEN 24
117#define CARL9170FW_MOTD_RELEASE_LEN 20
118#define CARL9170FW_MOTD_DESC_MIN_VER 1
119#define CARL9170FW_MOTD_DESC_CUR_VER 2
120struct carl9170fw_motd_desc {
121 struct carl9170fw_desc_head head;
122 __le32 fw_year_month_day;
123 char desc[CARL9170FW_MOTD_STRING_LEN];
124 char release[CARL9170FW_MOTD_RELEASE_LEN];
125} __packed;
126#define CARL9170FW_MOTD_DESC_SIZE \
127 (sizeof(struct carl9170fw_motd_desc))
128
129#define CARL9170FW_FIX_DESC_MIN_VER 1
130#define CARL9170FW_FIX_DESC_CUR_VER 2
131struct carl9170fw_fix_entry {
132 __le32 address;
133 __le32 mask;
134 __le32 value;
135} __packed;
136
137struct carl9170fw_fix_desc {
138 struct carl9170fw_desc_head head;
139 struct carl9170fw_fix_entry data[0];
140} __packed;
141#define CARL9170FW_FIX_DESC_SIZE \
142 (sizeof(struct carl9170fw_fix_desc))
143
144#define CARL9170FW_DBG_DESC_MIN_VER 1
145#define CARL9170FW_DBG_DESC_CUR_VER 2
146struct carl9170fw_dbg_desc {
147 struct carl9170fw_desc_head head;
148
149 __le32 bogoclock_addr;
150 __le32 counter_addr;
151 __le32 rx_total_addr;
152 __le32 rx_overrun_addr;
153
154 /* Put your debugging definitions here */
155} __packed;
156#define CARL9170FW_DBG_DESC_SIZE \
157 (sizeof(struct carl9170fw_dbg_desc))
158
159#define CARL9170FW_CHK_DESC_MIN_VER 1
160#define CARL9170FW_CHK_DESC_CUR_VER 2
161struct carl9170fw_chk_desc {
162 struct carl9170fw_desc_head head;
163 __le32 fw_crc32;
164 __le32 hdr_crc32;
165} __packed;
166#define CARL9170FW_CHK_DESC_SIZE \
167 (sizeof(struct carl9170fw_chk_desc))
168
169#define CARL9170FW_LAST_DESC_MIN_VER 1
170#define CARL9170FW_LAST_DESC_CUR_VER 2
171struct carl9170fw_last_desc {
172 struct carl9170fw_desc_head head;
173} __packed;
174#define CARL9170FW_LAST_DESC_SIZE \
175	(sizeof(struct carl9170fw_last_desc))
176
177#define CARL9170FW_DESC_MAX_LENGTH 8192
178
179#define CARL9170FW_FILL_DESC(_magic, _length, _min_ver, _cur_ver) \
180 .head = { \
181 .magic = _magic, \
182 .length = cpu_to_le16(_length), \
183 .min_ver = _min_ver, \
184 .cur_ver = _cur_ver, \
185 }
186
187static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
188 u8 magic[4], __le16 length,
189 u8 min_ver, u8 cur_ver)
190{
191 head->magic[0] = magic[0];
192 head->magic[1] = magic[1];
193 head->magic[2] = magic[2];
194 head->magic[3] = magic[3];
195
196 head->length = length;
197 head->min_ver = min_ver;
198 head->cur_ver = cur_ver;
199}
200
201#define carl9170fw_for_each_hdr(desc, fw_desc) \
202 for (desc = fw_desc; \
203 memcmp(desc->magic, LAST_MAGIC, 4) && \
204 le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE && \
205 le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH; \
206 desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length)))
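/*
 * Usage sketch for the iterator above (fw_desc_base and handle() are
 * placeholders), mirroring how carl9170_fw_find_desc() walks the chain:
 *
 *	const struct carl9170fw_desc_head *iter;
 *
 *	carl9170fw_for_each_hdr(iter, fw_desc_base)
 *		handle(iter);
 *
 * The loop deliberately stops *at* the LAST descriptor, which is why
 * carl9170_fw_find_desc() re-checks the final iter after the loop.
 */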
207
208#define CHECK_HDR_VERSION(head, _min_ver) \
209	(((head)->cur_ver < _min_ver) || ((head)->min_ver > _min_ver))
210
211static inline bool carl9170fw_supports(__le32 list, u8 feature)
212{
213 return le32_to_cpu(list) & BIT(feature);
214}
215
216static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head,
217 const u8 descid[4], u16 min_len,
218 u8 compatible_revision)
219{
220 if (descid[0] == head->magic[0] && descid[1] == head->magic[1] &&
221 descid[2] == head->magic[2] && descid[3] == head->magic[3] &&
222 !CHECK_HDR_VERSION(head, compatible_revision) &&
223 (le16_to_cpu(head->length) >= min_len))
224 return true;
225
226 return false;
227}
228
229#define CARL9170FW_MIN_SIZE 32
230#define CARL9170FW_MAX_SIZE 16384
231
232static inline bool carl9170fw_size_check(unsigned int len)
233{
234 return (len <= CARL9170FW_MAX_SIZE && len >= CARL9170FW_MIN_SIZE);
235}
236
237#endif /* __CARL9170_SHARED_FWDESC_H */
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
new file mode 100644
index 000000000000..b1292ac5b703
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -0,0 +1,736 @@
1/*
2 * Shared Atheros AR9170 Header
3 *
4 * Register map, hardware-specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#ifndef __CARL9170_SHARED_HW_H
40#define __CARL9170_SHARED_HW_H
41
42/* High Speed UART */
43#define AR9170_UART_REG_BASE 0x1c0000
44
45/* Definitions of interrupt registers */
46#define AR9170_UART_REG_RX_BUFFER (AR9170_UART_REG_BASE + 0x000)
47#define AR9170_UART_REG_TX_HOLDING (AR9170_UART_REG_BASE + 0x004)
48#define AR9170_UART_REG_FIFO_CONTROL (AR9170_UART_REG_BASE + 0x010)
49#define AR9170_UART_FIFO_CTRL_RESET_RX_FIFO 0x02
50#define AR9170_UART_FIFO_CTRL_RESET_TX_FIFO 0x04
51
52#define AR9170_UART_REG_LINE_CONTROL (AR9170_UART_REG_BASE + 0x014)
53#define AR9170_UART_REG_MODEM_CONTROL (AR9170_UART_REG_BASE + 0x018)
54#define AR9170_UART_MODEM_CTRL_DTR_BIT 0x01
55#define AR9170_UART_MODEM_CTRL_RTS_BIT 0x02
56#define AR9170_UART_MODEM_CTRL_INTERNAL_LOOP_BACK 0x10
57#define AR9170_UART_MODEM_CTRL_AUTO_RTS 0x20
58#define AR9170_UART_MODEM_CTRL_AUTO_CTR 0x40
59
60#define AR9170_UART_REG_LINE_STATUS (AR9170_UART_REG_BASE + 0x01c)
61#define AR9170_UART_LINE_STS_RX_DATA_READY 0x01
62#define AR9170_UART_LINE_STS_RX_BUFFER_OVERRUN 0x02
63#define AR9170_UART_LINE_STS_RX_BREAK_IND 0x10
64#define AR9170_UART_LINE_STS_TX_FIFO_NEAR_EMPTY 0x20
65#define AR9170_UART_LINE_STS_TRANSMITTER_EMPTY 0x40
66
67#define AR9170_UART_REG_MODEM_STATUS (AR9170_UART_REG_BASE + 0x020)
68#define AR9170_UART_MODEM_STS_CTS_CHANGE 0x01
69#define AR9170_UART_MODEM_STS_DSR_CHANGE 0x02
70#define AR9170_UART_MODEM_STS_DCD_CHANGE 0x08
71#define AR9170_UART_MODEM_STS_CTS_COMPL 0x10
72#define AR9170_UART_MODEM_STS_DSR_COMPL 0x20
73#define AR9170_UART_MODEM_STS_DCD_COMPL 0x80
74
75#define AR9170_UART_REG_SCRATCH (AR9170_UART_REG_BASE + 0x024)
76#define AR9170_UART_REG_DIVISOR_LSB (AR9170_UART_REG_BASE + 0x028)
77#define AR9170_UART_REG_DIVISOR_MSB (AR9170_UART_REG_BASE + 0x02c)
78#define AR9170_UART_REG_WORD_RX_BUFFER (AR9170_UART_REG_BASE + 0x034)
79#define AR9170_UART_REG_WORD_TX_HOLDING (AR9170_UART_REG_BASE + 0x038)
80#define AR9170_UART_REG_FIFO_COUNT (AR9170_UART_REG_BASE + 0x03c)
81#define AR9170_UART_REG_REMAINDER (AR9170_UART_REG_BASE + 0x04c)
82
83/* Timer */
84#define AR9170_TIMER_REG_BASE 0x1c1000
85
86#define AR9170_TIMER_REG_WATCH_DOG (AR9170_TIMER_REG_BASE + 0x000)
87#define AR9170_TIMER_REG_TIMER0 (AR9170_TIMER_REG_BASE + 0x010)
88#define AR9170_TIMER_REG_TIMER1 (AR9170_TIMER_REG_BASE + 0x014)
89#define AR9170_TIMER_REG_TIMER2 (AR9170_TIMER_REG_BASE + 0x018)
90#define AR9170_TIMER_REG_TIMER3 (AR9170_TIMER_REG_BASE + 0x01c)
91#define AR9170_TIMER_REG_TIMER4 (AR9170_TIMER_REG_BASE + 0x020)
92#define AR9170_TIMER_REG_CONTROL (AR9170_TIMER_REG_BASE + 0x024)
93#define AR9170_TIMER_CTRL_DISABLE_CLOCK 0x100
94
95#define AR9170_TIMER_REG_INTERRUPT (AR9170_TIMER_REG_BASE + 0x028)
96#define AR9170_TIMER_INT_TIMER0 0x001
97#define AR9170_TIMER_INT_TIMER1 0x002
98#define AR9170_TIMER_INT_TIMER2 0x004
99#define AR9170_TIMER_INT_TIMER3 0x008
100#define AR9170_TIMER_INT_TIMER4 0x010
101#define AR9170_TIMER_INT_TICK_TIMER 0x100
102
103#define AR9170_TIMER_REG_TICK_TIMER (AR9170_TIMER_REG_BASE + 0x030)
104#define AR9170_TIMER_REG_CLOCK_LOW (AR9170_TIMER_REG_BASE + 0x040)
105#define AR9170_TIMER_REG_CLOCK_HIGH (AR9170_TIMER_REG_BASE + 0x044)
106
107#define AR9170_MAC_REG_BASE 0x1c3000
108
109#define AR9170_MAC_REG_POWER_STATE_CTRL (AR9170_MAC_REG_BASE + 0x500)
110#define AR9170_MAC_POWER_STATE_CTRL_RESET 0x20
111
112#define AR9170_MAC_REG_MAC_POWER_STATE_CTRL (AR9170_MAC_REG_BASE + 0x50c)
113
114#define AR9170_MAC_REG_INT_CTRL (AR9170_MAC_REG_BASE + 0x510)
115#define AR9170_MAC_INT_TXC BIT(0)
116#define AR9170_MAC_INT_RXC BIT(1)
117#define AR9170_MAC_INT_RETRY_FAIL BIT(2)
118#define AR9170_MAC_INT_WAKEUP BIT(3)
119#define AR9170_MAC_INT_ATIM BIT(4)
120#define AR9170_MAC_INT_DTIM BIT(5)
121#define AR9170_MAC_INT_CFG_BCN BIT(6)
122#define AR9170_MAC_INT_ABORT BIT(7)
123#define AR9170_MAC_INT_QOS BIT(8)
124#define AR9170_MAC_INT_MIMO_PS BIT(9)
125#define AR9170_MAC_INT_KEY_GEN BIT(10)
126#define AR9170_MAC_INT_DECRY_NOUSER BIT(11)
127#define AR9170_MAC_INT_RADAR BIT(12)
128#define AR9170_MAC_INT_QUIET_FRAME BIT(13)
129#define AR9170_MAC_INT_PRETBTT BIT(14)
130
131#define AR9170_MAC_REG_TSF_L (AR9170_MAC_REG_BASE + 0x514)
132#define AR9170_MAC_REG_TSF_H (AR9170_MAC_REG_BASE + 0x518)
133
134#define AR9170_MAC_REG_ATIM_WINDOW (AR9170_MAC_REG_BASE + 0x51c)
135#define AR9170_MAC_ATIM_PERIOD_S 0
136#define AR9170_MAC_ATIM_PERIOD 0x0000ffff
137
138#define AR9170_MAC_REG_BCN_PERIOD (AR9170_MAC_REG_BASE + 0x520)
139#define AR9170_MAC_BCN_PERIOD_S 0
140#define AR9170_MAC_BCN_PERIOD 0x0000ffff
141#define AR9170_MAC_BCN_DTIM_S 16
142#define AR9170_MAC_BCN_DTIM 0x00ff0000
143#define AR9170_MAC_BCN_AP_MODE BIT(24)
144#define AR9170_MAC_BCN_IBSS_MODE BIT(25)
145#define AR9170_MAC_BCN_PWR_MGT BIT(26)
146#define AR9170_MAC_BCN_STA_PS BIT(27)
147
148#define AR9170_MAC_REG_PRETBTT (AR9170_MAC_REG_BASE + 0x524)
149#define AR9170_MAC_PRETBTT_S 0
150#define AR9170_MAC_PRETBTT 0x0000ffff
151#define AR9170_MAC_PRETBTT2_S 16
152#define AR9170_MAC_PRETBTT2 0xffff0000
153
154#define AR9170_MAC_REG_MAC_ADDR_L (AR9170_MAC_REG_BASE + 0x610)
155#define AR9170_MAC_REG_MAC_ADDR_H (AR9170_MAC_REG_BASE + 0x614)
156#define AR9170_MAC_REG_BSSID_L (AR9170_MAC_REG_BASE + 0x618)
157#define AR9170_MAC_REG_BSSID_H (AR9170_MAC_REG_BASE + 0x61c)
158
159#define AR9170_MAC_REG_GROUP_HASH_TBL_L (AR9170_MAC_REG_BASE + 0x624)
160#define AR9170_MAC_REG_GROUP_HASH_TBL_H (AR9170_MAC_REG_BASE + 0x628)
161
162#define AR9170_MAC_REG_RX_TIMEOUT (AR9170_MAC_REG_BASE + 0x62c)
163
164#define AR9170_MAC_REG_BASIC_RATE (AR9170_MAC_REG_BASE + 0x630)
165#define AR9170_MAC_REG_MANDATORY_RATE (AR9170_MAC_REG_BASE + 0x634)
166#define AR9170_MAC_REG_RTS_CTS_RATE (AR9170_MAC_REG_BASE + 0x638)
167#define AR9170_MAC_REG_BACKOFF_PROTECT (AR9170_MAC_REG_BASE + 0x63c)
168#define AR9170_MAC_REG_RX_THRESHOLD (AR9170_MAC_REG_BASE + 0x640)
169#define AR9170_MAC_REG_AFTER_PNP (AR9170_MAC_REG_BASE + 0x648)
170#define AR9170_MAC_REG_RX_PE_DELAY (AR9170_MAC_REG_BASE + 0x64c)
171
172#define AR9170_MAC_REG_DYNAMIC_SIFS_ACK (AR9170_MAC_REG_BASE + 0x658)
173#define AR9170_MAC_REG_SNIFFER (AR9170_MAC_REG_BASE + 0x674)
174#define AR9170_MAC_SNIFFER_ENABLE_PROMISC BIT(0)
175#define AR9170_MAC_SNIFFER_DEFAULTS 0x02000000
176#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678)
177#define AR9170_MAC_ENCRYPTION_RX_SOFTWARE BIT(3)
178#define AR9170_MAC_ENCRYPTION_DEFAULTS 0x70
179
180#define AR9170_MAC_REG_MISC_680 (AR9170_MAC_REG_BASE + 0x680)
181#define AR9170_MAC_REG_MISC_684 (AR9170_MAC_REG_BASE + 0x684)
182#define AR9170_MAC_REG_TX_UNDERRUN (AR9170_MAC_REG_BASE + 0x688)
183
184#define AR9170_MAC_REG_FRAMETYPE_FILTER (AR9170_MAC_REG_BASE + 0x68c)
185#define AR9170_MAC_FTF_ASSOC_REQ BIT(0)
186#define AR9170_MAC_FTF_ASSOC_RESP BIT(1)
187#define AR9170_MAC_FTF_REASSOC_REQ BIT(2)
188#define AR9170_MAC_FTF_REASSOC_RESP BIT(3)
189#define AR9170_MAC_FTF_PRB_REQ BIT(4)
190#define AR9170_MAC_FTF_PRB_RESP BIT(5)
191#define AR9170_MAC_FTF_BIT6 BIT(6)
192#define AR9170_MAC_FTF_BIT7 BIT(7)
193#define AR9170_MAC_FTF_BEACON BIT(8)
194#define AR9170_MAC_FTF_ATIM BIT(9)
195#define AR9170_MAC_FTF_DEASSOC BIT(10)
196#define AR9170_MAC_FTF_AUTH BIT(11)
197#define AR9170_MAC_FTF_DEAUTH BIT(12)
198#define AR9170_MAC_FTF_BIT13 BIT(13)
199#define AR9170_MAC_FTF_BIT14 BIT(14)
200#define AR9170_MAC_FTF_BIT15 BIT(15)
201#define AR9170_MAC_FTF_BAR BIT(24)
202#define AR9170_MAC_FTF_BA BIT(25)
203#define AR9170_MAC_FTF_PSPOLL BIT(26)
204#define AR9170_MAC_FTF_RTS BIT(27)
205#define AR9170_MAC_FTF_CTS BIT(28)
206#define AR9170_MAC_FTF_ACK BIT(29)
207#define AR9170_MAC_FTF_CFE BIT(30)
208#define AR9170_MAC_FTF_CFE_ACK BIT(31)
209#define AR9170_MAC_FTF_DEFAULTS 0x0500ffff
210#define AR9170_MAC_FTF_MONITOR 0xff00ffff
211
212#define AR9170_MAC_REG_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0x690)
213#define AR9170_MAC_REG_ACK_TPC (AR9170_MAC_REG_BASE + 0x694)
214#define AR9170_MAC_REG_EIFS_AND_SIFS (AR9170_MAC_REG_BASE + 0x698)
215#define AR9170_MAC_REG_RX_TIMEOUT_COUNT (AR9170_MAC_REG_BASE + 0x69c)
216#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6a0)
217#define AR9170_MAC_REG_RX_CRC32 (AR9170_MAC_REG_BASE + 0x6a4)
218#define AR9170_MAC_REG_RX_CRC16 (AR9170_MAC_REG_BASE + 0x6a8)
219#define AR9170_MAC_REG_RX_ERR_DECRYPTION_UNI (AR9170_MAC_REG_BASE + 0x6ac)
220#define AR9170_MAC_REG_RX_OVERRUN (AR9170_MAC_REG_BASE + 0x6b0)
221#define AR9170_MAC_REG_RX_ERR_DECRYPTION_MUL (AR9170_MAC_REG_BASE + 0x6bc)
222#define AR9170_MAC_REG_TX_BLOCKACKS (AR9170_MAC_REG_BASE + 0x6c0)
223#define AR9170_MAC_REG_NAV_COUNT (AR9170_MAC_REG_BASE + 0x6c4)
224#define AR9170_MAC_REG_BACKOFF_STATUS (AR9170_MAC_REG_BASE + 0x6c8)
225#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6cc)
226
227#define AR9170_MAC_REG_TX_COMPLETE (AR9170_MAC_REG_BASE + 0x6d4)
228
229#define AR9170_MAC_REG_CHANNEL_BUSY (AR9170_MAC_REG_BASE + 0x6e8)
230#define AR9170_MAC_REG_EXT_BUSY (AR9170_MAC_REG_BASE + 0x6ec)
231
232#define AR9170_MAC_REG_SLOT_TIME (AR9170_MAC_REG_BASE + 0x6f0)
233#define AR9170_MAC_REG_TX_TOTAL (AR9170_MAC_REG_BASE + 0x6f4)
234#define AR9170_MAC_REG_ACK_FC (AR9170_MAC_REG_BASE + 0x6f8)
235
236#define AR9170_MAC_REG_CAM_MODE (AR9170_MAC_REG_BASE + 0x700)
237#define AR9170_MAC_CAM_IBSS 0xe0
238#define AR9170_MAC_CAM_AP 0xa1
239#define AR9170_MAC_CAM_STA 0x2
240#define AR9170_MAC_CAM_AP_WDS 0x3
241#define AR9170_MAC_CAM_DEFAULTS (0xf << 24)
242#define AR9170_MAC_CAM_HOST_PENDING 0x80000000
243
244#define AR9170_MAC_REG_CAM_ROLL_CALL_TBL_L (AR9170_MAC_REG_BASE + 0x704)
245#define AR9170_MAC_REG_CAM_ROLL_CALL_TBL_H (AR9170_MAC_REG_BASE + 0x708)
246
247#define AR9170_MAC_REG_CAM_ADDR (AR9170_MAC_REG_BASE + 0x70c)
248#define AR9170_MAC_CAM_ADDR_WRITE 0x80000000
249#define AR9170_MAC_REG_CAM_DATA0 (AR9170_MAC_REG_BASE + 0x720)
250#define AR9170_MAC_REG_CAM_DATA1 (AR9170_MAC_REG_BASE + 0x724)
251#define AR9170_MAC_REG_CAM_DATA2 (AR9170_MAC_REG_BASE + 0x728)
252#define AR9170_MAC_REG_CAM_DATA3 (AR9170_MAC_REG_BASE + 0x72c)
253
254#define AR9170_MAC_REG_CAM_DBG0 (AR9170_MAC_REG_BASE + 0x730)
255#define AR9170_MAC_REG_CAM_DBG1 (AR9170_MAC_REG_BASE + 0x734)
256#define AR9170_MAC_REG_CAM_DBG2 (AR9170_MAC_REG_BASE + 0x738)
257#define AR9170_MAC_REG_CAM_STATE (AR9170_MAC_REG_BASE + 0x73c)
258#define AR9170_MAC_CAM_STATE_READ_PENDING 0x40000000
259#define AR9170_MAC_CAM_STATE_WRITE_PENDING 0x80000000
260
261#define AR9170_MAC_REG_CAM_TXKEY (AR9170_MAC_REG_BASE + 0x740)
262#define AR9170_MAC_REG_CAM_RXKEY (AR9170_MAC_REG_BASE + 0x750)
263
264#define AR9170_MAC_REG_CAM_TX_ENC_TYPE (AR9170_MAC_REG_BASE + 0x760)
265#define AR9170_MAC_REG_CAM_RX_ENC_TYPE (AR9170_MAC_REG_BASE + 0x770)
266#define AR9170_MAC_REG_CAM_TX_SERACH_HIT (AR9170_MAC_REG_BASE + 0x780)
267#define AR9170_MAC_REG_CAM_RX_SERACH_HIT (AR9170_MAC_REG_BASE + 0x790)
268
269#define AR9170_MAC_REG_AC0_CW (AR9170_MAC_REG_BASE + 0xb00)
270#define AR9170_MAC_REG_AC1_CW (AR9170_MAC_REG_BASE + 0xb04)
271#define AR9170_MAC_REG_AC2_CW (AR9170_MAC_REG_BASE + 0xb08)
272#define AR9170_MAC_REG_AC3_CW (AR9170_MAC_REG_BASE + 0xb0c)
273#define AR9170_MAC_REG_AC4_CW (AR9170_MAC_REG_BASE + 0xb10)
274#define AR9170_MAC_REG_AC2_AC1_AC0_AIFS (AR9170_MAC_REG_BASE + 0xb14)
275#define AR9170_MAC_REG_AC4_AC3_AC2_AIFS (AR9170_MAC_REG_BASE + 0xb18)
276#define AR9170_MAC_REG_TXOP_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0xb1c)
277#define AR9170_MAC_REG_TXOP_ACK_INTERVAL (AR9170_MAC_REG_BASE + 0xb20)
278#define AR9170_MAC_REG_CONTENTION_POINT (AR9170_MAC_REG_BASE + 0xb24)
279#define AR9170_MAC_REG_RETRY_MAX (AR9170_MAC_REG_BASE + 0xb28)
280#define AR9170_MAC_REG_TID_CFACK_CFEND_RATE (AR9170_MAC_REG_BASE + 0xb2c)
281#define AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND (AR9170_MAC_REG_BASE + 0xb30)
282#define AR9170_MAC_REG_TKIP_TSC (AR9170_MAC_REG_BASE + 0xb34)
283#define AR9170_MAC_REG_TXOP_DURATION (AR9170_MAC_REG_BASE + 0xb38)
284#define AR9170_MAC_REG_TX_QOS_THRESHOLD (AR9170_MAC_REG_BASE + 0xb3c)
285#define AR9170_MAC_REG_QOS_PRIORITY_VIRTUAL_CCA (AR9170_MAC_REG_BASE + 0xb40)
286#define AR9170_MAC_VIRTUAL_CCA_Q0 BIT(15)
287#define AR9170_MAC_VIRTUAL_CCA_Q1 BIT(16)
288#define AR9170_MAC_VIRTUAL_CCA_Q2 BIT(17)
289#define AR9170_MAC_VIRTUAL_CCA_Q3 BIT(18)
290#define AR9170_MAC_VIRTUAL_CCA_Q4 BIT(19)
291#define AR9170_MAC_VIRTUAL_CCA_ALL (0xf8000)
292
293#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xb44)
294#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xb48)
295
296#define AR9170_MAC_REG_AMPDU_COUNT (AR9170_MAC_REG_BASE + 0xb88)
297#define AR9170_MAC_REG_MPDU_COUNT (AR9170_MAC_REG_BASE + 0xb8c)
298
299#define AR9170_MAC_REG_AMPDU_FACTOR (AR9170_MAC_REG_BASE + 0xb9c)
300#define AR9170_MAC_AMPDU_FACTOR 0x7f0000
301#define AR9170_MAC_AMPDU_FACTOR_S 16
302#define AR9170_MAC_REG_AMPDU_DENSITY (AR9170_MAC_REG_BASE + 0xba0)
303#define AR9170_MAC_AMPDU_DENSITY 0x7
304#define AR9170_MAC_AMPDU_DENSITY_S 0
305
306#define AR9170_MAC_REG_FCS_SELECT (AR9170_MAC_REG_BASE + 0xbb0)
307#define AR9170_MAC_FCS_SWFCS 0x1
308#define AR9170_MAC_FCS_FIFO_PROT 0x4
309
310#define AR9170_MAC_REG_RTS_CTS_TPC (AR9170_MAC_REG_BASE + 0xbb4)
311#define AR9170_MAC_REG_CFEND_QOSNULL_TPC (AR9170_MAC_REG_BASE + 0xbb8)
312
313#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xc00)
314#define AR9170_MAC_REG_RX_CONTROL (AR9170_MAC_REG_BASE + 0xc40)
315#define AR9170_MAC_RX_CTRL_DEAGG 0x1
316#define AR9170_MAC_RX_CTRL_SHORT_FILTER 0x2
317#define AR9170_MAC_RX_CTRL_SA_DA_SEARCH 0x20
318#define AR9170_MAC_RX_CTRL_PASS_TO_HOST BIT(28)
319#define AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER BIT(30)
320
321#define AR9170_MAC_REG_RX_CONTROL_1 (AR9170_MAC_REG_BASE + 0xc44)
322
323#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xc50)
324
325#define AR9170_MAC_REG_RX_MPDU (AR9170_MAC_REG_BASE + 0xca0)
326#define AR9170_MAC_REG_RX_DROPPED_MPDU (AR9170_MAC_REG_BASE + 0xca4)
327#define AR9170_MAC_REG_RX_DEL_MPDU (AR9170_MAC_REG_BASE + 0xca8)
328#define AR9170_MAC_REG_RX_PHY_MISC_ERROR (AR9170_MAC_REG_BASE + 0xcac)
329#define AR9170_MAC_REG_RX_PHY_XR_ERROR (AR9170_MAC_REG_BASE + 0xcb0)
330#define AR9170_MAC_REG_RX_PHY_OFDM_ERROR (AR9170_MAC_REG_BASE + 0xcb4)
331#define AR9170_MAC_REG_RX_PHY_CCK_ERROR (AR9170_MAC_REG_BASE + 0xcb8)
332#define AR9170_MAC_REG_RX_PHY_HT_ERROR (AR9170_MAC_REG_BASE + 0xcbc)
333#define AR9170_MAC_REG_RX_PHY_TOTAL (AR9170_MAC_REG_BASE + 0xcc0)
334
335#define AR9170_MAC_REG_DMA_TXQ_ADDR (AR9170_MAC_REG_BASE + 0xd00)
336#define AR9170_MAC_REG_DMA_TXQ_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd04)
337#define AR9170_MAC_REG_DMA_TXQ0_ADDR (AR9170_MAC_REG_BASE + 0xd00)
338#define AR9170_MAC_REG_DMA_TXQ0_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd04)
339#define AR9170_MAC_REG_DMA_TXQ1_ADDR (AR9170_MAC_REG_BASE + 0xd08)
340#define AR9170_MAC_REG_DMA_TXQ1_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd0c)
341#define AR9170_MAC_REG_DMA_TXQ2_ADDR (AR9170_MAC_REG_BASE + 0xd10)
342#define AR9170_MAC_REG_DMA_TXQ2_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd14)
343#define AR9170_MAC_REG_DMA_TXQ3_ADDR (AR9170_MAC_REG_BASE + 0xd18)
344#define AR9170_MAC_REG_DMA_TXQ3_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd1c)
345#define AR9170_MAC_REG_DMA_TXQ4_ADDR (AR9170_MAC_REG_BASE + 0xd20)
346#define AR9170_MAC_REG_DMA_TXQ4_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd24)
347#define AR9170_MAC_REG_DMA_RXQ_ADDR (AR9170_MAC_REG_BASE + 0xd28)
348#define AR9170_MAC_REG_DMA_RXQ_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd2c)
349
350#define AR9170_MAC_REG_DMA_TRIGGER (AR9170_MAC_REG_BASE + 0xd30)
351#define AR9170_DMA_TRIGGER_TXQ0 BIT(0)
352#define AR9170_DMA_TRIGGER_TXQ1 BIT(1)
353#define AR9170_DMA_TRIGGER_TXQ2 BIT(2)
354#define AR9170_DMA_TRIGGER_TXQ3 BIT(3)
355#define AR9170_DMA_TRIGGER_TXQ4 BIT(4)
356#define AR9170_DMA_TRIGGER_RXQ BIT(8)
357
358#define AR9170_MAC_REG_DMA_WLAN_STATUS (AR9170_MAC_REG_BASE + 0xd38)
359#define AR9170_MAC_REG_DMA_STATUS (AR9170_MAC_REG_BASE + 0xd3c)
360
361#define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xd7c)
362#define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f
363#define AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0
364#define AR9170_MAC_TXRX_MPI_RX_MPI_MASK 0x000f0000
365#define AR9170_MAC_TXRX_MPI_RX_TO_MASK 0xfff00000
366
367#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xd84)
368#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xd88)
369#define AR9170_MAC_BCN_LENGTH_MAX 256
370
371#define AR9170_MAC_REG_BCN_STATUS (AR9170_MAC_REG_BASE + 0xd8c)
372
373#define AR9170_MAC_REG_BCN_PLCP (AR9170_MAC_REG_BASE + 0xd90)
374#define AR9170_MAC_REG_BCN_CTRL (AR9170_MAC_REG_BASE + 0xd94)
375#define AR9170_BCN_CTRL_READY 0x01
376#define AR9170_BCN_CTRL_LOCK 0x02
377
378#define AR9170_MAC_REG_BCN_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd98)
379#define AR9170_MAC_REG_BCN_COUNT (AR9170_MAC_REG_BASE + 0xd9c)
380
381
382#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xda0)
383#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xda4)
384
385#define AR9170_MAC_REG_DMA_TXQX_ADDR_CURR (AR9170_MAC_REG_BASE + 0xdc0)
386
387/* Random number generator */
388#define AR9170_RAND_REG_BASE 0x1d0000
389
390#define AR9170_RAND_REG_NUM (AR9170_RAND_REG_BASE + 0x000)
391#define AR9170_RAND_REG_MODE (AR9170_RAND_REG_BASE + 0x004)
392#define AR9170_RAND_MODE_MANUAL 0x000
393#define AR9170_RAND_MODE_FREE 0x001
394
395/* GPIO */
396#define AR9170_GPIO_REG_BASE 0x1d0100
397#define AR9170_GPIO_REG_PORT_TYPE (AR9170_GPIO_REG_BASE + 0x000)
398#define AR9170_GPIO_REG_PORT_DATA (AR9170_GPIO_REG_BASE + 0x004)
399#define AR9170_GPIO_PORT_LED_0 1
400#define AR9170_GPIO_PORT_LED_1 2
401/* WPS Button GPIO for TP-Link TL-WN821N */
402#define AR9170_GPIO_PORT_WPS_BUTTON_PRESSED 4
403
404/* Memory Controller */
405#define AR9170_MC_REG_BASE 0x1d1000
406
407#define AR9170_MC_REG_FLASH_WAIT_STATE (AR9170_MC_REG_BASE + 0x000)
408#define AR9170_MC_REG_SEEPROM_WP0 (AR9170_MC_REG_BASE + 0x400)
409#define AR9170_MC_REG_SEEPROM_WP1 (AR9170_MC_REG_BASE + 0x404)
410#define AR9170_MC_REG_SEEPROM_WP2 (AR9170_MC_REG_BASE + 0x408)
411
412/* Interrupt Controller */
413#define AR9170_MAX_INT_SRC 9
414#define AR9170_INT_REG_BASE 0x1d2000
415
416#define AR9170_INT_REG_FLAG (AR9170_INT_REG_BASE + 0x000)
417#define AR9170_INT_REG_FIQ_MASK (AR9170_INT_REG_BASE + 0x004)
418#define AR9170_INT_REG_IRQ_MASK (AR9170_INT_REG_BASE + 0x008)
419/* INT_REG_FLAG, INT_REG_FIQ_MASK and INT_REG_IRQ_MASK */
420#define AR9170_INT_FLAG_WLAN 0x001
421#define AR9170_INT_FLAG_PTAB_BIT 0x002
422#define AR9170_INT_FLAG_SE_BIT 0x004
423#define AR9170_INT_FLAG_UART_BIT 0x008
424#define AR9170_INT_FLAG_TIMER_BIT 0x010
425#define AR9170_INT_FLAG_EXT_BIT 0x020
426#define AR9170_INT_FLAG_SW_BIT 0x040
427#define AR9170_INT_FLAG_USB_BIT 0x080
428#define AR9170_INT_FLAG_ETHERNET_BIT 0x100
429
430#define AR9170_INT_REG_PRIORITY1 (AR9170_INT_REG_BASE + 0x00c)
431#define AR9170_INT_REG_PRIORITY2 (AR9170_INT_REG_BASE + 0x010)
432#define AR9170_INT_REG_PRIORITY3 (AR9170_INT_REG_BASE + 0x014)
433#define AR9170_INT_REG_EXT_INT_CONTROL (AR9170_INT_REG_BASE + 0x018)
434#define AR9170_INT_REG_SW_INT_CONTROL (AR9170_INT_REG_BASE + 0x01c)
435#define AR9170_INT_SW_INT_ENABLE 0x1
436
437#define AR9170_INT_REG_FIQ_ENCODE (AR9170_INT_REG_BASE + 0x020)
438#define AR9170_INT_INT_IRQ_ENCODE (AR9170_INT_REG_BASE + 0x024)
439
440/* Power Management */
441#define AR9170_PWR_REG_BASE 0x1d4000
442
443#define AR9170_PWR_REG_POWER_STATE (AR9170_PWR_REG_BASE + 0x000)
444
445#define AR9170_PWR_REG_RESET (AR9170_PWR_REG_BASE + 0x004)
446#define AR9170_PWR_RESET_COMMIT_RESET_MASK BIT(0)
447#define AR9170_PWR_RESET_WLAN_MASK BIT(1)
448#define AR9170_PWR_RESET_DMA_MASK BIT(2)
449#define AR9170_PWR_RESET_BRIDGE_MASK BIT(3)
450#define AR9170_PWR_RESET_AHB_MASK BIT(9)
451#define AR9170_PWR_RESET_BB_WARM_RESET BIT(10)
452#define AR9170_PWR_RESET_BB_COLD_RESET BIT(11)
453#define AR9170_PWR_RESET_ADDA_CLK_COLD_RESET BIT(12)
454#define AR9170_PWR_RESET_PLL BIT(13)
455#define AR9170_PWR_RESET_USB_PLL BIT(14)
456
457#define AR9170_PWR_REG_CLOCK_SEL (AR9170_PWR_REG_BASE + 0x008)
458#define AR9170_PWR_CLK_AHB_40MHZ 0
459#define AR9170_PWR_CLK_AHB_20_22MHZ 1
460#define AR9170_PWR_CLK_AHB_40_44MHZ 2
461#define AR9170_PWR_CLK_AHB_80_88MHZ 3
462#define AR9170_PWR_CLK_DAC_160_INV_DLY 0x70
463
464#define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010)
465#define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014)
466#define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020)
467
468/* Faraday USB Controller */
469#define AR9170_USB_REG_BASE 0x1e1000
470
471#define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000)
472#define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0)
473#define AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2)
474#define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6)
475
476#define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001)
477#define AR9170_USB_DEVICE_ADDRESS_CONFIGURE BIT(7)
478
479#define AR9170_USB_REG_TEST (AR9170_USB_REG_BASE + 0x002)
480#define AR9170_USB_REG_PHY_TEST_SELECT (AR9170_USB_REG_BASE + 0x008)
481#define AR9170_USB_REG_CX_CONFIG_STATUS (AR9170_USB_REG_BASE + 0x00b)
482#define AR9170_USB_REG_EP0_DATA (AR9170_USB_REG_BASE + 0x00c)
483#define AR9170_USB_REG_EP0_DATA1 (AR9170_USB_REG_BASE + 0x00c)
484#define AR9170_USB_REG_EP0_DATA2 (AR9170_USB_REG_BASE + 0x00d)
485
486#define AR9170_USB_REG_INTR_MASK_BYTE_0 (AR9170_USB_REG_BASE + 0x011)
487#define AR9170_USB_REG_INTR_MASK_BYTE_1 (AR9170_USB_REG_BASE + 0x012)
488#define AR9170_USB_REG_INTR_MASK_BYTE_2 (AR9170_USB_REG_BASE + 0x013)
489#define AR9170_USB_REG_INTR_MASK_BYTE_3 (AR9170_USB_REG_BASE + 0x014)
490#define AR9170_USB_REG_INTR_MASK_BYTE_4 (AR9170_USB_REG_BASE + 0x015)
491#define AR9170_USB_INTR_DISABLE_OUT_INT (BIT(7) | BIT(6))
492
493#define AR9170_USB_REG_INTR_MASK_BYTE_5 (AR9170_USB_REG_BASE + 0x016)
494#define AR9170_USB_REG_INTR_MASK_BYTE_6 (AR9170_USB_REG_BASE + 0x017)
495#define AR9170_USB_INTR_DISABLE_IN_INT BIT(6)
496
497#define AR9170_USB_REG_INTR_MASK_BYTE_7 (AR9170_USB_REG_BASE + 0x018)
498
499#define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020)
500
501#define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021)
502#define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022)
503#define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023)
504#define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024)
505#define AR9170_USB_REG_INTR_SOURCE_4 (AR9170_USB_REG_BASE + 0x025)
506#define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026)
507#define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027)
508#define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028)
509
510#define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030)
511#define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030)
512#define AR9170_USB_REG_EP2_MAP (AR9170_USB_REG_BASE + 0x031)
513#define AR9170_USB_REG_EP3_MAP (AR9170_USB_REG_BASE + 0x032)
514#define AR9170_USB_REG_EP4_MAP (AR9170_USB_REG_BASE + 0x033)
515#define AR9170_USB_REG_EP5_MAP (AR9170_USB_REG_BASE + 0x034)
516#define AR9170_USB_REG_EP6_MAP (AR9170_USB_REG_BASE + 0x035)
517#define AR9170_USB_REG_EP7_MAP (AR9170_USB_REG_BASE + 0x036)
518#define AR9170_USB_REG_EP8_MAP (AR9170_USB_REG_BASE + 0x037)
519#define AR9170_USB_REG_EP9_MAP (AR9170_USB_REG_BASE + 0x038)
520#define AR9170_USB_REG_EP10_MAP (AR9170_USB_REG_BASE + 0x039)
521
522#define AR9170_USB_REG_EP_IN_MAX_SIZE_HIGH (AR9170_USB_REG_BASE + 0x03f)
523#define AR9170_USB_EP_IN_TOGGLE 0x10
524
525#define AR9170_USB_REG_EP_IN_MAX_SIZE_LOW (AR9170_USB_REG_BASE + 0x03e)
526
527#define AR9170_USB_REG_EP_OUT_MAX_SIZE_HIGH (AR9170_USB_REG_BASE + 0x05f)
528#define AR9170_USB_EP_OUT_TOGGLE 0x10
529
530#define AR9170_USB_REG_EP_OUT_MAX_SIZE_LOW (AR9170_USB_REG_BASE + 0x05e)
531
532#define AR9170_USB_REG_EP3_BYTE_COUNT_HIGH (AR9170_USB_REG_BASE + 0x0ae)
533#define AR9170_USB_REG_EP3_BYTE_COUNT_LOW (AR9170_USB_REG_BASE + 0x0be)
534#define AR9170_USB_REG_EP4_BYTE_COUNT_HIGH (AR9170_USB_REG_BASE + 0x0af)
535#define AR9170_USB_REG_EP4_BYTE_COUNT_LOW (AR9170_USB_REG_BASE + 0x0bf)
536
537#define AR9170_USB_REG_FIFO_MAP (AR9170_USB_REG_BASE + 0x080)
538#define AR9170_USB_REG_FIFO0_MAP (AR9170_USB_REG_BASE + 0x080)
539#define AR9170_USB_REG_FIFO1_MAP (AR9170_USB_REG_BASE + 0x081)
540#define AR9170_USB_REG_FIFO2_MAP (AR9170_USB_REG_BASE + 0x082)
541#define AR9170_USB_REG_FIFO3_MAP (AR9170_USB_REG_BASE + 0x083)
542#define AR9170_USB_REG_FIFO4_MAP (AR9170_USB_REG_BASE + 0x084)
543#define AR9170_USB_REG_FIFO5_MAP (AR9170_USB_REG_BASE + 0x085)
544#define AR9170_USB_REG_FIFO6_MAP (AR9170_USB_REG_BASE + 0x086)
545#define AR9170_USB_REG_FIFO7_MAP (AR9170_USB_REG_BASE + 0x087)
546#define AR9170_USB_REG_FIFO8_MAP (AR9170_USB_REG_BASE + 0x088)
547#define AR9170_USB_REG_FIFO9_MAP (AR9170_USB_REG_BASE + 0x089)
548
549#define AR9170_USB_REG_FIFO_CONFIG (AR9170_USB_REG_BASE + 0x090)
550#define AR9170_USB_REG_FIFO0_CONFIG (AR9170_USB_REG_BASE + 0x090)
551#define AR9170_USB_REG_FIFO1_CONFIG (AR9170_USB_REG_BASE + 0x091)
552#define AR9170_USB_REG_FIFO2_CONFIG (AR9170_USB_REG_BASE + 0x092)
553#define AR9170_USB_REG_FIFO3_CONFIG (AR9170_USB_REG_BASE + 0x093)
554#define AR9170_USB_REG_FIFO4_CONFIG (AR9170_USB_REG_BASE + 0x094)
555#define AR9170_USB_REG_FIFO5_CONFIG (AR9170_USB_REG_BASE + 0x095)
556#define AR9170_USB_REG_FIFO6_CONFIG (AR9170_USB_REG_BASE + 0x096)
557#define AR9170_USB_REG_FIFO7_CONFIG (AR9170_USB_REG_BASE + 0x097)
558#define AR9170_USB_REG_FIFO8_CONFIG (AR9170_USB_REG_BASE + 0x098)
559#define AR9170_USB_REG_FIFO9_CONFIG (AR9170_USB_REG_BASE + 0x099)
560
561#define AR9170_USB_REG_EP3_DATA (AR9170_USB_REG_BASE + 0x0f8)
562#define AR9170_USB_REG_EP4_DATA (AR9170_USB_REG_BASE + 0x0fc)
563
564#define AR9170_USB_REG_FIFO_SIZE (AR9170_USB_REG_BASE + 0x100)
565#define AR9170_USB_REG_DMA_CTL (AR9170_USB_REG_BASE + 0x108)
566#define AR9170_USB_DMA_CTL_ENABLE_TO_DEVICE BIT(0)
567#define AR9170_USB_DMA_CTL_ENABLE_FROM_DEVICE BIT(1)
568#define AR9170_USB_DMA_CTL_HIGH_SPEED BIT(2)
569#define AR9170_USB_DMA_CTL_UP_PACKET_MODE BIT(3)
570#define AR9170_USB_DMA_CTL_UP_STREAM_S 4
571#define AR9170_USB_DMA_CTL_UP_STREAM (BIT(4) | BIT(5))
572#define AR9170_USB_DMA_CTL_UP_STREAM_4K (0)
573#define AR9170_USB_DMA_CTL_UP_STREAM_8K BIT(4)
574#define AR9170_USB_DMA_CTL_UP_STREAM_16K BIT(5)
575#define AR9170_USB_DMA_CTL_UP_STREAM_32K (BIT(4) | BIT(5))
576#define AR9170_USB_DMA_CTL_DOWN_STREAM BIT(6)
577
578#define AR9170_USB_REG_DMA_STATUS (AR9170_USB_REG_BASE + 0x10c)
579#define AR9170_USB_DMA_STATUS_UP_IDLE BIT(8)
580#define AR9170_USB_DMA_STATUS_DN_IDLE BIT(16)
581
582#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
583#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
584#define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0)
585#define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1))
586
587/* PCI/USB to AHB Bridge */
588#define AR9170_PTA_REG_BASE 0x1e2000
589
590#define AR9170_PTA_REG_CMD (AR9170_PTA_REG_BASE + 0x000)
591#define AR9170_PTA_REG_PARAM1 (AR9170_PTA_REG_BASE + 0x004)
592#define AR9170_PTA_REG_PARAM2 (AR9170_PTA_REG_BASE + 0x008)
593#define AR9170_PTA_REG_PARAM3 (AR9170_PTA_REG_BASE + 0x00c)
594#define AR9170_PTA_REG_RSP (AR9170_PTA_REG_BASE + 0x010)
595#define AR9170_PTA_REG_STATUS1 (AR9170_PTA_REG_BASE + 0x014)
596#define AR9170_PTA_REG_STATUS2 (AR9170_PTA_REG_BASE + 0x018)
597#define AR9170_PTA_REG_STATUS3 (AR9170_PTA_REG_BASE + 0x01c)
598#define AR9170_PTA_REG_AHB_INT_FLAG (AR9170_PTA_REG_BASE + 0x020)
599#define AR9170_PTA_REG_AHB_INT_MASK (AR9170_PTA_REG_BASE + 0x024)
600#define AR9170_PTA_REG_AHB_INT_ACK (AR9170_PTA_REG_BASE + 0x028)
601#define AR9170_PTA_REG_AHB_SCRATCH1 (AR9170_PTA_REG_BASE + 0x030)
602#define AR9170_PTA_REG_AHB_SCRATCH2 (AR9170_PTA_REG_BASE + 0x034)
603#define AR9170_PTA_REG_AHB_SCRATCH3 (AR9170_PTA_REG_BASE + 0x038)
604#define AR9170_PTA_REG_AHB_SCRATCH4 (AR9170_PTA_REG_BASE + 0x03c)
605
606#define AR9170_PTA_REG_SHARE_MEM_CTRL (AR9170_PTA_REG_BASE + 0x124)
607
608/*
609 * PCI to AHB Bridge
610 */
611
612#define AR9170_PTA_REG_INT_FLAG (AR9170_PTA_REG_BASE + 0x100)
613#define AR9170_PTA_INT_FLAG_DN 0x01
614#define AR9170_PTA_INT_FLAG_UP 0x02
615#define AR9170_PTA_INT_FLAG_CMD 0x04
616
617#define AR9170_PTA_REG_INT_MASK (AR9170_PTA_REG_BASE + 0x104)
618#define AR9170_PTA_REG_DN_DMA_ADDRL (AR9170_PTA_REG_BASE + 0x108)
619#define AR9170_PTA_REG_DN_DMA_ADDRH (AR9170_PTA_REG_BASE + 0x10c)
620#define AR9170_PTA_REG_UP_DMA_ADDRL (AR9170_PTA_REG_BASE + 0x110)
621#define AR9170_PTA_REG_UP_DMA_ADDRH (AR9170_PTA_REG_BASE + 0x114)
622#define AR9170_PTA_REG_DN_PEND_TIME (AR9170_PTA_REG_BASE + 0x118)
623#define AR9170_PTA_REG_UP_PEND_TIME (AR9170_PTA_REG_BASE + 0x11c)
624#define AR9170_PTA_REG_CONTROL (AR9170_PTA_REG_BASE + 0x120)
625#define AR9170_PTA_CTRL_4_BEAT_BURST 0x00
626#define AR9170_PTA_CTRL_8_BEAT_BURST 0x01
627#define AR9170_PTA_CTRL_16_BEAT_BURST 0x02
628#define AR9170_PTA_CTRL_LOOPBACK_MODE 0x10
629
630#define AR9170_PTA_REG_MEM_CTRL (AR9170_PTA_REG_BASE + 0x124)
631#define AR9170_PTA_REG_MEM_ADDR (AR9170_PTA_REG_BASE + 0x128)
632#define AR9170_PTA_REG_DN_DMA_TRIGGER (AR9170_PTA_REG_BASE + 0x12c)
633#define AR9170_PTA_REG_UP_DMA_TRIGGER (AR9170_PTA_REG_BASE + 0x130)
634#define AR9170_PTA_REG_DMA_STATUS (AR9170_PTA_REG_BASE + 0x134)
635#define AR9170_PTA_REG_DN_CURR_ADDRL (AR9170_PTA_REG_BASE + 0x138)
636#define AR9170_PTA_REG_DN_CURR_ADDRH (AR9170_PTA_REG_BASE + 0x13c)
637#define AR9170_PTA_REG_UP_CURR_ADDRL (AR9170_PTA_REG_BASE + 0x140)
638#define AR9170_PTA_REG_UP_CURR_ADDRH (AR9170_PTA_REG_BASE + 0x144)
639#define AR9170_PTA_REG_DMA_MODE_CTRL (AR9170_PTA_REG_BASE + 0x148)
640#define AR9170_PTA_DMA_MODE_CTRL_RESET BIT(0)
641#define AR9170_PTA_DMA_MODE_CTRL_DISABLE_USB BIT(1)
642
643/* Protocol Controller Module */
644#define AR9170_MAC_REG_PC_REG_BASE (AR9170_MAC_REG_BASE + 0xe00)
645
646
647#define AR9170_NUM_LEDS 2
648
649/* CAM */
650#define AR9170_CAM_MAX_USER 64
651#define AR9170_CAM_MAX_KEY_LENGTH 16
652
653#define AR9170_SRAM_OFFSET 0x100000
654#define AR9170_SRAM_SIZE 0x18000
655
656#define AR9170_PRAM_OFFSET 0x200000
657#define AR9170_PRAM_SIZE 0x8000
658
659enum cpu_clock {
660 AHB_STATIC_40MHZ = 0,
661 AHB_GMODE_22MHZ = 1,
662 AHB_AMODE_20MHZ = 1,
663 AHB_GMODE_44MHZ = 2,
664 AHB_AMODE_40MHZ = 2,
665 AHB_GMODE_88MHZ = 3,
666 AHB_AMODE_80MHZ = 3
667};
668
669/* USB endpoints */
670enum ar9170_usb_ep {
671 /*
672 * Control EP is always EP 0 (USB SPEC)
673 *
674 * The weird thing is: the original firmware has a few
675 * comments that suggest that the actual EP numbers
676 * are in the 1 to 10 range?!
677 */
678 AR9170_USB_EP_CTRL = 0,
679
680 AR9170_USB_EP_TX,
681 AR9170_USB_EP_RX,
682 AR9170_USB_EP_IRQ,
683 AR9170_USB_EP_CMD,
684 AR9170_USB_NUM_EXTRA_EP = 4,
685
686 __AR9170_USB_NUM_EP,
687
688 __AR9170_USB_NUM_MAX_EP = 10
689};
690
691enum ar9170_usb_fifo {
692 __AR9170_USB_NUM_MAX_FIFO = 10
693};
694
695enum ar9170_tx_queues {
696 AR9170_TXQ0 = 0,
697 AR9170_TXQ1,
698 AR9170_TXQ2,
699 AR9170_TXQ3,
700 AR9170_TXQ_SPECIAL,
701
702 /* keep last */
703 __AR9170_NUM_TX_QUEUES = 5
704};
705
706#define AR9170_TX_STREAM_TAG 0x697e
707#define AR9170_RX_STREAM_TAG 0x4e00
708#define AR9170_RX_STREAM_MAX_SIZE 0xffff
709
710struct ar9170_stream {
711 __le16 length;
712 __le16 tag;
713
714 u8 payload[0];
715};
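/*
 * Sketch (an assumption based on the definitions above, not taken from
 * this patch): every chunk in a USB TX stream is led by a four-byte
 * ar9170_stream header, e.g.
 *
 *	s->tag = cpu_to_le16(AR9170_TX_STREAM_TAG);
 *	s->length = cpu_to_le16(chunk_len);
 *
 * Whether chunk_len counts the header as well is not settled by this
 * hunk.
 */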
716
717#define AR9170_MAX_ACKTABLE_ENTRIES 8
718#define AR9170_MAX_VIRTUAL_MAC 7
719
720#define AR9170_USB_EP_CTRL_MAX 64
721#define AR9170_USB_EP_TX_MAX 512
722#define AR9170_USB_EP_RX_MAX 512
723#define AR9170_USB_EP_IRQ_MAX 64
724#define AR9170_USB_EP_CMD_MAX 64
725
726/* Trigger PRETBTT interrupt 6 Kus earlier */
727#define CARL9170_PRETBTT_KUS 6
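/* editor's note: 1 Kus = 1024 microseconds, i.e. one TU (time unit) */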
728
729#define AR5416_MAX_RATE_POWER 63
730
731#define SET_VAL(reg, value, newvalue) \
732 (value = ((value) & ~reg) | (((newvalue) << reg##_S) & reg))
733
734#define MOD_VAL(reg, value, newvalue) \
735 (((value) & ~reg) | (((newvalue) << reg##_S) & reg))
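
/*
 * editor's note -- an illustrative example, not part of the original
 * header: given a field mask and its shift, e.g.
 *
 *	#define FOO	0x0ff0
 *	#define FOO_S	4
 *
 * SET_VAL(FOO, v, 5) stores 5 into v's FOO field in place, while
 * MOD_VAL(FOO, v, 5) merely evaluates to the modified value.
 */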
736#endif /* __CARL9170_SHARED_HW_H */
diff --git a/drivers/net/wireless/ath/carl9170/led.c b/drivers/net/wireless/ath/carl9170/led.c
new file mode 100644
index 000000000000..4bb2cbd8bd9b
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/led.c
@@ -0,0 +1,190 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * LED handling
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include "carl9170.h"
41#include "cmd.h"
42
43int carl9170_led_set_state(struct ar9170 *ar, const u32 led_state)
44{
45 return carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_DATA, led_state);
46}
47
48int carl9170_led_init(struct ar9170 *ar)
49{
50 int err;
51
52 /* disable LEDs */
53 /* GPIO [0/1 mode: output, 2/3: input] */
54 err = carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3);
55 if (err)
56 goto out;
57
58 /* GPIO 0/1 value: off */
59 err = carl9170_led_set_state(ar, 0);
60
61out:
62 return err;
63}
64
65#ifdef CONFIG_CARL9170_LEDS
66static void carl9170_led_update(struct work_struct *work)
67{
68 struct ar9170 *ar = container_of(work, struct ar9170, led_work.work);
69 int i, tmp = 300, blink_delay = 1000;
70 u32 led_val = 0;
71 bool rerun = false;
72
73 if (!IS_ACCEPTING_CMD(ar))
74 return;
75
76 mutex_lock(&ar->mutex);
77 for (i = 0; i < AR9170_NUM_LEDS; i++) {
78 if (ar->leds[i].registered) {
79 if (ar->leds[i].last_state ||
80 ar->leds[i].toggled) {
81
82 if (ar->leds[i].toggled)
83 tmp = 70 + 200 / (ar->leds[i].toggled);
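				/* editor's note: more toggles -> faster
				 * blinking; 70 + 200/n ms keeps the
				 * delay between ~70 and 270 ms. */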
84
85 if (tmp < blink_delay)
86 blink_delay = tmp;
87
88 led_val |= 1 << i;
89 ar->leds[i].toggled = 0;
90 rerun = true;
91 }
92 }
93 }
94
95 carl9170_led_set_state(ar, led_val);
96 mutex_unlock(&ar->mutex);
97
98 if (!rerun)
99 return;
100
101 ieee80211_queue_delayed_work(ar->hw,
102 &ar->led_work,
103 msecs_to_jiffies(blink_delay));
104}
105
106static void carl9170_led_set_brightness(struct led_classdev *led,
107 enum led_brightness brightness)
108{
109 struct carl9170_led *arl = container_of(led, struct carl9170_led, l);
110 struct ar9170 *ar = arl->ar;
111
112 if (!arl->registered)
113 return;
114
115 if (arl->last_state != !!brightness) {
116 arl->toggled++;
117 arl->last_state = !!brightness;
118 }
119
120 if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled))
121 ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ/10);
122}
123
124static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
125 char *trigger)
126{
127 int err;
128
129 snprintf(ar->leds[i].name, sizeof(ar->leds[i].name),
130 "carl9170-%s::%s", wiphy_name(ar->hw->wiphy), name);
131
132 ar->leds[i].ar = ar;
133 ar->leds[i].l.name = ar->leds[i].name;
134 ar->leds[i].l.brightness_set = carl9170_led_set_brightness;
135 ar->leds[i].l.brightness = 0;
136 ar->leds[i].l.default_trigger = trigger;
137
138 err = led_classdev_register(wiphy_dev(ar->hw->wiphy),
139 &ar->leds[i].l);
140 if (err) {
141 wiphy_err(ar->hw->wiphy, "failed to register %s LED (%d).\n",
142 ar->leds[i].name, err);
143 } else {
144 ar->leds[i].registered = true;
145 }
146
147 return err;
148}
149
150void carl9170_led_unregister(struct ar9170 *ar)
151{
152 int i;
153
154 for (i = 0; i < AR9170_NUM_LEDS; i++)
155 if (ar->leds[i].registered) {
156 led_classdev_unregister(&ar->leds[i].l);
157 ar->leds[i].registered = false;
158 ar->leds[i].toggled = 0;
159 }
160
161 cancel_delayed_work_sync(&ar->led_work);
162}
163
164int carl9170_led_register(struct ar9170 *ar)
165{
166 int err;
167
168 INIT_DELAYED_WORK(&ar->led_work, carl9170_led_update);
169
170 err = carl9170_led_register_led(ar, 0, "tx",
171 ieee80211_get_tx_led_name(ar->hw));
172 if (err)
173 goto fail;
174
175 if (ar->features & CARL9170_ONE_LED)
176 return 0;
177
178 err = carl9170_led_register_led(ar, 1, "assoc",
179 ieee80211_get_assoc_led_name(ar->hw));
180 if (err)
181 goto fail;
182
183 return 0;
184
185fail:
186 carl9170_led_unregister(ar);
187 return err;
188}
189
190#endif /* CONFIG_CARL9170_LEDS */
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
new file mode 100644
index 000000000000..2305bc27151c
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -0,0 +1,604 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * MAC programming
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include <asm/unaligned.h>
40
41#include "carl9170.h"
42#include "cmd.h"
43
44int carl9170_set_dyn_sifs_ack(struct ar9170 *ar)
45{
46 u32 val;
47
48 if (conf_is_ht40(&ar->hw->conf))
49 val = 0x010a;
50 else {
51 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ)
52 val = 0x105;
53 else
54 val = 0x104;
55 }
56
57 return carl9170_write_reg(ar, AR9170_MAC_REG_DYNAMIC_SIFS_ACK, val);
58}
59
60int carl9170_set_rts_cts_rate(struct ar9170 *ar)
61{
62 u32 rts_rate, cts_rate;
63
64 if (conf_is_ht(&ar->hw->conf)) {
65 /* 12 mbit OFDM */
66 rts_rate = 0x1da;
67 cts_rate = 0x10a;
68 } else {
69 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
70 /* 11 mbit CCK */
71 rts_rate = 033;
72 cts_rate = 003;
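			/* editor's note: 033 and 003 are octal
			 * constants, i.e. 0x1b and 0x03 */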
73 } else {
74 /* 6 mbit OFDM */
75 rts_rate = 0x1bb;
76 cts_rate = 0x10b;
77 }
78 }
79
80 return carl9170_write_reg(ar, AR9170_MAC_REG_RTS_CTS_RATE,
81 rts_rate | (cts_rate) << 16);
82}
83
84int carl9170_set_slot_time(struct ar9170 *ar)
85{
86 struct ieee80211_vif *vif;
87 u32 slottime = 20;
88
89 rcu_read_lock();
90 vif = carl9170_get_main_vif(ar);
91 if (!vif) {
92 rcu_read_unlock();
93 return 0;
94 }
95
96 if ((ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ) ||
97 vif->bss_conf.use_short_slot)
98 slottime = 9;
99
100 rcu_read_unlock();
101
102 return carl9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME,
103 slottime << 10);
104}
105
106int carl9170_set_mac_rates(struct ar9170 *ar)
107{
108 struct ieee80211_vif *vif;
109 u32 basic, mandatory;
110
111 rcu_read_lock();
112 vif = carl9170_get_main_vif(ar);
113
114 if (!vif) {
115 rcu_read_unlock();
116 return 0;
117 }
118
119 basic = (vif->bss_conf.basic_rates & 0xf);
120 basic |= (vif->bss_conf.basic_rates & 0xff0) << 4;
121 rcu_read_unlock();
122
123 if (ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
124 mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */
125 else
126 mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */
127
128 carl9170_regwrite_begin(ar);
129 carl9170_regwrite(AR9170_MAC_REG_BASIC_RATE, basic);
130 carl9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, mandatory);
131 carl9170_regwrite_finish();
132
133 return carl9170_regwrite_result();
134}
135
136int carl9170_set_qos(struct ar9170 *ar)
137{
138 carl9170_regwrite_begin(ar);
139
140 carl9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min |
141 (ar->edcf[0].cw_max << 16));
142 carl9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min |
143 (ar->edcf[1].cw_max << 16));
144 carl9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min |
145 (ar->edcf[2].cw_max << 16));
146 carl9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min |
147 (ar->edcf[3].cw_max << 16));
148 carl9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min |
149 (ar->edcf[4].cw_max << 16));
150
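	/*
	 * editor's note: the "aifs * 9 + 10" terms below presumably
	 * convert the AIFS slot count into microseconds (9 us slot time
	 * plus 10 us SIFS); each register packs three 12-bit fields,
	 * with AC2's value split across the register pair.
	 */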
151 carl9170_regwrite(AR9170_MAC_REG_AC2_AC1_AC0_AIFS,
152 ((ar->edcf[0].aifs * 9 + 10)) |
153 ((ar->edcf[1].aifs * 9 + 10) << 12) |
154 ((ar->edcf[2].aifs * 9 + 10) << 24));
155 carl9170_regwrite(AR9170_MAC_REG_AC4_AC3_AC2_AIFS,
156 ((ar->edcf[2].aifs * 9 + 10) >> 8) |
157 ((ar->edcf[3].aifs * 9 + 10) << 4) |
158 ((ar->edcf[4].aifs * 9 + 10) << 16));
159
160 carl9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
161 ar->edcf[0].txop | ar->edcf[1].txop << 16);
162 carl9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
163 ar->edcf[2].txop | ar->edcf[3].txop << 16 |
164 ar->edcf[4].txop << 24);
165
166 carl9170_regwrite_finish();
167
168 return carl9170_regwrite_result();
169}
170
171int carl9170_init_mac(struct ar9170 *ar)
172{
173 carl9170_regwrite_begin(ar);
174
175 /* switch MAC to OTUS interface */
176 carl9170_regwrite(0x1c3600, 0x3);
177
178 carl9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40);
179
180 carl9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0x0);
181
182 carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
183 AR9170_MAC_FTF_MONITOR);
184
185 /* enable MMIC */
186 carl9170_regwrite(AR9170_MAC_REG_SNIFFER,
187 AR9170_MAC_SNIFFER_DEFAULTS);
188
189 carl9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80);
190
191 carl9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70);
192 carl9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000);
193 carl9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10);
194
195 /* CF-END & CF-ACK rate => 24M OFDM */
196 carl9170_regwrite(AR9170_MAC_REG_TID_CFACK_CFEND_RATE, 0x59900000);
197
198 /* NAV protects ACK only (in TXOP) */
199 carl9170_regwrite(AR9170_MAC_REG_TXOP_DURATION, 0x201);
200
201 /* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */
202 /* OTUS set AM to 0x1 */
203 carl9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170);
204
205 carl9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);
206
207 /* Aggregation MAX number and timeout */
208 carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0xa);
209 carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a00);
210
211 carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
212 AR9170_MAC_FTF_DEFAULTS);
213
214 carl9170_regwrite(AR9170_MAC_REG_RX_CONTROL,
215 AR9170_MAC_RX_CTRL_DEAGG |
216 AR9170_MAC_RX_CTRL_SHORT_FILTER);
217
218 /* rate sets */
219 carl9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f);
220 carl9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f);
221 carl9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x0030033);
222
223 /* MIMO response control */
224 carl9170_regwrite(AR9170_MAC_REG_ACK_TPC, 0x4003c1e);
225
226 carl9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff);
227
228 /* set PHY register read timeout (??) */
229 carl9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008);
230
231 /* Disable Rx TimeOut, workaround for BB. */
232 carl9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0);
233
234 /* Set WLAN DMA interrupt mode: generate int per packet */
235 carl9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011);
236
237 carl9170_regwrite(AR9170_MAC_REG_FCS_SELECT,
238 AR9170_MAC_FCS_FIFO_PROT);
239
240 /* Disables the CF_END frame, undocumented register */
241 carl9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND,
242 0x141e0f48);
243
244 /* reset group hash table */
245 carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, 0xffffffff);
246 carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, 0xffffffff);
247
248 /* disable PRETBTT interrupt */
249 carl9170_regwrite(AR9170_MAC_REG_PRETBTT, 0x0);
250 carl9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, 0x0);
251
252 carl9170_regwrite_finish();
253
254 return carl9170_regwrite_result();
255}
256
257static int carl9170_set_mac_reg(struct ar9170 *ar,
258 const u32 reg, const u8 *mac)
259{
260 static const u8 zero[ETH_ALEN] = { 0 };
261
262 if (!mac)
263 mac = zero;
264
265 carl9170_regwrite_begin(ar);
266
267 carl9170_regwrite(reg, get_unaligned_le32(mac));
268 carl9170_regwrite(reg + 4, get_unaligned_le16(mac + 4));
269
270 carl9170_regwrite_finish();
271
272 return carl9170_regwrite_result();
273}
274
275int carl9170_mod_virtual_mac(struct ar9170 *ar, const unsigned int id,
276 const u8 *mac)
277{
278 if (WARN_ON(id >= ar->fw.vif_num))
279 return -EINVAL;
280
281 return carl9170_set_mac_reg(ar,
282 AR9170_MAC_REG_ACK_TABLE + (id - 1) * 8, mac);
283}
284
285int carl9170_update_multicast(struct ar9170 *ar, const u64 mc_hash)
286{
287 int err;
288
289 carl9170_regwrite_begin(ar);
290 carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, mc_hash >> 32);
291 carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, mc_hash);
292 carl9170_regwrite_finish();
293 err = carl9170_regwrite_result();
294 if (err)
295 return err;
296
297 ar->cur_mc_hash = mc_hash;
298 return 0;
299}
300
301int carl9170_set_operating_mode(struct ar9170 *ar)
302{
303 struct ieee80211_vif *vif;
304 struct ath_common *common = &ar->common;
305 u8 *mac_addr, *bssid;
306 u32 cam_mode = AR9170_MAC_CAM_DEFAULTS;
307 u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS;
308 u32 rx_ctrl = AR9170_MAC_RX_CTRL_DEAGG |
309 AR9170_MAC_RX_CTRL_SHORT_FILTER;
310 u32 sniffer = AR9170_MAC_SNIFFER_DEFAULTS;
311 int err = 0;
312
313 rcu_read_lock();
314 vif = carl9170_get_main_vif(ar);
315
316 if (vif) {
317 mac_addr = common->macaddr;
318 bssid = common->curbssid;
319
320 switch (vif->type) {
321 case NL80211_IFTYPE_MESH_POINT:
322 case NL80211_IFTYPE_ADHOC:
323 cam_mode |= AR9170_MAC_CAM_IBSS;
324 break;
325 case NL80211_IFTYPE_AP:
326 cam_mode |= AR9170_MAC_CAM_AP;
327
328 /* iwlagn 802.11n STA Workaround */
329 rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
330 break;
331 case NL80211_IFTYPE_WDS:
332 cam_mode |= AR9170_MAC_CAM_AP_WDS;
333 rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
334 break;
335 case NL80211_IFTYPE_STATION:
336 cam_mode |= AR9170_MAC_CAM_STA;
337 rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
338 break;
339 default:
340 WARN(1, "Unsupported operation mode %x\n", vif->type);
341 err = -EOPNOTSUPP;
342 break;
343 }
344 } else {
345 mac_addr = NULL;
346 bssid = NULL;
347 }
348 rcu_read_unlock();
349
350 if (err)
351 return err;
352
353 if (ar->rx_software_decryption)
354 enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
355
356 if (ar->sniffer_enabled) {
357 rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
358 sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
359 enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
360 }
361
362 err = carl9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr);
363 if (err)
364 return err;
365
366 err = carl9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid);
367 if (err)
368 return err;
369
370 carl9170_regwrite_begin(ar);
371 carl9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer);
372 carl9170_regwrite(AR9170_MAC_REG_CAM_MODE, cam_mode);
373 carl9170_regwrite(AR9170_MAC_REG_ENCRYPTION, enc_mode);
374 carl9170_regwrite(AR9170_MAC_REG_RX_CONTROL, rx_ctrl);
375 carl9170_regwrite_finish();
376
377 return carl9170_regwrite_result();
378}
379
380int carl9170_set_hwretry_limit(struct ar9170 *ar, const unsigned int max_retry)
381{
382 u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111);
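	/* editor's note: multiplying by 0x11111 replicates the retry
	 * count into five 4-bit register fields; the 0x33333 cap
	 * limits each field to 3 retries. */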
383
384 return carl9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp);
385}
386
387int carl9170_set_beacon_timers(struct ar9170 *ar)
388{
389 struct ieee80211_vif *vif;
390 u32 v = 0;
391 u32 pretbtt = 0;
392
393 rcu_read_lock();
394 vif = carl9170_get_main_vif(ar);
395
396 if (vif) {
397 struct carl9170_vif_info *mvif;
398 mvif = (void *) vif->drv_priv;
399
400 if (mvif->enable_beacon && !WARN_ON(!ar->beacon_enabled)) {
401 ar->global_beacon_int = vif->bss_conf.beacon_int /
402 ar->beacon_enabled;
403
404 SET_VAL(AR9170_MAC_BCN_DTIM, v,
405 vif->bss_conf.dtim_period);
406
407 switch (vif->type) {
408 case NL80211_IFTYPE_MESH_POINT:
409 case NL80211_IFTYPE_ADHOC:
410 v |= AR9170_MAC_BCN_IBSS_MODE;
411 break;
412 case NL80211_IFTYPE_AP:
413 v |= AR9170_MAC_BCN_AP_MODE;
414 break;
415 default:
416 WARN_ON_ONCE(1);
417 break;
418 }
419 } else if (vif->type == NL80211_IFTYPE_STATION) {
420 ar->global_beacon_int = vif->bss_conf.beacon_int;
421
422 SET_VAL(AR9170_MAC_BCN_DTIM, v,
423 ar->hw->conf.ps_dtim_period);
424
425 v |= AR9170_MAC_BCN_STA_PS |
426 AR9170_MAC_BCN_PWR_MGT;
427 }
428
429 if (ar->global_beacon_int) {
430 if (ar->global_beacon_int < 15) {
431 rcu_read_unlock();
432 return -ERANGE;
433 }
434
435 ar->global_pretbtt = ar->global_beacon_int -
436 CARL9170_PRETBTT_KUS;
437 } else {
438 ar->global_pretbtt = 0;
439 }
440 } else {
441 ar->global_beacon_int = 0;
442 ar->global_pretbtt = 0;
443 }
444
445 rcu_read_unlock();
446
447 SET_VAL(AR9170_MAC_BCN_PERIOD, v, ar->global_beacon_int);
448 SET_VAL(AR9170_MAC_PRETBTT, pretbtt, ar->global_pretbtt);
449 SET_VAL(AR9170_MAC_PRETBTT2, pretbtt, ar->global_pretbtt);
450
451 carl9170_regwrite_begin(ar);
452 carl9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt);
453 carl9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v);
454 carl9170_regwrite_finish();
455 return carl9170_regwrite_result();
456}
457
458int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
459{
460 struct sk_buff *skb;
461 struct carl9170_vif_info *cvif;
462 __le32 *data, *old = NULL;
463 u32 word, off, addr, len;
464 int i = 0, err = 0;
465
466 rcu_read_lock();
467 cvif = rcu_dereference(ar->beacon_iter);
468retry:
469 if (ar->vifs == 0 || !cvif)
470 goto out_unlock;
471
472 list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
473 if (cvif->active && cvif->enable_beacon)
474 goto found;
475 }
476
477 if (!ar->beacon_enabled || i++)
478 goto out_unlock;
479
480 goto retry;
481
482found:
483 rcu_assign_pointer(ar->beacon_iter, cvif);
484
485 skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
486 NULL, NULL);
487
488 if (!skb) {
489 err = -ENOMEM;
490 goto out_unlock;
491 }
492
493 spin_lock_bh(&ar->beacon_lock);
494 data = (__le32 *)skb->data;
495 if (cvif->beacon)
496 old = (__le32 *)cvif->beacon->data;
497
498 off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
499 addr = ar->fw.beacon_addr + off;
500 len = roundup(skb->len + FCS_LEN, 4);
501
502 if ((off + len) > ar->fw.beacon_max_len) {
503 if (net_ratelimit()) {
504 wiphy_err(ar->hw->wiphy, "beacon does not "
505 "fit into device memory!\n");
506 }
507
508 spin_unlock_bh(&ar->beacon_lock);
509 dev_kfree_skb_any(skb);
510 err = -EINVAL;
511 goto out_unlock;
512 }
513
514 if (len > AR9170_MAC_BCN_LENGTH_MAX) {
515 if (net_ratelimit()) {
516 wiphy_err(ar->hw->wiphy, "no support for beacons "
517 "bigger than %d (yours:%d).\n",
518 AR9170_MAC_BCN_LENGTH_MAX, len);
519 }
520
521 spin_unlock_bh(&ar->beacon_lock);
522 dev_kfree_skb_any(skb);
523 err = -EMSGSIZE;
524 goto out_unlock;
525 }
526
527 carl9170_async_regwrite_begin(ar);
528
529 /* XXX: use skb->cb info */
530 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
531 carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP,
532 ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400);
533 } else {
534 carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP,
535 ((skb->len + FCS_LEN) << 16) + 0x001b);
536 }
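	/*
	 * editor's note: the PLCP word pairs the on-air length
	 * (frame + FCS) with a fixed rate code; the extra << 3 on
	 * 2.4 GHz presumably expresses the CCK length in bits,
	 * while 5 GHz uses a 6 Mbit OFDM rate code.
	 */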
537
538 for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
539 /*
540 * XXX: this may read up to 3 bytes past the
541 * end of the skb's data!!
542 */
543
544 if (old && (data[i] == old[i]))
545 continue;
546
547 word = le32_to_cpu(data[i]);
548 carl9170_async_regwrite(addr + 4 * i, word);
549 }
550 carl9170_async_regwrite_finish();
551
552 dev_kfree_skb_any(cvif->beacon);
553 cvif->beacon = NULL;
554
555 err = carl9170_async_regwrite_result();
556 if (!err)
557 cvif->beacon = skb;
558 spin_unlock_bh(&ar->beacon_lock);
559 if (err)
560 goto out_unlock;
561
562 if (submit) {
563 err = carl9170_bcn_ctrl(ar, cvif->id,
564 CARL9170_BCN_CTRL_CAB_TRIGGER,
565 addr, skb->len + FCS_LEN);
566
567 if (err)
568 goto out_unlock;
569 }
570out_unlock:
571 rcu_read_unlock();
572 return err;
573}
574
575int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
576 const u8 ktype, const u8 keyidx, const u8 *keydata,
577 const int keylen)
578{
579 struct carl9170_set_key_cmd key = { };
580 static const u8 bcast[ETH_ALEN] = {
581 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
582
583 mac = mac ? : bcast;
584
585 key.user = cpu_to_le16(id);
586 key.keyId = cpu_to_le16(keyidx);
587 key.type = cpu_to_le16(ktype);
588 memcpy(&key.macAddr, mac, ETH_ALEN);
589 if (keydata)
590 memcpy(&key.key, keydata, keylen);
591
592 return carl9170_exec_cmd(ar, CARL9170_CMD_EKEY,
593 sizeof(key), (u8 *)&key, 0, NULL);
594}
595
596int carl9170_disable_key(struct ar9170 *ar, const u8 id)
597{
598 struct carl9170_disable_key_cmd key = { };
599
600 key.user = cpu_to_le16(id);
601
602 return carl9170_exec_cmd(ar, CARL9170_CMD_DKEY,
603 sizeof(key), (u8 *)&key, 0, NULL);
604}
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
new file mode 100644
index 000000000000..43de9dfa5820
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -0,0 +1,1855 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/etherdevice.h>
44#include <linux/random.h>
45#include <net/mac80211.h>
46#include <net/cfg80211.h>
47#include "hw.h"
48#include "carl9170.h"
49#include "cmd.h"
50
51static int modparam_nohwcrypt;
52module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
53MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
54
55int modparam_noht;
56module_param_named(noht, modparam_noht, int, S_IRUGO);
57MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
58
59#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
60 .bitrate = (_bitrate), \
61 .flags = (_flags), \
62 .hw_value = (_hw_rate) | (_txpidx) << 4, \
63}
64
65struct ieee80211_rate __carl9170_ratetable[] = {
66 RATE(10, 0, 0, 0),
67 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
68 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
69 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
70 RATE(60, 0xb, 0, 0),
71 RATE(90, 0xf, 0, 0),
72 RATE(120, 0xa, 0, 0),
73 RATE(180, 0xe, 0, 0),
74 RATE(240, 0x9, 0, 0),
75 RATE(360, 0xd, 1, 0),
76 RATE(480, 0x8, 2, 0),
77 RATE(540, 0xc, 3, 0),
78};
79#undef RATE
80
81#define carl9170_g_ratetable (__carl9170_ratetable + 0)
82#define carl9170_g_ratetable_size 12
83#define carl9170_a_ratetable (__carl9170_ratetable + 4)
84#define carl9170_a_ratetable_size 8
85
86/*
87 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
88 * array in phy.c so that we don't have to do frequency lookups!
89 */
90#define CHAN(_freq, _idx) { \
91 .center_freq = (_freq), \
92 .hw_value = (_idx), \
93 .max_power = 18, /* XXX */ \
94}
95
96static struct ieee80211_channel carl9170_2ghz_chantable[] = {
97 CHAN(2412, 0),
98 CHAN(2417, 1),
99 CHAN(2422, 2),
100 CHAN(2427, 3),
101 CHAN(2432, 4),
102 CHAN(2437, 5),
103 CHAN(2442, 6),
104 CHAN(2447, 7),
105 CHAN(2452, 8),
106 CHAN(2457, 9),
107 CHAN(2462, 10),
108 CHAN(2467, 11),
109 CHAN(2472, 12),
110 CHAN(2484, 13),
111};
112
113static struct ieee80211_channel carl9170_5ghz_chantable[] = {
114 CHAN(4920, 14),
115 CHAN(4940, 15),
116 CHAN(4960, 16),
117 CHAN(4980, 17),
118 CHAN(5040, 18),
119 CHAN(5060, 19),
120 CHAN(5080, 20),
121 CHAN(5180, 21),
122 CHAN(5200, 22),
123 CHAN(5220, 23),
124 CHAN(5240, 24),
125 CHAN(5260, 25),
126 CHAN(5280, 26),
127 CHAN(5300, 27),
128 CHAN(5320, 28),
129 CHAN(5500, 29),
130 CHAN(5520, 30),
131 CHAN(5540, 31),
132 CHAN(5560, 32),
133 CHAN(5580, 33),
134 CHAN(5600, 34),
135 CHAN(5620, 35),
136 CHAN(5640, 36),
137 CHAN(5660, 37),
138 CHAN(5680, 38),
139 CHAN(5700, 39),
140 CHAN(5745, 40),
141 CHAN(5765, 41),
142 CHAN(5785, 42),
143 CHAN(5805, 43),
144 CHAN(5825, 44),
145 CHAN(5170, 45),
146 CHAN(5190, 46),
147 CHAN(5210, 47),
148 CHAN(5230, 48),
149};
150#undef CHAN
151
152#define CARL9170_HT_CAP \
153{ \
154 .ht_supported = true, \
155 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
156 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
157 IEEE80211_HT_CAP_SGI_40 | \
158 IEEE80211_HT_CAP_DSSSCCK40 | \
159 IEEE80211_HT_CAP_SM_PS, \
160 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
161 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
162 .mcs = { \
163 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
164 .rx_highest = cpu_to_le16(300), \
165 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
166 }, \
167}
168
169static struct ieee80211_supported_band carl9170_band_2GHz = {
170 .channels = carl9170_2ghz_chantable,
171 .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
172 .bitrates = carl9170_g_ratetable,
173 .n_bitrates = carl9170_g_ratetable_size,
174 .ht_cap = CARL9170_HT_CAP,
175};
176
177static struct ieee80211_supported_band carl9170_band_5GHz = {
178 .channels = carl9170_5ghz_chantable,
179 .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
180 .bitrates = carl9170_a_ratetable,
181 .n_bitrates = carl9170_a_ratetable_size,
182 .ht_cap = CARL9170_HT_CAP,
183};
184
185static void carl9170_ampdu_gc(struct ar9170 *ar)
186{
187 struct carl9170_sta_tid *tid_info;
188 LIST_HEAD(tid_gc);
189
190 rcu_read_lock();
191 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
192 spin_lock_bh(&ar->tx_ampdu_list_lock);
193 if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
194 tid_info->state = CARL9170_TID_STATE_KILLED;
195 list_del_rcu(&tid_info->list);
196 ar->tx_ampdu_list_len--;
197 list_add_tail(&tid_info->tmp_list, &tid_gc);
198 }
199 spin_unlock_bh(&ar->tx_ampdu_list_lock);
200
201 }
202 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
203 rcu_read_unlock();
204
205 synchronize_rcu();
206
207 while (!list_empty(&tid_gc)) {
208 struct sk_buff *skb;
209 tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
210 tmp_list);
211
212 while ((skb = __skb_dequeue(&tid_info->queue)))
213 carl9170_tx_status(ar, skb, false);
214
215 list_del_init(&tid_info->tmp_list);
216 kfree(tid_info);
217 }
218}
219
220static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
221{
222 if (drop_queued) {
223 int i;
224
225 /*
226 * We can only drop frames which have not been uploaded
227 * to the device yet.
228 */
229
230 for (i = 0; i < ar->hw->queues; i++) {
231 struct sk_buff *skb;
232
233 while ((skb = skb_dequeue(&ar->tx_pending[i])))
234 carl9170_tx_status(ar, skb, false);
235 }
236 }
237
238 /* Wait for all other outstanding frames to timeout. */
239 if (atomic_read(&ar->tx_total_queued))
240 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
241}
242
243static void carl9170_flush_ba(struct ar9170 *ar)
244{
245 struct sk_buff_head free;
246 struct carl9170_sta_tid *tid_info;
247 struct sk_buff *skb;
248
249 __skb_queue_head_init(&free);
250
251 rcu_read_lock();
252 spin_lock_bh(&ar->tx_ampdu_list_lock);
253 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
254 if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
255 tid_info->state = CARL9170_TID_STATE_SUSPEND;
256
257 spin_lock(&tid_info->lock);
258 while ((skb = __skb_dequeue(&tid_info->queue)))
259 __skb_queue_tail(&free, skb);
260 spin_unlock(&tid_info->lock);
261 }
262 }
263 spin_unlock_bh(&ar->tx_ampdu_list_lock);
264 rcu_read_unlock();
265
266 while ((skb = __skb_dequeue(&free)))
267 carl9170_tx_status(ar, skb, false);
268}
269
270static void carl9170_zap_queues(struct ar9170 *ar)
271{
272 struct carl9170_vif_info *cvif;
273 unsigned int i;
274
275 carl9170_ampdu_gc(ar);
276
277 carl9170_flush_ba(ar);
278 carl9170_flush(ar, true);
279
280 for (i = 0; i < ar->hw->queues; i++) {
281 spin_lock_bh(&ar->tx_status[i].lock);
282 while (!skb_queue_empty(&ar->tx_status[i])) {
283 struct sk_buff *skb;
284
285 skb = skb_peek(&ar->tx_status[i]);
286 carl9170_tx_get_skb(skb);
287 spin_unlock_bh(&ar->tx_status[i].lock);
288 carl9170_tx_drop(ar, skb);
289 spin_lock_bh(&ar->tx_status[i].lock);
290 carl9170_tx_put_skb(skb);
291 }
292 spin_unlock_bh(&ar->tx_status[i].lock);
293 }
294
295 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
296 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
297 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
298
299 /* reinitialize queues statistics */
300 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
301 for (i = 0; i < ar->hw->queues; i++)
302 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
303
304 for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
305 ar->mem_bitmap[i] = 0;
306
307 rcu_read_lock();
308 list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
309 spin_lock_bh(&ar->beacon_lock);
310 dev_kfree_skb_any(cvif->beacon);
311 cvif->beacon = NULL;
312 spin_unlock_bh(&ar->beacon_lock);
313 }
314 rcu_read_unlock();
315
316 atomic_set(&ar->tx_ampdu_upload, 0);
317 atomic_set(&ar->tx_ampdu_scheduler, 0);
318 atomic_set(&ar->tx_total_pending, 0);
319 atomic_set(&ar->tx_total_queued, 0);
320 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
321}
322
323#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
324do { \
325 queue.aifs = ai_fs; \
326 queue.cw_min = cwmin; \
327 queue.cw_max = cwmax; \
328 queue.txop = _txop; \
329} while (0)
330
331static int carl9170_op_start(struct ieee80211_hw *hw)
332{
333 struct ar9170 *ar = hw->priv;
334 int err, i;
335
336 mutex_lock(&ar->mutex);
337
338 carl9170_zap_queues(ar);
339
340 /* reset QoS defaults */
341 CARL9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT */
342 CARL9170_FILL_QUEUE(ar->edcf[1], 2, 7, 15, 94); /* VIDEO */
343 CARL9170_FILL_QUEUE(ar->edcf[2], 2, 3, 7, 47); /* VOICE */
344 CARL9170_FILL_QUEUE(ar->edcf[3], 7, 15, 1023, 0); /* BACKGROUND */
345 CARL9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */
346
347 ar->current_factor = ar->current_density = -1;
348 /* "The first key is unique." */
349 ar->usedkeys = 1;
350 ar->filter_state = 0;
351 ar->ps.last_action = jiffies;
352 ar->ps.last_slept = jiffies;
353 ar->erp_mode = CARL9170_ERP_AUTO;
354 ar->rx_software_decryption = false;
355 ar->disable_offload = false;
356
357 for (i = 0; i < ar->hw->queues; i++) {
358 ar->queue_stop_timeout[i] = jiffies;
359 ar->max_queue_stop_timeout[i] = 0;
360 }
361
362 atomic_set(&ar->mem_allocs, 0);
363
364 err = carl9170_usb_open(ar);
365 if (err)
366 goto out;
367
368 err = carl9170_init_mac(ar);
369 if (err)
370 goto out;
371
372 err = carl9170_set_qos(ar);
373 if (err)
374 goto out;
375
376 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
377 AR9170_DMA_TRIGGER_RXQ);
378 if (err)
379 goto out;
380
381 /* Clear key-cache */
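	/* editor's note: 64 pairwise CAM entries plus the four group-key
	 * slots (cf. the "i = 64 + key->keyidx" case in op_set_key) */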
382 for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
383 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
384 0, NULL, 0);
385 if (err)
386 goto out;
387
388 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
389 1, NULL, 0);
390 if (err)
391 goto out;
392
393 if (i < AR9170_CAM_MAX_USER) {
394 err = carl9170_disable_key(ar, i);
395 if (err)
396 goto out;
397 }
398 }
399
400 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
401
402 ieee80211_wake_queues(ar->hw);
403 err = 0;
404
405out:
406 mutex_unlock(&ar->mutex);
407 return err;
408}
409
410static void carl9170_cancel_worker(struct ar9170 *ar)
411{
412 cancel_delayed_work_sync(&ar->tx_janitor);
413#ifdef CONFIG_CARL9170_LEDS
414 cancel_delayed_work_sync(&ar->led_work);
415#endif /* CONFIG_CARL9170_LEDS */
416 cancel_work_sync(&ar->ps_work);
417 cancel_work_sync(&ar->ampdu_work);
418}
419
420static void carl9170_op_stop(struct ieee80211_hw *hw)
421{
422 struct ar9170 *ar = hw->priv;
423
424 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
425
426 ieee80211_stop_queues(ar->hw);
427
428 mutex_lock(&ar->mutex);
429 if (IS_ACCEPTING_CMD(ar)) {
430 rcu_assign_pointer(ar->beacon_iter, NULL);
431
432 carl9170_led_set_state(ar, 0);
433
434 /* stop DMA */
435 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
436 carl9170_usb_stop(ar);
437 }
438
439 carl9170_zap_queues(ar);
440 mutex_unlock(&ar->mutex);
441
442 carl9170_cancel_worker(ar);
443}
444
445static void carl9170_restart_work(struct work_struct *work)
446{
447 struct ar9170 *ar = container_of(work, struct ar9170,
448 restart_work);
449 int err;
450
451 ar->usedkeys = 0;
452 ar->filter_state = 0;
453 carl9170_cancel_worker(ar);
454
455 mutex_lock(&ar->mutex);
456 err = carl9170_usb_restart(ar);
457 if (net_ratelimit()) {
458 if (err) {
459 dev_err(&ar->udev->dev, "Failed to restart device "
460 "(%d).\n", err);
461 } else {
462 dev_info(&ar->udev->dev, "device restarted "
463 "successfully.\n");
464 }
465 }
466
467 carl9170_zap_queues(ar);
468 mutex_unlock(&ar->mutex);
469 if (!err) {
470 ar->restart_counter++;
471 atomic_set(&ar->pending_restarts, 0);
472
473 ieee80211_restart_hw(ar->hw);
474 } else {
475 /*
476 * The reset was unsuccessful and the device seems to
477 * be dead. But there's still one option: a low-level
478 * usb subsystem reset...
479 */
480
481 carl9170_usb_reset(ar);
482 }
483}
484
485void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
486{
487 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
488
489 /*
490 * Sometimes, an error can trigger several different reset events.
491 * By ignoring these *surplus* reset events, the device won't be
492 * killed again, right after it has recovered.
493 */
494 if (atomic_inc_return(&ar->pending_restarts) > 1) {
495 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
496 return;
497 }
498
499 ieee80211_stop_queues(ar->hw);
500
501 dev_err(&ar->udev->dev, "restart device (%d)\n", r);
502
503 if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
504 !WARN_ON(r >= __CARL9170_RR_LAST))
505 ar->last_reason = r;
506
507 if (!ar->registered)
508 return;
509
510 if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset)
511 ieee80211_queue_work(ar->hw, &ar->restart_work);
512 else
513 carl9170_usb_reset(ar);
514
515 /*
516 * At this point, the device instance might have vanished or been
517 * disabled, so don't put any code here that accesses the ar9170 struct
518 * without proper protection.
519 */
520}
521
522static int carl9170_init_interface(struct ar9170 *ar,
523 struct ieee80211_vif *vif)
524{
525 struct ath_common *common = &ar->common;
526 int err;
527
528 if (!vif) {
529 WARN_ON_ONCE(IS_STARTED(ar));
530 return 0;
531 }
532
533 memcpy(common->macaddr, vif->addr, ETH_ALEN);
534
535 if (modparam_nohwcrypt ||
536 ((vif->type != NL80211_IFTYPE_STATION) &&
537 (vif->type != NL80211_IFTYPE_AP))) {
538 ar->rx_software_decryption = true;
539 ar->disable_offload = true;
540 }
541
542 err = carl9170_set_operating_mode(ar);
543 return err;
544}
545
546static int carl9170_op_add_interface(struct ieee80211_hw *hw,
547 struct ieee80211_vif *vif)
548{
549 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
550 struct ieee80211_vif *main_vif;
551 struct ar9170 *ar = hw->priv;
552 int vif_id = -1, err = 0;
553
554 mutex_lock(&ar->mutex);
555 rcu_read_lock();
556 if (vif_priv->active) {
557 /*
558 * Skip the interface structure initialization,
559 * if the vif survived the _restart call.
560 */
561 vif_id = vif_priv->id;
562 vif_priv->enable_beacon = false;
563
564 spin_lock_bh(&ar->beacon_lock);
565 dev_kfree_skb_any(vif_priv->beacon);
566 vif_priv->beacon = NULL;
567 spin_unlock_bh(&ar->beacon_lock);
568
569 goto init;
570 }
571
572 main_vif = carl9170_get_main_vif(ar);
573
574 if (main_vif) {
575 switch (main_vif->type) {
576 case NL80211_IFTYPE_STATION:
577 if (vif->type == NL80211_IFTYPE_STATION)
578 break;
579
580 err = -EBUSY;
581 rcu_read_unlock();
582
583 goto unlock;
584
585 case NL80211_IFTYPE_AP:
586 if ((vif->type == NL80211_IFTYPE_STATION) ||
587 (vif->type == NL80211_IFTYPE_WDS) ||
588 (vif->type == NL80211_IFTYPE_AP))
589 break;
590
591 err = -EBUSY;
592 rcu_read_unlock();
593 goto unlock;
594
595 default:
596 rcu_read_unlock();
597 goto unlock;
598 }
599 }
600
601 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
602
603 if (vif_id < 0) {
604 rcu_read_unlock();
605
606 err = -ENOSPC;
607 goto unlock;
608 }
609
610 BUG_ON(ar->vif_priv[vif_id].id != vif_id);
611
612 vif_priv->active = true;
613 vif_priv->id = vif_id;
614 vif_priv->enable_beacon = false;
615 ar->vifs++;
616 list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
617 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
618
619init:
620 if (carl9170_get_main_vif(ar) == vif) {
621 rcu_assign_pointer(ar->beacon_iter, vif_priv);
622 rcu_read_unlock();
623
624 err = carl9170_init_interface(ar, vif);
625 if (err)
626 goto unlock;
627 } else {
628 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
629 rcu_read_unlock();
630
631 if (err)
632 goto unlock;
633 }
634
635unlock:
636 if (err && (vif_id != -1)) {
637 vif_priv->active = false;
638 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
639 ar->vifs--;
640 rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
641 list_del_rcu(&vif_priv->list);
642 mutex_unlock(&ar->mutex);
643 synchronize_rcu();
644 } else {
645 if (ar->vifs > 1)
646 ar->ps.off_override |= PS_OFF_VIF;
647
648 mutex_unlock(&ar->mutex);
649 }
650
651 return err;
652}
653
654static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
655 struct ieee80211_vif *vif)
656{
657 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
658 struct ieee80211_vif *main_vif;
659 struct ar9170 *ar = hw->priv;
660 unsigned int id;
661
662 mutex_lock(&ar->mutex);
663
664 if (WARN_ON_ONCE(!vif_priv->active))
665 goto unlock;
666
667 ar->vifs--;
668
669 rcu_read_lock();
670 main_vif = carl9170_get_main_vif(ar);
671
672 id = vif_priv->id;
673
674 vif_priv->active = false;
675 WARN_ON(vif_priv->enable_beacon);
676 vif_priv->enable_beacon = false;
677 list_del_rcu(&vif_priv->list);
678 rcu_assign_pointer(ar->vif_priv[id].vif, NULL);
679
680 if (vif == main_vif) {
681 rcu_read_unlock();
682
683 if (ar->vifs) {
684 WARN_ON(carl9170_init_interface(ar,
685 carl9170_get_main_vif(ar)));
686 } else {
687 carl9170_set_operating_mode(ar);
688 }
689 } else {
690 rcu_read_unlock();
691
692 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
693 }
694
695 carl9170_update_beacon(ar, false);
696 carl9170_flush_cab(ar, id);
697
698 spin_lock_bh(&ar->beacon_lock);
699 dev_kfree_skb_any(vif_priv->beacon);
700 vif_priv->beacon = NULL;
701 spin_unlock_bh(&ar->beacon_lock);
702
703 bitmap_release_region(&ar->vif_bitmap, id, 0);
704
705 carl9170_set_beacon_timers(ar);
706
707 if (ar->vifs == 1)
708 ar->ps.off_override &= ~PS_OFF_VIF;
709
710unlock:
711 mutex_unlock(&ar->mutex);
712
713 synchronize_rcu();
714}
715
716void carl9170_ps_check(struct ar9170 *ar)
717{
718 ieee80211_queue_work(ar->hw, &ar->ps_work);
719}
720
721/* caller must hold ar->mutex */
722static int carl9170_ps_update(struct ar9170 *ar)
723{
724 bool ps = false;
725 int err = 0;
726
727 if (!ar->ps.off_override)
728 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
729
730 if (ps != ar->ps.state) {
731 err = carl9170_powersave(ar, ps);
732 if (err)
733 return err;
734
735 if (ar->ps.state && !ps) {
736 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
737 ar->ps.last_action);
738 }
739
740 if (ps)
741 ar->ps.last_slept = jiffies;
742
743 ar->ps.last_action = jiffies;
744 ar->ps.state = ps;
745 }
746
747 return 0;
748}
749
750static void carl9170_ps_work(struct work_struct *work)
751{
752 struct ar9170 *ar = container_of(work, struct ar9170,
753 ps_work);
754 mutex_lock(&ar->mutex);
755 if (IS_STARTED(ar))
756 WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
757 mutex_unlock(&ar->mutex);
758}
759
760
761static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
762{
763 struct ar9170 *ar = hw->priv;
764 int err = 0;
765
766 mutex_lock(&ar->mutex);
767 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
768 /* TODO */
769 err = 0;
770 }
771
772 if (changed & IEEE80211_CONF_CHANGE_PS) {
773 err = carl9170_ps_update(ar);
774 if (err)
775 goto out;
776 }
777
778 if (changed & IEEE80211_CONF_CHANGE_POWER) {
779 /* TODO */
780 err = 0;
781 }
782
783 if (changed & IEEE80211_CONF_CHANGE_SMPS) {
784 /* TODO */
785 err = 0;
786 }
787
788 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
789 /* adjust slot time for 5 GHz */
790 err = carl9170_set_slot_time(ar);
791 if (err)
792 goto out;
793
794 err = carl9170_set_channel(ar, hw->conf.channel,
795 hw->conf.channel_type, CARL9170_RFI_NONE);
796 if (err)
797 goto out;
798
799 err = carl9170_set_dyn_sifs_ack(ar);
800 if (err)
801 goto out;
802
803 err = carl9170_set_rts_cts_rate(ar);
804 if (err)
805 goto out;
806 }
807
808out:
809 mutex_unlock(&ar->mutex);
810 return err;
811}
812
813static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
814 struct netdev_hw_addr_list *mc_list)
815{
816 struct netdev_hw_addr *ha;
817 u64 mchash;
818
819 /* always get broadcast frames */
820 mchash = 1ULL << (0xff >> 2);
821
822 netdev_hw_addr_list_for_each(ha, mc_list)
823 mchash |= 1ULL << (ha->addr[5] >> 2);
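	/*
	 * editor's note: the hardware hashes a multicast address to
	 * bit (addr[5] >> 2) of this 64-bit filter; 0xff >> 2 == 0x3f
	 * above, which is why the broadcast bit is always set.
	 */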
824
825 return mchash;
826}
827
828static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
829 unsigned int changed_flags,
830 unsigned int *new_flags,
831 u64 multicast)
832{
833 struct ar9170 *ar = hw->priv;
834
835 /* mask supported flags */
836 *new_flags &= FIF_ALLMULTI | FIF_FCSFAIL | FIF_PLCPFAIL |
837 FIF_OTHER_BSS | FIF_PROMISC_IN_BSS;
838
839 if (!IS_ACCEPTING_CMD(ar))
840 return;
841
842 mutex_lock(&ar->mutex);
843
844 ar->filter_state = *new_flags;
845 /*
846 * We can support more by setting the sniffer bit and
847 * then checking the error flags later.
848 */
849
850 if (changed_flags & FIF_ALLMULTI && *new_flags & FIF_ALLMULTI)
851 multicast = ~0ULL;
852
853 if (multicast != ar->cur_mc_hash)
854 WARN_ON(carl9170_update_multicast(ar, multicast));
855
856 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
857 ar->sniffer_enabled = !!(*new_flags &
858 (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
859
860 WARN_ON(carl9170_set_operating_mode(ar));
861 }
862
863 mutex_unlock(&ar->mutex);
864}
865
866
867static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
868 struct ieee80211_vif *vif,
869 struct ieee80211_bss_conf *bss_conf,
870 u32 changed)
871{
872 struct ar9170 *ar = hw->priv;
873 struct ath_common *common = &ar->common;
874 int err = 0;
875 struct carl9170_vif_info *vif_priv;
876 struct ieee80211_vif *main_vif;
877
878 mutex_lock(&ar->mutex);
879 vif_priv = (void *) vif->drv_priv;
880 main_vif = carl9170_get_main_vif(ar);
881 if (WARN_ON(!main_vif))
882 goto out;
883
884 if (changed & BSS_CHANGED_BEACON_ENABLED) {
885 struct carl9170_vif_info *iter;
886 int i = 0;
887
888 vif_priv->enable_beacon = bss_conf->enable_beacon;
889 rcu_read_lock();
890 list_for_each_entry_rcu(iter, &ar->vif_list, list) {
891 if (iter->active && iter->enable_beacon)
892 i++;
893
894 }
895 rcu_read_unlock();
896
897 ar->beacon_enabled = i;
898 }
899
900 if (changed & BSS_CHANGED_BEACON) {
901 err = carl9170_update_beacon(ar, false);
902 if (err)
903 goto out;
904 }
905
906 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
907 BSS_CHANGED_BEACON_INT)) {
908
909 if (main_vif != vif) {
910 bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
911 bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
912 }
913
914 /*
915 * A hard limit on the beacon/DTIM interval keeps queued broadcast
916 * (CAB) traffic from triggering false stuck-queue alarms.
917 */
918 if (vif->type != NL80211_IFTYPE_STATION &&
919 (bss_conf->beacon_int * bss_conf->dtim_period >=
920 (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
921 err = -EINVAL;
922 goto out;
923 }
924
925 err = carl9170_set_beacon_timers(ar);
926 if (err)
927 goto out;
928 }
929
930 if (changed & BSS_CHANGED_HT) {
931 /* TODO */
932 err = 0;
933 if (err)
934 goto out;
935 }
936
937 if (main_vif != vif)
938 goto out;
939
940 /*
941 * The following settings can only be changed by the
942 * master interface.
943 */
944
945 if (changed & BSS_CHANGED_BSSID) {
946 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
947 err = carl9170_set_operating_mode(ar);
948 if (err)
949 goto out;
950 }
951
952 if (changed & BSS_CHANGED_ASSOC) {
953 ar->common.curaid = bss_conf->aid;
954 err = carl9170_set_beacon_timers(ar);
955 if (err)
956 goto out;
957 }
958
959 if (changed & BSS_CHANGED_ERP_SLOT) {
960 err = carl9170_set_slot_time(ar);
961 if (err)
962 goto out;
963 }
964
965 if (changed & BSS_CHANGED_BASIC_RATES) {
966 err = carl9170_set_mac_rates(ar);
967 if (err)
968 goto out;
969 }
970
971out:
972 WARN_ON_ONCE(err && IS_STARTED(ar));
973 mutex_unlock(&ar->mutex);
974}
975
976static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw)
977{
978 struct ar9170 *ar = hw->priv;
979 struct carl9170_tsf_rsp tsf;
980 int err;
981
982 mutex_lock(&ar->mutex);
983 err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
984 0, NULL, sizeof(tsf), &tsf);
985 mutex_unlock(&ar->mutex);
986 if (WARN_ON(err))
987 return 0;
988
989 return le64_to_cpu(tsf.tsf_64);
990}
991
992static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
993 struct ieee80211_vif *vif,
994 struct ieee80211_sta *sta,
995 struct ieee80211_key_conf *key)
996{
997 struct ar9170 *ar = hw->priv;
998 int err = 0, i;
999 u8 ktype;
1000
1001 if (ar->disable_offload || !vif)
1002 return -EOPNOTSUPP;
1003
1004 /*
1005 * We have to fall back to software encryption whenever
1006 * the user chooses to participate in an IBSS or is connected
1007 * to more than one network.
1008 *
1009 * This is very unfortunate, because some machines cannot handle
1010 * the high throughput speeds of 802.11n networks.
1011 */
1012
1013 if (!is_main_vif(ar, vif))
1014 goto err_softw;
1015
1016 /*
1017 * While the hardware supports a *catch-all* key for offloading
1018 * group-key en-/de-cryption, how the hardware decides which
1019 * keyId maps to which key remains a mystery...
1020 */
1021 if ((vif->type != NL80211_IFTYPE_STATION &&
1022 vif->type != NL80211_IFTYPE_ADHOC) &&
1023 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1024 return -EOPNOTSUPP;
1025
1026 switch (key->cipher) {
1027 case WLAN_CIPHER_SUITE_WEP40:
1028 ktype = AR9170_ENC_ALG_WEP64;
1029 break;
1030 case WLAN_CIPHER_SUITE_WEP104:
1031 ktype = AR9170_ENC_ALG_WEP128;
1032 break;
1033 case WLAN_CIPHER_SUITE_TKIP:
1034 ktype = AR9170_ENC_ALG_TKIP;
1035 break;
1036 case WLAN_CIPHER_SUITE_CCMP:
1037 ktype = AR9170_ENC_ALG_AESCCMP;
1038 break;
1039 default:
1040 return -EOPNOTSUPP;
1041 }
1042
1043 mutex_lock(&ar->mutex);
1044 if (cmd == SET_KEY) {
1045 if (!IS_STARTED(ar)) {
1046 err = -EOPNOTSUPP;
1047 goto out;
1048 }
1049
1050 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1051 sta = NULL;
1052
1053 i = 64 + key->keyidx;
1054 } else {
1055 for (i = 0; i < 64; i++)
1056 if (!(ar->usedkeys & BIT(i)))
1057 break;
1058 if (i == 64)
1059 goto err_softw;
1060 }
1061
1062 key->hw_key_idx = i;
1063
1064 err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1065 ktype, 0, key->key,
1066 min_t(u8, 16, key->keylen));
1067 if (err)
1068 goto out;
1069
1070 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1071 err = carl9170_upload_key(ar, i, sta ? sta->addr :
1072 NULL, ktype, 1,
1073 key->key + 16, 16);
1074 if (err)
1075 goto out;
1076
1077 /*
1078 * the hardware is not capable of generating the
1079 * MMIC for fragmented frames!
1080 */
1081 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1082 }
1083
1084 if (i < 64)
1085 ar->usedkeys |= BIT(i);
1086
1087 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1088 } else {
1089 if (!IS_STARTED(ar)) {
1090 /* The device is gone... together with the key ;-) */
1091 err = 0;
1092 goto out;
1093 }
1094
1095 if (key->hw_key_idx < 64) {
1096 ar->usedkeys &= ~BIT(key->hw_key_idx);
1097 } else {
1098 err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1099 AR9170_ENC_ALG_NONE, 0,
1100 NULL, 0);
1101 if (err)
1102 goto out;
1103
1104 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1105 err = carl9170_upload_key(ar, key->hw_key_idx,
1106 NULL,
1107 AR9170_ENC_ALG_NONE,
1108 1, NULL, 0);
1109 if (err)
1110 goto out;
1111 }
1112
1113 }
1114
1115 err = carl9170_disable_key(ar, key->hw_key_idx);
1116 if (err)
1117 goto out;
1118 }
1119
1120out:
1121 mutex_unlock(&ar->mutex);
1122 return err;
1123
1124err_softw:
1125 if (!ar->rx_software_decryption) {
1126 ar->rx_software_decryption = true;
1127 carl9170_set_operating_mode(ar);
1128 }
1129 mutex_unlock(&ar->mutex);
1130 return -ENOSPC;
1131}
1132
1133static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1134 struct ieee80211_vif *vif,
1135 struct ieee80211_sta *sta)
1136{
1137 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1138 unsigned int i;
1139
1140 if (sta->ht_cap.ht_supported) {
1141 if (sta->ht_cap.ampdu_density > 6) {
1142 /*
1143 * The HW does not support a 16us AMPDU density;
1144 * no HT-Xmit for this station.
1145 */
1146
1147 return 0;
1148 }
1149
1150 for (i = 0; i < CARL9170_NUM_TID; i++)
1151 rcu_assign_pointer(sta_info->agg[i], NULL);
1152
1153 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
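		/* editor's note: 802.11n derives the maximum A-MPDU
		 * length as 2^(13 + ampdu_factor) bytes, which matches
		 * (1 << (3 + ampdu_factor)) when counted in KiB. */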
1154 sta_info->ht_sta = true;
1155 }
1156
1157 return 0;
1158}
1159
1160static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1161 struct ieee80211_vif *vif,
1162 struct ieee80211_sta *sta)
1163{
1164 struct ar9170 *ar = hw->priv;
1165 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1166 unsigned int i;
1167 bool cleanup = false;
1168
1169 if (sta->ht_cap.ht_supported) {
1170
1171 sta_info->ht_sta = false;
1172
1173 rcu_read_lock();
1174 for (i = 0; i < CARL9170_NUM_TID; i++) {
1175 struct carl9170_sta_tid *tid_info;
1176
1177 tid_info = rcu_dereference(sta_info->agg[i]);
1178 rcu_assign_pointer(sta_info->agg[i], NULL);
1179
1180 if (!tid_info)
1181 continue;
1182
1183 spin_lock_bh(&ar->tx_ampdu_list_lock);
1184 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1185 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1186 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1187 cleanup = true;
1188 }
1189 rcu_read_unlock();
1190
1191 if (cleanup)
1192 carl9170_ampdu_gc(ar);
1193 }
1194
1195 return 0;
1196}
1197
1198static int carl9170_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1199 const struct ieee80211_tx_queue_params *param)
1200{
1201 struct ar9170 *ar = hw->priv;
1202 int ret;
1203
1204 mutex_lock(&ar->mutex);
1205 if (queue < ar->hw->queues) {
1206 memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1207 ret = carl9170_set_qos(ar);
1208 } else {
1209 ret = -EINVAL;
1210 }
1211
1212 mutex_unlock(&ar->mutex);
1213 return ret;
1214}
1215
1216static void carl9170_ampdu_work(struct work_struct *work)
1217{
1218 struct ar9170 *ar = container_of(work, struct ar9170,
1219 ampdu_work);
1220
1221 if (!IS_STARTED(ar))
1222 return;
1223
1224 mutex_lock(&ar->mutex);
1225 carl9170_ampdu_gc(ar);
1226 mutex_unlock(&ar->mutex);
1227}
1228
1229static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1230 struct ieee80211_vif *vif,
1231 enum ieee80211_ampdu_mlme_action action,
1232 struct ieee80211_sta *sta,
1233 u16 tid, u16 *ssn)
1234{
1235 struct ar9170 *ar = hw->priv;
1236 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1237 struct carl9170_sta_tid *tid_info;
1238
1239 if (modparam_noht)
1240 return -EOPNOTSUPP;
1241
1242 switch (action) {
1243 case IEEE80211_AMPDU_TX_START:
1244 if (WARN_ON_ONCE(!sta_info->ht_sta))
1245 return -EOPNOTSUPP;
1246
1247 rcu_read_lock();
1248 if (rcu_dereference(sta_info->agg[tid])) {
1249 rcu_read_unlock();
1250 return -EBUSY;
1251 }
1252
1253 tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1254 GFP_ATOMIC);
1255 if (!tid_info) {
1256 rcu_read_unlock();
1257 return -ENOMEM;
1258 }
1259
1260 tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1261 tid_info->state = CARL9170_TID_STATE_PROGRESS;
1262 tid_info->tid = tid;
1263 tid_info->max = sta_info->ampdu_max_len;
1264
1265 INIT_LIST_HEAD(&tid_info->list);
1266 INIT_LIST_HEAD(&tid_info->tmp_list);
1267 skb_queue_head_init(&tid_info->queue);
1268 spin_lock_init(&tid_info->lock);
1269
1270 spin_lock_bh(&ar->tx_ampdu_list_lock);
1271 ar->tx_ampdu_list_len++;
1272 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1273 rcu_assign_pointer(sta_info->agg[tid], tid_info);
1274 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1275 rcu_read_unlock();
1276
1277 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1278 break;
1279
1280 case IEEE80211_AMPDU_TX_STOP:
1281 rcu_read_lock();
1282 tid_info = rcu_dereference(sta_info->agg[tid]);
1283 if (tid_info) {
1284 spin_lock_bh(&ar->tx_ampdu_list_lock);
1285 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1286 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1287 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1288 }
1289
1290 rcu_assign_pointer(sta_info->agg[tid], NULL);
1291 rcu_read_unlock();
1292
1293 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1294 ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1295 break;
1296
1297 case IEEE80211_AMPDU_TX_OPERATIONAL:
1298 rcu_read_lock();
1299 tid_info = rcu_dereference(sta_info->agg[tid]);
1300
1301 sta_info->stats[tid].clear = true;
1302
1303 if (tid_info) {
1304 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1305 tid_info->state = CARL9170_TID_STATE_IDLE;
1306 }
1307 rcu_read_unlock();
1308
1309 if (WARN_ON_ONCE(!tid_info))
1310 return -EFAULT;
1311
1312 break;
1313
1314 case IEEE80211_AMPDU_RX_START:
1315 case IEEE80211_AMPDU_RX_STOP:
1316 /* Handled by hardware */
1317 break;
1318
1319 default:
1320 return -EOPNOTSUPP;
1321 }
1322
1323 return 0;
1324}
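The TX_START and TX_STOP arms above follow the standard RCU publish/retire idiom: a fully initialised object is made visible with rcu_assign_pointer() under the list spinlock, and on teardown the pointer is cleared first so new readers cannot find it, while existing readers drain before the object is freed. A condensed sketch of the idiom ('obj' and 'slot' are placeholder names, not driver API):

	/* publish: 'obj' becomes visible the moment the pointer lands */
	spin_lock_bh(&ar->tx_ampdu_list_lock);
	list_add_tail_rcu(&obj->list, &ar->tx_ampdu_list);
	rcu_assign_pointer(slot, obj);
	spin_unlock_bh(&ar->tx_ampdu_list_lock);

	/* retire: hide it first; free only after a grace period */
	rcu_assign_pointer(slot, NULL);
	synchronize_rcu();	/* the driver instead defers the free to
				 * carl9170_ampdu_gc() via ampdu_work */
	kfree(obj);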
1325
1326#ifdef CONFIG_CARL9170_WPC
1327static int carl9170_register_wps_button(struct ar9170 *ar)
1328{
1329 struct input_dev *input;
1330 int err;
1331
1332 if (!(ar->features & CARL9170_WPS_BUTTON))
1333 return 0;
1334
1335 input = input_allocate_device();
1336 if (!input)
1337 return -ENOMEM;
1338
1339 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1340 wiphy_name(ar->hw->wiphy));
1341
1342 snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1343 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1344
1345 input->name = ar->wps.name;
1346 input->phys = ar->wps.phys;
1347 input->id.bustype = BUS_USB;
1348 input->dev.parent = &ar->hw->wiphy->dev;
1349
1350 input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1351
1352 err = input_register_device(input);
1353 if (err) {
1354 input_free_device(input);
1355 return err;
1356 }
1357
1358 ar->wps.pbc = input;
1359 return 0;
1360}
1361#endif /* CONFIG_CARL9170_WPC */
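Once registered, the button is reported through the regular input core. A sketch of how a firmware button event would be delivered (the function name is hypothetical; the actual reporting site is in the command/event path, outside this hunk):

	static void carl9170_wps_button_event(struct ar9170 *ar, bool down)
	{
		if (!ar->wps.pbc)
			return;

		input_report_key(ar->wps.pbc, KEY_WPS_BUTTON, down);
		input_sync(ar->wps.pbc);
	}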
1362
1363static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1364 struct survey_info *survey)
1365{
1366 struct ar9170 *ar = hw->priv;
1367 int err;
1368
1369 if (idx != 0)
1370 return -ENOENT;
1371
1372 mutex_lock(&ar->mutex);
1373 err = carl9170_get_noisefloor(ar);
1374 mutex_unlock(&ar->mutex);
1375 if (err)
1376 return err;
1377
1378 survey->channel = ar->channel;
1379 survey->filled = SURVEY_INFO_NOISE_DBM;
1380 survey->noise = ar->noise[0];
1381 return 0;
1382}
1383
1384static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop)
1385{
1386 struct ar9170 *ar = hw->priv;
1387 unsigned int vid;
1388
1389 mutex_lock(&ar->mutex);
1390 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1391 carl9170_flush_cab(ar, vid);
1392
1393 carl9170_flush(ar, drop);
1394 mutex_unlock(&ar->mutex);
1395}
1396
1397static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1398 struct ieee80211_low_level_stats *stats)
1399{
1400 struct ar9170 *ar = hw->priv;
1401
1402 memset(stats, 0, sizeof(*stats));
1403 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1404 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1405 return 0;
1406}
1407
1408static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1409 struct ieee80211_vif *vif,
1410 enum sta_notify_cmd cmd,
1411 struct ieee80211_sta *sta)
1412{
1413 struct ar9170 *ar = hw->priv;
1414 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1415 struct sk_buff *skb, *tmp;
1416 struct sk_buff_head free;
1417 int i;
1418
1419 switch (cmd) {
1420 case STA_NOTIFY_SLEEP:
1421 /*
1422 * Since the peer is no longer listening, we have to return
1423 * as many SKBs as possible back to the mac80211 stack.
 1424	 * It will deal with the retry procedure once the peer
1425 * has become available again.
1426 *
 1427	 * NB: Ideally, the driver should return all the frames in
1428 * the correct, ascending order. However, I think that this
1429 * functionality should be implemented in the stack and not
1430 * here...
1431 */
1432
1433 __skb_queue_head_init(&free);
1434
1435 if (sta->ht_cap.ht_supported) {
1436 rcu_read_lock();
1437 for (i = 0; i < CARL9170_NUM_TID; i++) {
1438 struct carl9170_sta_tid *tid_info;
1439
1440 tid_info = rcu_dereference(sta_info->agg[i]);
1441
1442 if (!tid_info)
1443 continue;
1444
1445 spin_lock_bh(&ar->tx_ampdu_list_lock);
1446 if (tid_info->state >
1447 CARL9170_TID_STATE_SUSPEND)
1448 tid_info->state =
1449 CARL9170_TID_STATE_SUSPEND;
1450 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1451
1452 spin_lock_bh(&tid_info->lock);
1453 while ((skb = __skb_dequeue(&tid_info->queue)))
1454 __skb_queue_tail(&free, skb);
1455 spin_unlock_bh(&tid_info->lock);
1456
1457 ieee80211_stop_tx_ba_session(sta,
1458 tid_info->tid);
1459 }
1460 rcu_read_unlock();
1461 }
1462
1463 for (i = 0; i < ar->hw->queues; i++) {
1464 spin_lock_bh(&ar->tx_pending[i].lock);
1465 skb_queue_walk_safe(&ar->tx_pending[i], skb, tmp) {
1466 struct _carl9170_tx_superframe *super;
1467 struct ieee80211_hdr *hdr;
1468
1469 super = (void *) skb->data;
1470 hdr = (void *) super->frame_data;
1471
1472 if (compare_ether_addr(hdr->addr1, sta->addr))
1473 continue;
1474
1475 __skb_unlink(skb, &ar->tx_pending[i]);
1476 carl9170_tx_status(ar, skb, false);
1477 }
1478 spin_unlock_bh(&ar->tx_pending[i].lock);
1479 }
1480
1481 while ((skb = __skb_dequeue(&free)))
1482 carl9170_tx_status(ar, skb, false);
1483
1484 break;
1485
1486 case STA_NOTIFY_AWAKE:
1487 if (!sta->ht_cap.ht_supported)
1488 return;
1489
1490 rcu_read_lock();
1491 for (i = 0; i < CARL9170_NUM_TID; i++) {
1492 struct carl9170_sta_tid *tid_info;
1493
1494 tid_info = rcu_dereference(sta_info->agg[i]);
1495
1496 if (!tid_info)
1497 continue;
1498
 1499	 		if (tid_info->state == CARL9170_TID_STATE_SUSPEND)
1500 tid_info->state = CARL9170_TID_STATE_IDLE;
1501 }
1502 rcu_read_unlock();
1503 break;
1504 }
1505}
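Note the drain pattern in the STA_NOTIFY_SLEEP arm: frames are moved onto a local list while the per-tid or per-queue lock is held, and carl9170_tx_status() is only called after the lock is dropped, since status reporting may re-enter mac80211. A condensed sketch of the pattern ('q' stands for any of the tid/pending queues):

	struct sk_buff_head free;
	struct sk_buff *skb;

	__skb_queue_head_init(&free);

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(q)))
		__skb_queue_tail(&free, skb);	/* move only, no callbacks here */
	spin_unlock_bh(&q->lock);

	while ((skb = __skb_dequeue(&free)))
		carl9170_tx_status(ar, skb, false);	/* complete without the lock */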
1506
1507static const struct ieee80211_ops carl9170_ops = {
1508 .start = carl9170_op_start,
1509 .stop = carl9170_op_stop,
1510 .tx = carl9170_op_tx,
1511 .flush = carl9170_op_flush,
1512 .add_interface = carl9170_op_add_interface,
1513 .remove_interface = carl9170_op_remove_interface,
1514 .config = carl9170_op_config,
1515 .prepare_multicast = carl9170_op_prepare_multicast,
1516 .configure_filter = carl9170_op_configure_filter,
1517 .conf_tx = carl9170_op_conf_tx,
1518 .bss_info_changed = carl9170_op_bss_info_changed,
1519 .get_tsf = carl9170_op_get_tsf,
1520 .set_key = carl9170_op_set_key,
1521 .sta_add = carl9170_op_sta_add,
1522 .sta_remove = carl9170_op_sta_remove,
1523 .sta_notify = carl9170_op_sta_notify,
1524 .get_survey = carl9170_op_get_survey,
1525 .get_stats = carl9170_op_get_stats,
1526 .ampdu_action = carl9170_op_ampdu_action,
1527};
1528
1529void *carl9170_alloc(size_t priv_size)
1530{
1531 struct ieee80211_hw *hw;
1532 struct ar9170 *ar;
1533 struct sk_buff *skb;
1534 int i;
1535
1536 /*
 1537	 * This buffer is used for rx stream reconstruction.
1538 * Under heavy load this device (or the transport layer?)
1539 * tends to split the streams into separate rx descriptors.
1540 */
1541
1542 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1543 if (!skb)
1544 goto err_nomem;
1545
1546 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1547 if (!hw)
1548 goto err_nomem;
1549
1550 ar = hw->priv;
1551 ar->hw = hw;
1552 ar->rx_failover = skb;
1553
1554 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1555 ar->rx_has_plcp = false;
1556
1557 /*
1558 * Here's a hidden pitfall!
1559 *
1560 * All 4 AC queues work perfectly well under _legacy_ operation.
 1561	 * However, as soon as aggregation is enabled, the traffic flow
1562 * gets very bumpy. Therefore we have to _switch_ to a
1563 * software AC with a single HW queue.
1564 */
1565 hw->queues = __AR9170_NUM_TXQ;
1566
1567 mutex_init(&ar->mutex);
1568 spin_lock_init(&ar->beacon_lock);
1569 spin_lock_init(&ar->cmd_lock);
1570 spin_lock_init(&ar->tx_stats_lock);
1571 spin_lock_init(&ar->tx_ampdu_list_lock);
1572 spin_lock_init(&ar->mem_lock);
1573 spin_lock_init(&ar->state_lock);
1574 atomic_set(&ar->pending_restarts, 0);
1575 ar->vifs = 0;
1576 for (i = 0; i < ar->hw->queues; i++) {
1577 skb_queue_head_init(&ar->tx_status[i]);
1578 skb_queue_head_init(&ar->tx_pending[i]);
1579 }
1580 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1581 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1582 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1583 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1584 INIT_LIST_HEAD(&ar->tx_ampdu_list);
1585 rcu_assign_pointer(ar->tx_ampdu_iter,
1586 (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1587
1588 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1589 INIT_LIST_HEAD(&ar->vif_list);
1590 init_completion(&ar->tx_flush);
1591
1592 /*
1593 * Note:
 1594	 * IBSS/ADHOC and AP mode are only enabled if the firmware
1595 * supports these modes. The code which will add the
1596 * additional interface_modes is in fw.c.
1597 */
1598 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1599
1600 hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1601 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1602 IEEE80211_HW_SUPPORTS_PS |
1603 IEEE80211_HW_PS_NULLFUNC_STACK |
1604 IEEE80211_HW_SIGNAL_DBM;
1605
1606 if (!modparam_noht) {
1607 /*
 1608		 * see the comment above for why we allow the user
 1609		 * to disable HT via a module parameter.
1610 */
1611 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1612 }
1613
1614 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1615 hw->sta_data_size = sizeof(struct carl9170_sta_info);
1616 hw->vif_data_size = sizeof(struct carl9170_vif_info);
1617
1618 hw->max_rates = CARL9170_TX_MAX_RATES;
1619 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1620
1621 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1622 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1623
1624 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1625 return ar;
1626
1627err_nomem:
1628 kfree_skb(skb);
1629 return ERR_PTR(-ENOMEM);
1630}
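carl9170_alloc() reports failure with ERR_PTR(-ENOMEM) rather than NULL, so callers must test with IS_ERR(). A hedged sketch of the expected call site (the real caller is the USB probe code, which is not part of this hunk; the priv_size argument is an assumption):

	ar = carl9170_alloc(sizeof(*ar));
	if (IS_ERR(ar))
		return PTR_ERR(ar);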
1631
1632static int carl9170_read_eeprom(struct ar9170 *ar)
1633{
1634#define RW 8 /* number of words to read at once */
1635#define RB (sizeof(u32) * RW)
1636 u8 *eeprom = (void *)&ar->eeprom;
1637 __le32 offsets[RW];
1638 int i, j, err;
1639
1640 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1641
1642 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1643#ifndef __CHECKER__
 1644	/* we don't want to handle a trailing remainder */
1645 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1646#endif
1647
1648 for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
1649 for (j = 0; j < RW; j++)
1650 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1651 RB * i + 4 * j);
1652
1653 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1654 RB, (u8 *) &offsets,
1655 RB, eeprom + RB * i);
1656 if (err)
1657 return err;
1658 }
1659
1660#undef RW
1661#undef RB
1662 return 0;
1663}
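The read loop fetches the EEPROM in chunks of RW = 8 consecutive 32-bit words, so chunk i, word j lives at AR9170_EEPROM_START + 32 * i + 4 * j. A standalone userspace sketch of the offset arithmetic (0x1600 stands in for AR9170_EEPROM_START and is an assumption here):

	#include <stdio.h>

	int main(void)
	{
		const unsigned int start = 0x1600, rw = 8, rb = 4 * rw;
		unsigned int i, j;

		for (i = 0; i < 2; i++)		/* first two chunks */
			for (j = 0; j < rw; j++)
				printf("chunk %u, word %u -> 0x%04x\n",
				       i, j, start + rb * i + 4 * j);
		return 0;
	}

Chunk 0 thus covers offsets 0x1600..0x161c and chunk 1 covers 0x1620..0x163c.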
1664
1665static int carl9170_parse_eeprom(struct ar9170 *ar)
1666{
1667 struct ath_regulatory *regulatory = &ar->common.regulatory;
1668 unsigned int rx_streams, tx_streams, tx_params = 0;
1669 int bands = 0;
1670
1671 if (ar->eeprom.length == cpu_to_le16(0xffff))
1672 return -ENODATA;
1673
1674 rx_streams = hweight8(ar->eeprom.rx_mask);
1675 tx_streams = hweight8(ar->eeprom.tx_mask);
1676
1677 if (rx_streams != tx_streams) {
1678 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1679
1680 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1681 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1682
 1683		tx_params |= (tx_streams - 1) <<
1684 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1685
1686 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1687 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1688 }
1689
1690 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1691 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1692 &carl9170_band_2GHz;
1693 bands++;
1694 }
1695 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1696 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1697 &carl9170_band_5GHz;
1698 bands++;
1699 }
1700
1701 /*
 1702	 * I measured this: a band switch takes roughly
 1703	 * 135 ms and a frequency switch about 80 ms.
1704 *
1705 * FIXME: measure these values again once EEPROM settings
1706 * are used, that will influence them!
1707 */
1708 if (bands == 2)
1709 ar->hw->channel_change_time = 135 * 1000;
1710 else
1711 ar->hw->channel_change_time = 80 * 1000;
1712
1713 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1714 regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
1715
1716 /* second part of wiphy init */
1717 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1718
1719 return bands ? 0 : -EINVAL;
1720}
1721
1722static int carl9170_reg_notifier(struct wiphy *wiphy,
1723 struct regulatory_request *request)
1724{
1725 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1726 struct ar9170 *ar = hw->priv;
1727
1728 return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1729}
1730
1731int carl9170_register(struct ar9170 *ar)
1732{
1733 struct ath_regulatory *regulatory = &ar->common.regulatory;
1734 int err = 0, i;
1735
1736 if (WARN_ON(ar->mem_bitmap))
1737 return -EINVAL;
1738
 1739	ar->mem_bitmap = kzalloc(BITS_TO_LONGS(ar->fw.mem_blocks) *
 1740				 sizeof(unsigned long), GFP_KERNEL);
1741
1742 if (!ar->mem_bitmap)
1743 return -ENOMEM;
1744
1745 /* try to read EEPROM, init MAC addr */
1746 err = carl9170_read_eeprom(ar);
1747 if (err)
1748 return err;
1749
1750 err = carl9170_fw_fix_eeprom(ar);
1751 if (err)
1752 return err;
1753
1754 err = carl9170_parse_eeprom(ar);
1755 if (err)
1756 return err;
1757
1758 err = ath_regd_init(regulatory, ar->hw->wiphy,
1759 carl9170_reg_notifier);
1760 if (err)
1761 return err;
1762
1763 if (modparam_noht) {
1764 carl9170_band_2GHz.ht_cap.ht_supported = false;
1765 carl9170_band_5GHz.ht_cap.ht_supported = false;
1766 }
1767
1768 for (i = 0; i < ar->fw.vif_num; i++) {
1769 ar->vif_priv[i].id = i;
1770 ar->vif_priv[i].vif = NULL;
1771 }
1772
1773 err = ieee80211_register_hw(ar->hw);
1774 if (err)
1775 return err;
1776
1777 /* mac80211 interface is now registered */
1778 ar->registered = true;
1779
1780 if (!ath_is_world_regd(regulatory))
1781 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
1782
1783#ifdef CONFIG_CARL9170_DEBUGFS
1784 carl9170_debugfs_register(ar);
1785#endif /* CONFIG_CARL9170_DEBUGFS */
1786
1787 err = carl9170_led_init(ar);
1788 if (err)
1789 goto err_unreg;
1790
1791#ifdef CONFIG_CARL9170_LEDS
1792 err = carl9170_led_register(ar);
1793 if (err)
1794 goto err_unreg;
 1795#endif /* CONFIG_CARL9170_LEDS */
1796
1797#ifdef CONFIG_CARL9170_WPC
1798 err = carl9170_register_wps_button(ar);
1799 if (err)
1800 goto err_unreg;
1801#endif /* CONFIG_CARL9170_WPC */
1802
1803 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
1804 wiphy_name(ar->hw->wiphy));
1805
1806 return 0;
1807
1808err_unreg:
1809 carl9170_unregister(ar);
1810 return err;
1811}
1812
1813void carl9170_unregister(struct ar9170 *ar)
1814{
1815 if (!ar->registered)
1816 return;
1817
1818 ar->registered = false;
1819
1820#ifdef CONFIG_CARL9170_LEDS
1821 carl9170_led_unregister(ar);
1822#endif /* CONFIG_CARL9170_LEDS */
1823
1824#ifdef CONFIG_CARL9170_DEBUGFS
1825 carl9170_debugfs_unregister(ar);
1826#endif /* CONFIG_CARL9170_DEBUGFS */
1827
1828#ifdef CONFIG_CARL9170_WPC
1829 if (ar->wps.pbc) {
1830 input_unregister_device(ar->wps.pbc);
1831 ar->wps.pbc = NULL;
1832 }
1833#endif /* CONFIG_CARL9170_WPC */
1834
1835 carl9170_cancel_worker(ar);
1836 cancel_work_sync(&ar->restart_work);
1837
1838 ieee80211_unregister_hw(ar->hw);
1839}
1840
1841void carl9170_free(struct ar9170 *ar)
1842{
1843 WARN_ON(ar->registered);
1844 WARN_ON(IS_INITIALIZED(ar));
1845
1846 kfree_skb(ar->rx_failover);
1847 ar->rx_failover = NULL;
1848
1849 kfree(ar->mem_bitmap);
1850 ar->mem_bitmap = NULL;
1851
1852 mutex_destroy(&ar->mutex);
1853
1854 ieee80211_free_hw(ar->hw);
1855}
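Taken together, main.c exposes a four-step lifecycle to the transport glue. A sketch of the expected ordering (error handling trimmed; the real sequencing lives in the USB code, not in this hunk):

	ar = carl9170_alloc(priv_size);		/* hw + driver state */
	if (IS_ERR(ar))
		return PTR_ERR(ar);

	err = carl9170_register(ar);		/* eeprom, regdomain, mac80211 */
	if (err) {
		carl9170_free(ar);
		return err;
	}

	/* ... device lifetime ... */

	carl9170_unregister(ar);		/* tear down in reverse order */
	carl9170_free(ar);			/* memory released last */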
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
new file mode 100644
index 000000000000..89deca37a988
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -0,0 +1,1810 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * PHY and RF code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include <linux/bitrev.h>
40#include "carl9170.h"
41#include "cmd.h"
42#include "phy.h"
43
44static int carl9170_init_power_cal(struct ar9170 *ar)
45{
46 carl9170_regwrite_begin(ar);
47
48 carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE_MAX, 0x7f);
49 carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE1, 0x3f3f3f3f);
50 carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE2, 0x3f3f3f3f);
51 carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE3, 0x3f3f3f3f);
52 carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE4, 0x3f3f3f3f);
53 carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE5, 0x3f3f3f3f);
54 carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE6, 0x3f3f3f3f);
55 carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE7, 0x3f3f3f3f);
56 carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE8, 0x3f3f3f3f);
57 carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE9, 0x3f3f3f3f);
58
59 carl9170_regwrite_finish();
60 return carl9170_regwrite_result();
61}
62
63struct carl9170_phy_init {
64 u32 reg, _5ghz_20, _5ghz_40, _2ghz_40, _2ghz_20;
65};
66
67static struct carl9170_phy_init ar5416_phy_init[] = {
68 { 0x1c5800, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
69 { 0x1c5804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, },
70 { 0x1c5808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
71 { 0x1c580c, 0xad848e19, 0xad848e19, 0xad848e19, 0xad848e19, },
72 { 0x1c5810, 0x7d14e000, 0x7d14e000, 0x7d14e000, 0x7d14e000, },
73 { 0x1c5814, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, },
74 { 0x1c5818, 0x00000090, 0x00000090, 0x00000090, 0x00000090, },
75 { 0x1c581c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
76 { 0x1c5820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, },
77 { 0x1c5824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
78 { 0x1c5828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, },
79 { 0x1c582c, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
80 { 0x1c5830, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
81 { 0x1c5834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
82 { 0x1c5838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
83 { 0x1c583c, 0x00200400, 0x00200400, 0x00200400, 0x00200400, },
84 { 0x1c5840, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e, },
85 { 0x1c5844, 0x1372161e, 0x13721c1e, 0x13721c24, 0x137216a4, },
86 { 0x1c5848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, },
87 { 0x1c584c, 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c, },
88 { 0x1c5850, 0x6c48b4e4, 0x6d48b4e4, 0x6d48b0e4, 0x6c48b0e4, },
89 { 0x1c5854, 0x00000859, 0x00000859, 0x00000859, 0x00000859, },
90 { 0x1c5858, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, },
91 { 0x1c585c, 0x31395c5e, 0x3139605e, 0x3139605e, 0x31395c5e, },
92 { 0x1c5860, 0x0004dd10, 0x0004dd10, 0x0004dd20, 0x0004dd20, },
93 { 0x1c5864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
94 { 0x1c5868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, },
95 { 0x1c586c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, },
96 { 0x1c5900, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
97 { 0x1c5904, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
98 { 0x1c5908, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
99 { 0x1c590c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
100 { 0x1c5914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, },
101 { 0x1c5918, 0x00000118, 0x00000230, 0x00000268, 0x00000134, },
102 { 0x1c591c, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, },
103 { 0x1c5920, 0x0510081c, 0x0510081c, 0x0510001c, 0x0510001c, },
104 { 0x1c5924, 0xd0058a15, 0xd0058a15, 0xd0058a15, 0xd0058a15, },
105 { 0x1c5928, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
106 { 0x1c592c, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
107 { 0x1c5934, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
108 { 0x1c5938, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
109 { 0x1c593c, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f, },
110 { 0x1c5944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, },
111 { 0x1c5948, 0x9280b212, 0x9280b212, 0x9280b212, 0x9280b212, },
112 { 0x1c594c, 0x00020028, 0x00020028, 0x00020028, 0x00020028, },
113 { 0x1c5954, 0x5d50e188, 0x5d50e188, 0x5d50e188, 0x5d50e188, },
114 { 0x1c5958, 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff, },
115 { 0x1c5960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
116 { 0x1c5964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, },
117 { 0x1c5970, 0x190fb515, 0x190fb515, 0x190fb515, 0x190fb515, },
118 { 0x1c5974, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
119 { 0x1c5978, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
120 { 0x1c597c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
121 { 0x1c5980, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
122 { 0x1c5984, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
123 { 0x1c5988, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
124 { 0x1c598c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
125 { 0x1c5990, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
126 { 0x1c5994, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
127 { 0x1c5998, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
128 { 0x1c599c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
129 { 0x1c59a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
130 { 0x1c59a4, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
131 { 0x1c59a8, 0x001fff00, 0x001fff00, 0x001fff00, 0x001fff00, },
132 { 0x1c59ac, 0x006f00c4, 0x006f00c4, 0x006f00c4, 0x006f00c4, },
133 { 0x1c59b0, 0x03051000, 0x03051000, 0x03051000, 0x03051000, },
134 { 0x1c59b4, 0x00000820, 0x00000820, 0x00000820, 0x00000820, },
135 { 0x1c59bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
136 { 0x1c59c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, },
137 { 0x1c59c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, },
138 { 0x1c59c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, },
139 { 0x1c59cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, },
140 { 0x1c59d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, },
141 { 0x1c59d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
142 { 0x1c59d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
143 { 0x1c59dc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
144 { 0x1c59e0, 0x00000200, 0x00000200, 0x00000200, 0x00000200, },
145 { 0x1c59e4, 0x64646464, 0x64646464, 0x64646464, 0x64646464, },
146 { 0x1c59e8, 0x3c787878, 0x3c787878, 0x3c787878, 0x3c787878, },
147 { 0x1c59ec, 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa, },
148 { 0x1c59f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
149 { 0x1c59fc, 0x00001042, 0x00001042, 0x00001042, 0x00001042, },
150 { 0x1c5a00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
151 { 0x1c5a04, 0x00000040, 0x00000040, 0x00000040, 0x00000040, },
152 { 0x1c5a08, 0x00000080, 0x00000080, 0x00000080, 0x00000080, },
153 { 0x1c5a0c, 0x000001a1, 0x000001a1, 0x00000141, 0x00000141, },
154 { 0x1c5a10, 0x000001e1, 0x000001e1, 0x00000181, 0x00000181, },
155 { 0x1c5a14, 0x00000021, 0x00000021, 0x000001c1, 0x000001c1, },
156 { 0x1c5a18, 0x00000061, 0x00000061, 0x00000001, 0x00000001, },
157 { 0x1c5a1c, 0x00000168, 0x00000168, 0x00000041, 0x00000041, },
158 { 0x1c5a20, 0x000001a8, 0x000001a8, 0x000001a8, 0x000001a8, },
159 { 0x1c5a24, 0x000001e8, 0x000001e8, 0x000001e8, 0x000001e8, },
160 { 0x1c5a28, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
161 { 0x1c5a2c, 0x00000068, 0x00000068, 0x00000068, 0x00000068, },
162 { 0x1c5a30, 0x00000189, 0x00000189, 0x000000a8, 0x000000a8, },
163 { 0x1c5a34, 0x000001c9, 0x000001c9, 0x00000169, 0x00000169, },
164 { 0x1c5a38, 0x00000009, 0x00000009, 0x000001a9, 0x000001a9, },
165 { 0x1c5a3c, 0x00000049, 0x00000049, 0x000001e9, 0x000001e9, },
166 { 0x1c5a40, 0x00000089, 0x00000089, 0x00000029, 0x00000029, },
167 { 0x1c5a44, 0x00000170, 0x00000170, 0x00000069, 0x00000069, },
168 { 0x1c5a48, 0x000001b0, 0x000001b0, 0x00000190, 0x00000190, },
169 { 0x1c5a4c, 0x000001f0, 0x000001f0, 0x000001d0, 0x000001d0, },
170 { 0x1c5a50, 0x00000030, 0x00000030, 0x00000010, 0x00000010, },
171 { 0x1c5a54, 0x00000070, 0x00000070, 0x00000050, 0x00000050, },
172 { 0x1c5a58, 0x00000191, 0x00000191, 0x00000090, 0x00000090, },
173 { 0x1c5a5c, 0x000001d1, 0x000001d1, 0x00000151, 0x00000151, },
174 { 0x1c5a60, 0x00000011, 0x00000011, 0x00000191, 0x00000191, },
175 { 0x1c5a64, 0x00000051, 0x00000051, 0x000001d1, 0x000001d1, },
176 { 0x1c5a68, 0x00000091, 0x00000091, 0x00000011, 0x00000011, },
177 { 0x1c5a6c, 0x000001b8, 0x000001b8, 0x00000051, 0x00000051, },
178 { 0x1c5a70, 0x000001f8, 0x000001f8, 0x00000198, 0x00000198, },
179 { 0x1c5a74, 0x00000038, 0x00000038, 0x000001d8, 0x000001d8, },
180 { 0x1c5a78, 0x00000078, 0x00000078, 0x00000018, 0x00000018, },
181 { 0x1c5a7c, 0x00000199, 0x00000199, 0x00000058, 0x00000058, },
182 { 0x1c5a80, 0x000001d9, 0x000001d9, 0x00000098, 0x00000098, },
183 { 0x1c5a84, 0x00000019, 0x00000019, 0x00000159, 0x00000159, },
184 { 0x1c5a88, 0x00000059, 0x00000059, 0x00000199, 0x00000199, },
185 { 0x1c5a8c, 0x00000099, 0x00000099, 0x000001d9, 0x000001d9, },
186 { 0x1c5a90, 0x000000d9, 0x000000d9, 0x00000019, 0x00000019, },
187 { 0x1c5a94, 0x000000f9, 0x000000f9, 0x00000059, 0x00000059, },
188 { 0x1c5a98, 0x000000f9, 0x000000f9, 0x00000099, 0x00000099, },
189 { 0x1c5a9c, 0x000000f9, 0x000000f9, 0x000000d9, 0x000000d9, },
190 { 0x1c5aa0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
191 { 0x1c5aa4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
192 { 0x1c5aa8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
193 { 0x1c5aac, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
194 { 0x1c5ab0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
195 { 0x1c5ab4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
196 { 0x1c5ab8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
197 { 0x1c5abc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
198 { 0x1c5ac0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
199 { 0x1c5ac4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
200 { 0x1c5ac8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
201 { 0x1c5acc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
202 { 0x1c5ad0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
203 { 0x1c5ad4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
204 { 0x1c5ad8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
205 { 0x1c5adc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
206 { 0x1c5ae0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
207 { 0x1c5ae4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
208 { 0x1c5ae8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
209 { 0x1c5aec, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
210 { 0x1c5af0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
211 { 0x1c5af4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
212 { 0x1c5af8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
213 { 0x1c5afc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
214 { 0x1c5b00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
215 { 0x1c5b04, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
216 { 0x1c5b08, 0x00000002, 0x00000002, 0x00000002, 0x00000002, },
217 { 0x1c5b0c, 0x00000003, 0x00000003, 0x00000003, 0x00000003, },
218 { 0x1c5b10, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
219 { 0x1c5b14, 0x00000005, 0x00000005, 0x00000005, 0x00000005, },
220 { 0x1c5b18, 0x00000008, 0x00000008, 0x00000008, 0x00000008, },
221 { 0x1c5b1c, 0x00000009, 0x00000009, 0x00000009, 0x00000009, },
222 { 0x1c5b20, 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, },
223 { 0x1c5b24, 0x0000000b, 0x0000000b, 0x0000000b, 0x0000000b, },
224 { 0x1c5b28, 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c, },
225 { 0x1c5b2c, 0x0000000d, 0x0000000d, 0x0000000d, 0x0000000d, },
226 { 0x1c5b30, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
227 { 0x1c5b34, 0x00000011, 0x00000011, 0x00000011, 0x00000011, },
228 { 0x1c5b38, 0x00000012, 0x00000012, 0x00000012, 0x00000012, },
229 { 0x1c5b3c, 0x00000013, 0x00000013, 0x00000013, 0x00000013, },
230 { 0x1c5b40, 0x00000014, 0x00000014, 0x00000014, 0x00000014, },
231 { 0x1c5b44, 0x00000015, 0x00000015, 0x00000015, 0x00000015, },
232 { 0x1c5b48, 0x00000018, 0x00000018, 0x00000018, 0x00000018, },
233 { 0x1c5b4c, 0x00000019, 0x00000019, 0x00000019, 0x00000019, },
234 { 0x1c5b50, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
235 { 0x1c5b54, 0x0000001b, 0x0000001b, 0x0000001b, 0x0000001b, },
236 { 0x1c5b58, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, },
237 { 0x1c5b5c, 0x0000001d, 0x0000001d, 0x0000001d, 0x0000001d, },
238 { 0x1c5b60, 0x00000020, 0x00000020, 0x00000020, 0x00000020, },
239 { 0x1c5b64, 0x00000021, 0x00000021, 0x00000021, 0x00000021, },
240 { 0x1c5b68, 0x00000022, 0x00000022, 0x00000022, 0x00000022, },
241 { 0x1c5b6c, 0x00000023, 0x00000023, 0x00000023, 0x00000023, },
242 { 0x1c5b70, 0x00000024, 0x00000024, 0x00000024, 0x00000024, },
243 { 0x1c5b74, 0x00000025, 0x00000025, 0x00000025, 0x00000025, },
244 { 0x1c5b78, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
245 { 0x1c5b7c, 0x00000029, 0x00000029, 0x00000029, 0x00000029, },
246 { 0x1c5b80, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a, },
247 { 0x1c5b84, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, },
248 { 0x1c5b88, 0x0000002c, 0x0000002c, 0x0000002c, 0x0000002c, },
249 { 0x1c5b8c, 0x0000002d, 0x0000002d, 0x0000002d, 0x0000002d, },
250 { 0x1c5b90, 0x00000030, 0x00000030, 0x00000030, 0x00000030, },
251 { 0x1c5b94, 0x00000031, 0x00000031, 0x00000031, 0x00000031, },
252 { 0x1c5b98, 0x00000032, 0x00000032, 0x00000032, 0x00000032, },
253 { 0x1c5b9c, 0x00000033, 0x00000033, 0x00000033, 0x00000033, },
254 { 0x1c5ba0, 0x00000034, 0x00000034, 0x00000034, 0x00000034, },
255 { 0x1c5ba4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
256 { 0x1c5ba8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
257 { 0x1c5bac, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
258 { 0x1c5bb0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
259 { 0x1c5bb4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
260 { 0x1c5bb8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
261 { 0x1c5bbc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
262 { 0x1c5bc0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
263 { 0x1c5bc4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
264 { 0x1c5bc8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
265 { 0x1c5bcc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
266 { 0x1c5bd0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
267 { 0x1c5bd4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
268 { 0x1c5bd8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
269 { 0x1c5bdc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
270 { 0x1c5be0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
271 { 0x1c5be4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
272 { 0x1c5be8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
273 { 0x1c5bec, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
274 { 0x1c5bf0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
275 { 0x1c5bf4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
276 { 0x1c5bf8, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
277 { 0x1c5bfc, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
278 { 0x1c5c00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
279 { 0x1c5c0c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
280 { 0x1c5c10, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
281 { 0x1c5c14, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
282 { 0x1c5c18, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
283 { 0x1c5c1c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
284 { 0x1c5c20, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
285 { 0x1c5c24, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
286 { 0x1c5c28, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
287 { 0x1c5c2c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
288 { 0x1c5c30, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
289 { 0x1c5c34, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
290 { 0x1c5c38, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
291 { 0x1c5c3c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
292 { 0x1c5cf0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
293 { 0x1c5cf4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
294 { 0x1c5cf8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
295 { 0x1c5cfc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
296 { 0x1c6200, 0x00000008, 0x00000008, 0x0000000e, 0x0000000e, },
297 { 0x1c6204, 0x00000440, 0x00000440, 0x00000440, 0x00000440, },
298 { 0x1c6208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, },
299 { 0x1c620c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
300 { 0x1c6210, 0x40806333, 0x40806333, 0x40806333, 0x40806333, },
301 { 0x1c6214, 0x00106c10, 0x00106c10, 0x00106c10, 0x00106c10, },
302 { 0x1c6218, 0x009c4060, 0x009c4060, 0x009c4060, 0x009c4060, },
303 { 0x1c621c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, },
304 { 0x1c6220, 0x018830c6, 0x018830c6, 0x018830c6, 0x018830c6, },
305 { 0x1c6224, 0x00000400, 0x00000400, 0x00000400, 0x00000400, },
306 { 0x1c6228, 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5, },
307 { 0x1c622c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
308 { 0x1c6230, 0x00000108, 0x00000210, 0x00000210, 0x00000108, },
309 { 0x1c6234, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
310 { 0x1c6238, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
311 { 0x1c623c, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, },
312 { 0x1c6240, 0x38490a20, 0x38490a20, 0x38490a20, 0x38490a20, },
313 { 0x1c6244, 0x00007bb6, 0x00007bb6, 0x00007bb6, 0x00007bb6, },
314 { 0x1c6248, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, },
315 { 0x1c624c, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
316 { 0x1c6250, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
317 { 0x1c6254, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
318 { 0x1c6258, 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380, },
319 { 0x1c625c, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, },
320 { 0x1c6260, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, },
321 { 0x1c6264, 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11, },
322 { 0x1c6268, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
323 { 0x1c626c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
324 { 0x1c6274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, },
325 { 0x1c6278, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
326 { 0x1c627c, 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce, },
327 { 0x1c6300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, },
328 { 0x1c6304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, },
329 { 0x1c6308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, },
330 { 0x1c630c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, },
331 { 0x1c6310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, },
332 { 0x1c6314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, },
333 { 0x1c6318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, },
334 { 0x1c631c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, },
335 { 0x1c6320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, },
336 { 0x1c6324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, },
337 { 0x1c6328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, },
338 { 0x1c632c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
339 { 0x1c6330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
340 { 0x1c6334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
341 { 0x1c6338, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
342 { 0x1c633c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
343 { 0x1c6340, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
344 { 0x1c6344, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
345 { 0x1c6348, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
346 { 0x1c634c, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
347 { 0x1c6350, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
348 { 0x1c6354, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, },
349 { 0x1c6358, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, },
350 { 0x1c6388, 0x08000000, 0x08000000, 0x08000000, 0x08000000, },
351 { 0x1c638c, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
352 { 0x1c6390, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
353 { 0x1c6394, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
354 { 0x1c6398, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce, },
355 { 0x1c639c, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
356 { 0x1c63a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
357 { 0x1c63a4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
358 { 0x1c63a8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
359 { 0x1c63ac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
360 { 0x1c63b0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
361 { 0x1c63b4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
362 { 0x1c63b8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
363 { 0x1c63bc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
364 { 0x1c63c0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
365 { 0x1c63c4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
366 { 0x1c63c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
367 { 0x1c63cc, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
368 { 0x1c63d0, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
369 { 0x1c63d4, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
370 { 0x1c63d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
371 { 0x1c63dc, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
372 { 0x1c63e0, 0x000000c0, 0x000000c0, 0x000000c0, 0x000000c0, },
373 { 0x1c6848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
374 { 0x1c6920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
375 { 0x1c6960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
376 { 0x1c720c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
377 { 0x1c726c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
378 { 0x1c7848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
379 { 0x1c7920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
380 { 0x1c7960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
381 { 0x1c820c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
382 { 0x1c826c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
383/* { 0x1c8864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, }, */
384 { 0x1c8864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
385 { 0x1c895c, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, },
386 { 0x1c8968, 0x000003ce, 0x000003ce, 0x000003ce, 0x000003ce, },
387 { 0x1c89bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
388 { 0x1c9270, 0x00820820, 0x00820820, 0x00820820, 0x00820820, },
389 { 0x1c935c, 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f, },
390 { 0x1c9360, 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207, },
391 { 0x1c9364, 0x17601685, 0x17601685, 0x17601685, 0x17601685, },
392 { 0x1c9368, 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104, },
393 { 0x1c936c, 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03, },
394 { 0x1c9370, 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883, },
395 { 0x1c9374, 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803, },
396 { 0x1c9378, 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682, },
397 { 0x1c937c, 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482, },
398 { 0x1c9380, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, },
399 { 0x1c9384, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, }
400};
401
402/*
 403 * Look up a register in ar5416_phy_init[] and return the init value
 404 * for the given band and bandwidth. Returns 0 if the address is not found.
405 */
406static u32 carl9170_def_val(u32 reg, bool is_2ghz, bool is_40mhz)
407{
408 unsigned int i;
409 for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
410 if (ar5416_phy_init[i].reg != reg)
411 continue;
412
413 if (is_2ghz) {
414 if (is_40mhz)
415 return ar5416_phy_init[i]._2ghz_40;
416 else
417 return ar5416_phy_init[i]._2ghz_20;
418 } else {
419 if (is_40mhz)
420 return ar5416_phy_init[i]._5ghz_40;
421 else
422 return ar5416_phy_init[i]._5ghz_20;
423 }
424 }
425 return 0;
426}
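This lookup lets the EEPROM-override code start from the canonical table default before patching individual fields. For example, the thresh62 fix-up further below starts from register 0x1c8864, whose entry in the table above is 0x0001c600 for every band/bandwidth column:

	u32 val = carl9170_def_val(0x1c8864, true, false);	/* 0x0001c600 */

	val = (val & ~0x7f000) | (m->thresh62 << 12);	/* splice thresh62 into bits 12..18 */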
427
428/*
 429 * Initialize some PHY registers from EEPROM values in modal_header[],
 430 * according to band and bandwidth.
431 */
432static int carl9170_init_phy_from_eeprom(struct ar9170 *ar,
433 bool is_2ghz, bool is_40mhz)
434{
435 static const u8 xpd2pd[16] = {
436 0x2, 0x2, 0x2, 0x1, 0x2, 0x2, 0x6, 0x2,
437 0x2, 0x3, 0x7, 0x2, 0xb, 0x2, 0x2, 0x2
438 };
 439	/* pointer to the modal_header according to band */
440 struct ar9170_eeprom_modal *m = &ar->eeprom.modal_header[is_2ghz];
441 u32 val;
442
443 carl9170_regwrite_begin(ar);
444
445 /* ant common control (index 0) */
446 carl9170_regwrite(AR9170_PHY_REG_SWITCH_COM,
447 le32_to_cpu(m->antCtrlCommon));
448
449 /* ant control chain 0 (index 1) */
450 carl9170_regwrite(AR9170_PHY_REG_SWITCH_CHAIN_0,
451 le32_to_cpu(m->antCtrlChain[0]));
452
453 /* ant control chain 2 (index 2) */
454 carl9170_regwrite(AR9170_PHY_REG_SWITCH_CHAIN_2,
455 le32_to_cpu(m->antCtrlChain[1]));
456
457 /* SwSettle (index 3) */
458 if (!is_40mhz) {
459 val = carl9170_def_val(AR9170_PHY_REG_SETTLING,
460 is_2ghz, is_40mhz);
461 SET_VAL(AR9170_PHY_SETTLING_SWITCH, val, m->switchSettling);
462 carl9170_regwrite(AR9170_PHY_REG_SETTLING, val);
463 }
464
465 /* adcDesired, pdaDesired (index 4) */
466 val = carl9170_def_val(AR9170_PHY_REG_DESIRED_SZ, is_2ghz, is_40mhz);
467 SET_VAL(AR9170_PHY_DESIRED_SZ_PGA, val, m->pgaDesiredSize);
468 SET_VAL(AR9170_PHY_DESIRED_SZ_ADC, val, m->adcDesiredSize);
469 carl9170_regwrite(AR9170_PHY_REG_DESIRED_SZ, val);
470
471 /* TxEndToXpaOff, TxFrameToXpaOn (index 5) */
472 val = carl9170_def_val(AR9170_PHY_REG_RF_CTL4, is_2ghz, is_40mhz);
473 SET_VAL(AR9170_PHY_RF_CTL4_TX_END_XPAB_OFF, val, m->txEndToXpaOff);
474 SET_VAL(AR9170_PHY_RF_CTL4_TX_END_XPAA_OFF, val, m->txEndToXpaOff);
475 SET_VAL(AR9170_PHY_RF_CTL4_FRAME_XPAB_ON, val, m->txFrameToXpaOn);
476 SET_VAL(AR9170_PHY_RF_CTL4_FRAME_XPAA_ON, val, m->txFrameToXpaOn);
477 carl9170_regwrite(AR9170_PHY_REG_RF_CTL4, val);
478
479 /* TxEndToRxOn (index 6) */
480 val = carl9170_def_val(AR9170_PHY_REG_RF_CTL3, is_2ghz, is_40mhz);
481 SET_VAL(AR9170_PHY_RF_CTL3_TX_END_TO_A2_RX_ON, val, m->txEndToRxOn);
482 carl9170_regwrite(AR9170_PHY_REG_RF_CTL3, val);
483
484 /* thresh62 (index 7) */
485 val = carl9170_def_val(0x1c8864, is_2ghz, is_40mhz);
486 val = (val & ~0x7f000) | (m->thresh62 << 12);
487 carl9170_regwrite(0x1c8864, val);
488
489 /* tx/rx attenuation chain 0 (index 8) */
490 val = carl9170_def_val(AR9170_PHY_REG_RXGAIN, is_2ghz, is_40mhz);
491 SET_VAL(AR9170_PHY_RXGAIN_TXRX_ATTEN, val, m->txRxAttenCh[0]);
492 carl9170_regwrite(AR9170_PHY_REG_RXGAIN, val);
493
494 /* tx/rx attenuation chain 2 (index 9) */
495 val = carl9170_def_val(AR9170_PHY_REG_RXGAIN_CHAIN_2,
496 is_2ghz, is_40mhz);
497 SET_VAL(AR9170_PHY_RXGAIN_TXRX_ATTEN, val, m->txRxAttenCh[1]);
498 carl9170_regwrite(AR9170_PHY_REG_RXGAIN_CHAIN_2, val);
499
500 /* tx/rx margin chain 0 (index 10) */
501 val = carl9170_def_val(AR9170_PHY_REG_GAIN_2GHZ, is_2ghz, is_40mhz);
502 SET_VAL(AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN, val, m->rxTxMarginCh[0]);
503 /* bsw margin chain 0 for 5GHz only */
504 if (!is_2ghz)
505 SET_VAL(AR9170_PHY_GAIN_2GHZ_BSW_MARGIN, val, m->bswMargin[0]);
506 carl9170_regwrite(AR9170_PHY_REG_GAIN_2GHZ, val);
507
508 /* tx/rx margin chain 2 (index 11) */
509 val = carl9170_def_val(AR9170_PHY_REG_GAIN_2GHZ_CHAIN_2,
510 is_2ghz, is_40mhz);
511 SET_VAL(AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN, val, m->rxTxMarginCh[1]);
512 carl9170_regwrite(AR9170_PHY_REG_GAIN_2GHZ_CHAIN_2, val);
513
 514	/* iqCalI, iqCalQ chain 0 (index 12) */
515 val = carl9170_def_val(AR9170_PHY_REG_TIMING_CTRL4(0),
516 is_2ghz, is_40mhz);
517 SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, val, m->iqCalICh[0]);
518 SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, val, m->iqCalQCh[0]);
519 carl9170_regwrite(AR9170_PHY_REG_TIMING_CTRL4(0), val);
520
 521	/* iqCalI, iqCalQ chain 2 (index 13) */
522 val = carl9170_def_val(AR9170_PHY_REG_TIMING_CTRL4(2),
523 is_2ghz, is_40mhz);
524 SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, val, m->iqCalICh[1]);
525 SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, val, m->iqCalQCh[1]);
526 carl9170_regwrite(AR9170_PHY_REG_TIMING_CTRL4(2), val);
527
528 /* xpd gain mask (index 14) */
529 val = carl9170_def_val(AR9170_PHY_REG_TPCRG1, is_2ghz, is_40mhz);
530 SET_VAL(AR9170_PHY_TPCRG1_PD_GAIN_1, val,
531 xpd2pd[m->xpdGain & 0xf] & 3);
532 SET_VAL(AR9170_PHY_TPCRG1_PD_GAIN_2, val,
533 xpd2pd[m->xpdGain & 0xf] >> 2);
534 carl9170_regwrite(AR9170_PHY_REG_TPCRG1, val);
535
536 carl9170_regwrite(AR9170_PHY_REG_RX_CHAINMASK, ar->eeprom.rx_mask);
537 carl9170_regwrite(AR9170_PHY_REG_CAL_CHAINMASK, ar->eeprom.rx_mask);
538
539 carl9170_regwrite_finish();
540 return carl9170_regwrite_result();
541}
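SET_VAL() splices an EEPROM field into a register word. The macro itself is defined in a driver header outside this hunk; a plausible shape, assuming each REG mask constant comes with a matching REG_S shift constant, is:

	/* Assumed definition -- not taken from this patch: */
	#define SET_VAL(reg, value, newvalue)				\
		((value) = ((value) & ~(reg)) |				\
			   (((newvalue) << reg##_S) & (reg)))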
542
543static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
544{
545 int i, err;
546 u32 val;
547 bool is_2ghz = band == IEEE80211_BAND_2GHZ;
548 bool is_40mhz = conf_is_ht40(&ar->hw->conf);
549
550 carl9170_regwrite_begin(ar);
551
552 for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
553 if (is_40mhz) {
554 if (is_2ghz)
555 val = ar5416_phy_init[i]._2ghz_40;
556 else
557 val = ar5416_phy_init[i]._5ghz_40;
558 } else {
559 if (is_2ghz)
560 val = ar5416_phy_init[i]._2ghz_20;
561 else
562 val = ar5416_phy_init[i]._5ghz_20;
563 }
564
565 carl9170_regwrite(ar5416_phy_init[i].reg, val);
566 }
567
568 carl9170_regwrite_finish();
569 err = carl9170_regwrite_result();
570 if (err)
571 return err;
572
573 err = carl9170_init_phy_from_eeprom(ar, is_2ghz, is_40mhz);
574 if (err)
575 return err;
576
577 err = carl9170_init_power_cal(ar);
578 if (err)
579 return err;
580
581 /* XXX: remove magic! */
582 if (is_2ghz)
583 err = carl9170_write_reg(ar, AR9170_PWR_REG_PLL_ADDAC, 0x5163);
584 else
585 err = carl9170_write_reg(ar, AR9170_PWR_REG_PLL_ADDAC, 0x5143);
586
587 return err;
588}
589
590struct carl9170_rf_initvals {
591 u32 reg, _5ghz, _2ghz;
592};
593
594static struct carl9170_rf_initvals carl9170_rf_initval[] = {
595 /* bank 0 */
596 { 0x1c58b0, 0x1e5795e5, 0x1e5795e5},
597 { 0x1c58e0, 0x02008020, 0x02008020},
598 /* bank 1 */
599 { 0x1c58b0, 0x02108421, 0x02108421},
600 { 0x1c58ec, 0x00000008, 0x00000008},
601 /* bank 2 */
602 { 0x1c58b0, 0x0e73ff17, 0x0e73ff17},
603 { 0x1c58e0, 0x00000420, 0x00000420},
604 /* bank 3 */
605 { 0x1c58f0, 0x01400018, 0x01c00018},
606 /* bank 4 */
607 { 0x1c58b0, 0x000001a1, 0x000001a1},
608 { 0x1c58e8, 0x00000001, 0x00000001},
609 /* bank 5 */
610 { 0x1c58b0, 0x00000013, 0x00000013},
611 { 0x1c58e4, 0x00000002, 0x00000002},
612 /* bank 6 */
613 { 0x1c58b0, 0x00000000, 0x00000000},
614 { 0x1c58b0, 0x00000000, 0x00000000},
615 { 0x1c58b0, 0x00000000, 0x00000000},
616 { 0x1c58b0, 0x00000000, 0x00000000},
617 { 0x1c58b0, 0x00000000, 0x00000000},
618 { 0x1c58b0, 0x00004000, 0x00004000},
619 { 0x1c58b0, 0x00006c00, 0x00006c00},
620 { 0x1c58b0, 0x00002c00, 0x00002c00},
621 { 0x1c58b0, 0x00004800, 0x00004800},
622 { 0x1c58b0, 0x00004000, 0x00004000},
623 { 0x1c58b0, 0x00006000, 0x00006000},
624 { 0x1c58b0, 0x00001000, 0x00001000},
625 { 0x1c58b0, 0x00004000, 0x00004000},
626 { 0x1c58b0, 0x00007c00, 0x00007c00},
627 { 0x1c58b0, 0x00007c00, 0x00007c00},
628 { 0x1c58b0, 0x00007c00, 0x00007c00},
629 { 0x1c58b0, 0x00007c00, 0x00007c00},
630 { 0x1c58b0, 0x00007c00, 0x00007c00},
631 { 0x1c58b0, 0x00087c00, 0x00087c00},
632 { 0x1c58b0, 0x00007c00, 0x00007c00},
633 { 0x1c58b0, 0x00005400, 0x00005400},
634 { 0x1c58b0, 0x00000c00, 0x00000c00},
635 { 0x1c58b0, 0x00001800, 0x00001800},
636 { 0x1c58b0, 0x00007c00, 0x00007c00},
637 { 0x1c58b0, 0x00006c00, 0x00006c00},
638 { 0x1c58b0, 0x00006c00, 0x00006c00},
639 { 0x1c58b0, 0x00007c00, 0x00007c00},
640 { 0x1c58b0, 0x00002c00, 0x00002c00},
641 { 0x1c58b0, 0x00003c00, 0x00003c00},
642 { 0x1c58b0, 0x00003800, 0x00003800},
643 { 0x1c58b0, 0x00001c00, 0x00001c00},
644 { 0x1c58b0, 0x00000800, 0x00000800},
645 { 0x1c58b0, 0x00000408, 0x00000408},
646 { 0x1c58b0, 0x00004c15, 0x00004c15},
647 { 0x1c58b0, 0x00004188, 0x00004188},
648 { 0x1c58b0, 0x0000201e, 0x0000201e},
649 { 0x1c58b0, 0x00010408, 0x00010408},
650 { 0x1c58b0, 0x00000801, 0x00000801},
651 { 0x1c58b0, 0x00000c08, 0x00000c08},
652 { 0x1c58b0, 0x0000181e, 0x0000181e},
653 { 0x1c58b0, 0x00001016, 0x00001016},
654 { 0x1c58b0, 0x00002800, 0x00002800},
655 { 0x1c58b0, 0x00004010, 0x00004010},
656 { 0x1c58b0, 0x0000081c, 0x0000081c},
657 { 0x1c58b0, 0x00000115, 0x00000115},
658 { 0x1c58b0, 0x00000015, 0x00000015},
659 { 0x1c58b0, 0x00000066, 0x00000066},
660 { 0x1c58b0, 0x0000001c, 0x0000001c},
661 { 0x1c58b0, 0x00000000, 0x00000000},
662 { 0x1c58b0, 0x00000004, 0x00000004},
663 { 0x1c58b0, 0x00000015, 0x00000015},
664 { 0x1c58b0, 0x0000001f, 0x0000001f},
665 { 0x1c58e0, 0x00000000, 0x00000400},
666 /* bank 7 */
667 { 0x1c58b0, 0x000000a0, 0x000000a0},
668 { 0x1c58b0, 0x00000000, 0x00000000},
669 { 0x1c58b0, 0x00000040, 0x00000040},
670 { 0x1c58f0, 0x0000001c, 0x0000001c},
671};
672
673static int carl9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz)
674{
675 int err, i;
676
677 carl9170_regwrite_begin(ar);
678
679 for (i = 0; i < ARRAY_SIZE(carl9170_rf_initval); i++)
680 carl9170_regwrite(carl9170_rf_initval[i].reg,
681 band5ghz ? carl9170_rf_initval[i]._5ghz
682 : carl9170_rf_initval[i]._2ghz);
683
684 carl9170_regwrite_finish();
685 err = carl9170_regwrite_result();
686 if (err)
687 wiphy_err(ar->hw->wiphy, "rf init failed\n");
688
689 return err;
690}
691
692struct carl9170_phy_freq_params {
693 u8 coeff_exp;
694 u16 coeff_man;
695 u8 coeff_exp_shgi;
696 u16 coeff_man_shgi;
697};
698
699enum carl9170_bw {
700 CARL9170_BW_20,
701 CARL9170_BW_40_BELOW,
702 CARL9170_BW_40_ABOVE,
703
704 __CARL9170_NUM_BW,
705};
706
707struct carl9170_phy_freq_entry {
708 u16 freq;
709 struct carl9170_phy_freq_params params[__CARL9170_NUM_BW];
710};
711
712/* NB: must be in sync with channel tables in main! */
713static const struct carl9170_phy_freq_entry carl9170_phy_freq_params[] = {
714/*
715 * freq,
716 * 20MHz,
717 * 40MHz (below),
 718 * 40MHz (above),
719 */
720 { 2412, {
721 { 3, 21737, 3, 19563, },
722 { 3, 21827, 3, 19644, },
723 { 3, 21647, 3, 19482, },
724 } },
725 { 2417, {
726 { 3, 21692, 3, 19523, },
727 { 3, 21782, 3, 19604, },
728 { 3, 21602, 3, 19442, },
729 } },
730 { 2422, {
731 { 3, 21647, 3, 19482, },
732 { 3, 21737, 3, 19563, },
733 { 3, 21558, 3, 19402, },
734 } },
735 { 2427, {
736 { 3, 21602, 3, 19442, },
737 { 3, 21692, 3, 19523, },
738 { 3, 21514, 3, 19362, },
739 } },
740 { 2432, {
741 { 3, 21558, 3, 19402, },
742 { 3, 21647, 3, 19482, },
743 { 3, 21470, 3, 19323, },
744 } },
745 { 2437, {
746 { 3, 21514, 3, 19362, },
747 { 3, 21602, 3, 19442, },
748 { 3, 21426, 3, 19283, },
749 } },
750 { 2442, {
751 { 3, 21470, 3, 19323, },
752 { 3, 21558, 3, 19402, },
753 { 3, 21382, 3, 19244, },
754 } },
755 { 2447, {
756 { 3, 21426, 3, 19283, },
757 { 3, 21514, 3, 19362, },
758 { 3, 21339, 3, 19205, },
759 } },
760 { 2452, {
761 { 3, 21382, 3, 19244, },
762 { 3, 21470, 3, 19323, },
763 { 3, 21295, 3, 19166, },
764 } },
765 { 2457, {
766 { 3, 21339, 3, 19205, },
767 { 3, 21426, 3, 19283, },
768 { 3, 21252, 3, 19127, },
769 } },
770 { 2462, {
771 { 3, 21295, 3, 19166, },
772 { 3, 21382, 3, 19244, },
773 { 3, 21209, 3, 19088, },
774 } },
775 { 2467, {
776 { 3, 21252, 3, 19127, },
777 { 3, 21339, 3, 19205, },
778 { 3, 21166, 3, 19050, },
779 } },
780 { 2472, {
781 { 3, 21209, 3, 19088, },
782 { 3, 21295, 3, 19166, },
783 { 3, 21124, 3, 19011, },
784 } },
785 { 2484, {
786 { 3, 21107, 3, 18996, },
787 { 3, 21192, 3, 19073, },
788 { 3, 21022, 3, 18920, },
789 } },
790 { 4920, {
791 { 4, 21313, 4, 19181, },
792 { 4, 21356, 4, 19220, },
793 { 4, 21269, 4, 19142, },
794 } },
795 { 4940, {
796 { 4, 21226, 4, 19104, },
797 { 4, 21269, 4, 19142, },
798 { 4, 21183, 4, 19065, },
799 } },
800 { 4960, {
801 { 4, 21141, 4, 19027, },
802 { 4, 21183, 4, 19065, },
803 { 4, 21098, 4, 18988, },
804 } },
805 { 4980, {
806 { 4, 21056, 4, 18950, },
807 { 4, 21098, 4, 18988, },
808 { 4, 21014, 4, 18912, },
809 } },
810 { 5040, {
811 { 4, 20805, 4, 18725, },
812 { 4, 20846, 4, 18762, },
813 { 4, 20764, 4, 18687, },
814 } },
815 { 5060, {
816 { 4, 20723, 4, 18651, },
817 { 4, 20764, 4, 18687, },
818 { 4, 20682, 4, 18614, },
819 } },
820 { 5080, {
821 { 4, 20641, 4, 18577, },
822 { 4, 20682, 4, 18614, },
823 { 4, 20601, 4, 18541, },
824 } },
825 { 5180, {
826 { 4, 20243, 4, 18219, },
827 { 4, 20282, 4, 18254, },
828 { 4, 20204, 4, 18183, },
829 } },
830 { 5200, {
831 { 4, 20165, 4, 18148, },
832 { 4, 20204, 4, 18183, },
833 { 4, 20126, 4, 18114, },
834 } },
835 { 5220, {
836 { 4, 20088, 4, 18079, },
837 { 4, 20126, 4, 18114, },
838 { 4, 20049, 4, 18044, },
839 } },
840 { 5240, {
841 { 4, 20011, 4, 18010, },
842 { 4, 20049, 4, 18044, },
843 { 4, 19973, 4, 17976, },
844 } },
845 { 5260, {
846 { 4, 19935, 4, 17941, },
847 { 4, 19973, 4, 17976, },
848 { 4, 19897, 4, 17907, },
849 } },
850 { 5280, {
851 { 4, 19859, 4, 17873, },
852 { 4, 19897, 4, 17907, },
853 { 4, 19822, 4, 17840, },
854 } },
855 { 5300, {
856 { 4, 19784, 4, 17806, },
857 { 4, 19822, 4, 17840, },
858 { 4, 19747, 4, 17772, },
859 } },
860 { 5320, {
861 { 4, 19710, 4, 17739, },
862 { 4, 19747, 4, 17772, },
863 { 4, 19673, 4, 17706, },
864 } },
865 { 5500, {
866 { 4, 19065, 4, 17159, },
867 { 4, 19100, 4, 17190, },
868 { 4, 19030, 4, 17127, },
869 } },
870 { 5520, {
871 { 4, 18996, 4, 17096, },
872 { 4, 19030, 4, 17127, },
873 { 4, 18962, 4, 17065, },
874 } },
875 { 5540, {
876 { 4, 18927, 4, 17035, },
877 { 4, 18962, 4, 17065, },
878 { 4, 18893, 4, 17004, },
879 } },
880 { 5560, {
881 { 4, 18859, 4, 16973, },
882 { 4, 18893, 4, 17004, },
883 { 4, 18825, 4, 16943, },
884 } },
885 { 5580, {
886 { 4, 18792, 4, 16913, },
887 { 4, 18825, 4, 16943, },
888 { 4, 18758, 4, 16882, },
889 } },
890 { 5600, {
891 { 4, 18725, 4, 16852, },
892 { 4, 18758, 4, 16882, },
893 { 4, 18691, 4, 16822, },
894 } },
895 { 5620, {
896 { 4, 18658, 4, 16792, },
897 { 4, 18691, 4, 16822, },
898 { 4, 18625, 4, 16762, },
899 } },
900 { 5640, {
901 { 4, 18592, 4, 16733, },
902 { 4, 18625, 4, 16762, },
903 { 4, 18559, 4, 16703, },
904 } },
905 { 5660, {
906 { 4, 18526, 4, 16673, },
907 { 4, 18559, 4, 16703, },
908 { 4, 18493, 4, 16644, },
909 } },
910 { 5680, {
911 { 4, 18461, 4, 16615, },
912 { 4, 18493, 4, 16644, },
913 { 4, 18428, 4, 16586, },
914 } },
915 { 5700, {
916 { 4, 18396, 4, 16556, },
917 { 4, 18428, 4, 16586, },
918 { 4, 18364, 4, 16527, },
919 } },
920 { 5745, {
921 { 4, 18252, 4, 16427, },
922 { 4, 18284, 4, 16455, },
923 { 4, 18220, 4, 16398, },
924 } },
925 { 5765, {
926 { 4, 18189, 5, 32740, },
927 { 4, 18220, 4, 16398, },
928 { 4, 18157, 5, 32683, },
929 } },
930 { 5785, {
931 { 4, 18126, 5, 32626, },
932 { 4, 18157, 5, 32683, },
933 { 4, 18094, 5, 32570, },
934 } },
935 { 5805, {
936 { 4, 18063, 5, 32514, },
937 { 4, 18094, 5, 32570, },
938 { 4, 18032, 5, 32458, },
939 } },
940 { 5825, {
941 { 4, 18001, 5, 32402, },
942 { 4, 18032, 5, 32458, },
943 { 4, 17970, 5, 32347, },
944 } },
945 { 5170, {
946 { 4, 20282, 4, 18254, },
947 { 4, 20321, 4, 18289, },
948 { 4, 20243, 4, 18219, },
949 } },
950 { 5190, {
951 { 4, 20204, 4, 18183, },
952 { 4, 20243, 4, 18219, },
953 { 4, 20165, 4, 18148, },
954 } },
955 { 5210, {
956 { 4, 20126, 4, 18114, },
957 { 4, 20165, 4, 18148, },
958 { 4, 20088, 4, 18079, },
959 } },
960 { 5230, {
961 { 4, 20049, 4, 18044, },
962 { 4, 20088, 4, 18079, },
963 { 4, 20011, 4, 18010, },
964 } },
965};
966
967static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
968 u32 freq, enum carl9170_bw bw)
969{
970 int err;
971 u32 d0, d1, td0, td1, fd0, fd1;
972 u8 chansel;
973 u8 refsel0 = 1, refsel1 = 0;
974 u8 lf_synth = 0;
975
976 switch (bw) {
977 case CARL9170_BW_40_ABOVE:
978 freq += 10;
979 break;
980 case CARL9170_BW_40_BELOW:
981 freq -= 10;
982 break;
983 case CARL9170_BW_20:
984 break;
985 default:
986 BUG();
987 return -ENOSYS;
988 }
989
990 if (band5ghz) {
991 if (freq % 10) {
992 chansel = (freq - 4800) / 5;
993 } else {
994 chansel = ((freq - 4800) / 10) * 2;
995 refsel0 = 0;
996 refsel1 = 1;
997 }
998 chansel = byte_rev_table[chansel];
999 } else {
1000 if (freq == 2484) {
1001 chansel = 10 + (freq - 2274) / 5;
1002 lf_synth = 1;
1003 } else
1004 chansel = 16 + (freq - 2272) / 5;
1005 chansel *= 4;
1006 chansel = byte_rev_table[chansel];
1007 }
1008
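	/*
	 * Repack the two synthesizer words: the low five bits of d1
	 * (the bit-reversed channel select) and of d0 (control bits)
	 * form fd0, the remaining high three bits of each form fd1.
	 */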
1009 d1 = chansel;
1010 d0 = 0x21 |
1011 refsel0 << 3 |
1012 refsel1 << 2 |
1013 lf_synth << 1;
1014 td0 = d0 & 0x1f;
1015 td1 = d1 & 0x1f;
1016 fd0 = td1 << 5 | td0;
1017
1018 td0 = (d0 >> 5) & 0x7;
1019 td1 = (d1 >> 5) & 0x7;
1020 fd1 = td1 << 5 | td0;
1021
1022 carl9170_regwrite_begin(ar);
1023
1024 carl9170_regwrite(0x1c58b0, fd0);
1025 carl9170_regwrite(0x1c58e8, fd1);
1026
1027 carl9170_regwrite_finish();
1028 err = carl9170_regwrite_result();
1029 if (err)
1030 return err;
1031
1032 msleep(20);
1033
1034 return 0;
1035}
1036
1037static const struct carl9170_phy_freq_params *
1038carl9170_get_hw_dyn_params(struct ieee80211_channel *channel,
1039 enum carl9170_bw bw)
1040{
1041 unsigned int chanidx = 0;
1042 u16 freq = 2412;
1043
1044 if (channel) {
1045 chanidx = channel->hw_value;
1046 freq = channel->center_freq;
1047 }
1048
1049 BUG_ON(chanidx >= ARRAY_SIZE(carl9170_phy_freq_params));
1050
1051 BUILD_BUG_ON(__CARL9170_NUM_BW != 3);
1052
1053 WARN_ON(carl9170_phy_freq_params[chanidx].freq != freq);
1054
1055 return &carl9170_phy_freq_params[chanidx].params[bw];
1056}
1057
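/*
 * Find the lower pier index for interpolation: the largest idx with
 * f >= freqs[idx], clamped to nfreqs - 2 so that freqs[idx + 1] stays
 * valid. E.g. (illustrative values) freqs = { 10, 20, 30 }, nfreqs = 3:
 * f = 25 yields 1, while f = 5 clamps to 0.
 */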
1058static int carl9170_find_freq_idx(int nfreqs, u8 *freqs, u8 f)
1059{
1060 int idx = nfreqs - 2;
1061
1062 while (idx >= 0) {
1063 if (f >= freqs[idx])
1064 return idx;
1065 idx--;
1066 }
1067
1068 return 0;
1069}
1070
1071static s32 carl9170_interpolate_s32(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
1072{
1073 /* nothing to interpolate, it's horizontal */
1074 if (y2 == y1)
1075 return y1;
1076
1077 /* check if we hit one of the edges */
1078 if (x == x1)
1079 return y1;
1080 if (x == x2)
1081 return y2;
1082
 1083	/* x1 == x2 would divide by zero below; bail out with y1 */
1084 if (x2 == x1)
1085 return y1;
1086
1087 return y1 + (((y2 - y1) * (x - x1)) / (x2 - x1));
1088}
1089
1090static u8 carl9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
1091{
1092#define SHIFT 8
1093 s32 y;
1094
1095 y = carl9170_interpolate_s32(x << SHIFT, x1 << SHIFT,
1096 y1 << SHIFT, x2 << SHIFT, y2 << SHIFT);
1097
1098 /*
1099 * XXX: unwrap this expression
1100 * Isn't it just DIV_ROUND_UP(y, 1<<SHIFT)?
1101 * Can we rely on the compiler to optimise away the div?
1102 */
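	/*
	 * The expression below rounds the Q8 result to nearest
	 * (half up): e.g. y = 300 (~1.17) yields 1, whereas
	 * DIV_ROUND_UP(300, 256) would yield 2. For non-negative y,
	 * DIV_ROUND_CLOSEST(y, 1 << SHIFT) computes the same value.
	 */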
1103 return (y >> SHIFT) + ((y & (1<<(SHIFT-1))) >> (SHIFT - 1));
1104#undef SHIFT
1105}
1106
1107static u8 carl9170_interpolate_val(u8 x, u8 *x_array, u8 *y_array)
1108{
1109 int i;
1110
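	/*
	 * Select the segment of the four-point curve that brackets x,
	 * e.g. (illustrative) x_array = { 10, 20, 30, 40 } and x = 25
	 * picks i = 1, the segment between points 1 and 2.
	 */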
1111 for (i = 0; i < 3; i++) {
1112 if (x <= x_array[i + 1])
1113 break;
1114 }
1115
1116 return carl9170_interpolate_u8(x, x_array[i], y_array[i],
1117 x_array[i + 1], y_array[i + 1]);
1118}
1119
1120static int carl9170_set_freq_cal_data(struct ar9170 *ar,
1121 struct ieee80211_channel *channel)
1122{
1123 u8 *cal_freq_pier;
1124 u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
1125 u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
1126 int chain, idx, i;
1127 u32 phy_data = 0;
1128 u8 f, tmp;
1129
1130 switch (channel->band) {
1131 case IEEE80211_BAND_2GHZ:
1132 f = channel->center_freq - 2300;
1133 cal_freq_pier = ar->eeprom.cal_freq_pier_2G;
1134 i = AR5416_NUM_2G_CAL_PIERS - 1;
1135 break;
1136
1137 case IEEE80211_BAND_5GHZ:
1138 f = (channel->center_freq - 4800) / 5;
1139 cal_freq_pier = ar->eeprom.cal_freq_pier_5G;
1140 i = AR5416_NUM_5G_CAL_PIERS - 1;
1141 break;
1142
1143 default:
1144 return -EINVAL;
1146 }
1147
1148 for (; i >= 0; i--) {
1149 if (cal_freq_pier[i] != 0xff)
1150 break;
1151 }
1152 if (i < 0)
1153 return -EINVAL;
1154
1155 idx = carl9170_find_freq_idx(i, cal_freq_pier, f);
1156
1157 carl9170_regwrite_begin(ar);
1158
1159 for (chain = 0; chain < AR5416_MAX_CHAINS; chain++) {
1160 for (i = 0; i < AR5416_PD_GAIN_ICEPTS; i++) {
1161 struct ar9170_calibration_data_per_freq *cal_pier_data;
1162 int j;
1163
1164 switch (channel->band) {
1165 case IEEE80211_BAND_2GHZ:
1166 cal_pier_data = &ar->eeprom.
1167 cal_pier_data_2G[chain][idx];
1168 break;
1169
1170 case IEEE80211_BAND_5GHZ:
1171 cal_pier_data = &ar->eeprom.
1172 cal_pier_data_5G[chain][idx];
1173 break;
1174
1175 default:
1176 return -EINVAL;
1177 }
1178
1179 for (j = 0; j < 2; j++) {
1180 vpds[j][i] = carl9170_interpolate_u8(f,
1181 cal_freq_pier[idx],
1182 cal_pier_data->vpd_pdg[j][i],
1183 cal_freq_pier[idx + 1],
1184 cal_pier_data[1].vpd_pdg[j][i]);
1185
1186 pwrs[j][i] = carl9170_interpolate_u8(f,
1187 cal_freq_pier[idx],
1188 cal_pier_data->pwr_pdg[j][i],
1189 cal_freq_pier[idx + 1],
1190 cal_pier_data[1].pwr_pdg[j][i]) / 2;
1191 }
1192 }
1193
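		/*
		 * Write the interpolated power detector curves,
		 * packing four 8-bit entries into each 32-bit
		 * register of the per-chain calibration memory.
		 */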
1194 for (i = 0; i < 76; i++) {
1195 if (i < 25) {
1196 tmp = carl9170_interpolate_val(i, &pwrs[0][0],
1197 &vpds[0][0]);
1198 } else {
1199 tmp = carl9170_interpolate_val(i - 12,
1200 &pwrs[1][0],
1201 &vpds[1][0]);
1202 }
1203
1204 phy_data |= tmp << ((i & 3) << 3);
1205 if ((i & 3) == 3) {
1206 carl9170_regwrite(0x1c6280 + chain * 0x1000 +
1207 (i & ~3), phy_data);
1208 phy_data = 0;
1209 }
1210 }
1211
1212 for (i = 19; i < 32; i++)
1213 carl9170_regwrite(0x1c6280 + chain * 0x1000 + (i << 2),
1214 0x0);
1215 }
1216
1217 carl9170_regwrite_finish();
1218 return carl9170_regwrite_result();
1219}
1220
1221static u8 carl9170_get_max_edge_power(struct ar9170 *ar,
1222 u32 freq, struct ar9170_calctl_edges edges[])
1223{
1224 int i;
1225 u8 rc = AR5416_MAX_RATE_POWER;
1226 u8 f;
1227 if (freq < 3000)
1228 f = freq - 2300;
1229 else
1230 f = (freq - 4800) / 5;
1231
1232 for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
1233 if (edges[i].channel == 0xff)
1234 break;
1235 if (f == edges[i].channel) {
1236 /* exact freq match */
1237 rc = edges[i].power_flags & ~AR9170_CALCTL_EDGE_FLAGS;
1238 break;
1239 }
1240 if (i > 0 && f < edges[i].channel) {
1241 if (f > edges[i - 1].channel &&
1242 edges[i - 1].power_flags &
1243 AR9170_CALCTL_EDGE_FLAGS) {
1244 /* lower channel has the inband flag set */
1245 rc = edges[i - 1].power_flags &
1246 ~AR9170_CALCTL_EDGE_FLAGS;
1247 }
1248 break;
1249 }
1250 }
1251
1252 if (i == AR5416_NUM_BAND_EDGES) {
1253 if (f > edges[i - 1].channel &&
1254 edges[i - 1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
1255 /* lower channel has the inband flag set */
1256 rc = edges[i - 1].power_flags &
1257 ~AR9170_CALCTL_EDGE_FLAGS;
1258 }
1259 }
1260 return rc;
1261}
1262
1263static u8 carl9170_get_heavy_clip(struct ar9170 *ar, u32 freq,
1264 enum carl9170_bw bw, struct ar9170_calctl_edges edges[])
1265{
1266 u8 f;
1267 int i;
1268 u8 rc = 0;
1269
1270 if (freq < 3000)
1271 f = freq - 2300;
1272 else
1273 f = (freq - 4800) / 5;
1274
1275 if (bw == CARL9170_BW_40_BELOW || bw == CARL9170_BW_40_ABOVE)
1276 rc |= 0xf0;
1277
1278 for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
1279 if (edges[i].channel == 0xff)
1280 break;
1281 if (f == edges[i].channel) {
1282 if (!(edges[i].power_flags & AR9170_CALCTL_EDGE_FLAGS))
1283 rc |= 0x0f;
1284 break;
1285 }
1286 }
1287
1288 return rc;
1289}
1290
1291/*
1292 * calculate the conformance test limits and the heavy clip parameter
1293 * and apply them to ar->power* (derived from otus hal/hpmain.c, line 3706)
1294 */
1295static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
1296{
1297 u8 ctl_grp; /* CTL group */
1298 u8 ctl_idx; /* CTL index */
1299 int i, j;
1300 struct ctl_modes {
1301 u8 ctl_mode;
1302 u8 max_power;
1303 u8 *pwr_cal_data;
1304 int pwr_cal_len;
1305 } *modes;
1306
1307 /*
1308 * order is relevant in the mode_list_*: we fall back to the
 1309	 * lower indices if any mode is missing from the EEPROM.
1310 */
1311 struct ctl_modes mode_list_2ghz[] = {
1312 { CTL_11B, 0, ar->power_2G_cck, 4 },
1313 { CTL_11G, 0, ar->power_2G_ofdm, 4 },
1314 { CTL_2GHT20, 0, ar->power_2G_ht20, 8 },
1315 { CTL_2GHT40, 0, ar->power_2G_ht40, 8 },
1316 };
1317 struct ctl_modes mode_list_5ghz[] = {
1318 { CTL_11A, 0, ar->power_5G_leg, 4 },
1319 { CTL_5GHT20, 0, ar->power_5G_ht20, 8 },
1320 { CTL_5GHT40, 0, ar->power_5G_ht40, 8 },
1321 };
1322 int nr_modes;
1323
1324#define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n])
1325
1326 ar->heavy_clip = 0;
1327
1328 /*
1329 * TODO: investigate the differences between OTUS'
1330 * hpreg.c::zfHpGetRegulatoryDomain() and
1331 * ath/regd.c::ath_regd_get_band_ctl() -
1332 * e.g. for FCC3_WORLD the OTUS procedure
1333 * always returns CTL_FCC, while the one in ath/ delivers
1334 * CTL_ETSI for 2GHz and CTL_FCC for 5GHz.
1335 */
1336 ctl_grp = ath_regd_get_band_ctl(&ar->common.regulatory,
1337 ar->hw->conf.channel->band);
1338
 1339	/* ctl group not found - invalid band (NO_CTL) or world-wide roaming */
1340 if (ctl_grp == NO_CTL || ctl_grp == SD_NO_CTL)
1341 ctl_grp = CTL_FCC;
1342
1343 if (ctl_grp != CTL_FCC)
1344 /* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
1345 return;
1346
1347 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
1348 modes = mode_list_2ghz;
1349 nr_modes = ARRAY_SIZE(mode_list_2ghz);
1350 } else {
1351 modes = mode_list_5ghz;
1352 nr_modes = ARRAY_SIZE(mode_list_5ghz);
1353 }
1354
1355 for (i = 0; i < nr_modes; i++) {
1356 u8 c = ctl_grp | modes[i].ctl_mode;
1357 for (ctl_idx = 0; ctl_idx < AR5416_NUM_CTLS; ctl_idx++)
1358 if (c == ar->eeprom.ctl_index[ctl_idx])
1359 break;
1360 if (ctl_idx < AR5416_NUM_CTLS) {
1361 int f_off = 0;
1362
1363 /*
1364 * determine heavy clip parameter
1365 * from the 11G edges array
1366 */
1367 if (modes[i].ctl_mode == CTL_11G) {
1368 ar->heavy_clip =
1369 carl9170_get_heavy_clip(ar,
1370 freq, bw, EDGES(ctl_idx, 1));
1371 }
1372
1373 /* adjust freq for 40MHz */
1374 if (modes[i].ctl_mode == CTL_2GHT40 ||
1375 modes[i].ctl_mode == CTL_5GHT40) {
1376 if (bw == CARL9170_BW_40_BELOW)
1377 f_off = -10;
1378 else
1379 f_off = 10;
1380 }
1381
1382 modes[i].max_power =
1383 carl9170_get_max_edge_power(ar,
1384 freq+f_off, EDGES(ctl_idx, 1));
1385
1386 /*
1387 * TODO: check if the regulatory max. power is
1388 * controlled by cfg80211 for DFS.
1389 * (hpmain applies it to max_power itself for DFS freq)
1390 */
1391
1392 } else {
1393 /*
1394 * Workaround in otus driver, hpmain.c, line 3906:
 1395			 * if no data for 5GHT20 is found, take the
 1396			 * legacy 5G value. We extend this here to fall
 1397			 * back from any other HT* or 11G mode, too.
1398 */
1399 int k = i;
1400
1401 modes[i].max_power = AR5416_MAX_RATE_POWER;
1402 while (k-- > 0) {
1403 if (modes[k].max_power !=
1404 AR5416_MAX_RATE_POWER) {
1405 modes[i].max_power = modes[k].max_power;
1406 break;
1407 }
1408 }
1409 }
1410
1411 /* apply max power to pwr_cal_data (ar->power_*) */
1412 for (j = 0; j < modes[i].pwr_cal_len; j++) {
1413 modes[i].pwr_cal_data[j] = min(modes[i].pwr_cal_data[j],
1414 modes[i].max_power);
1415 }
1416 }
1417
1418 if (ar->heavy_clip & 0xf0) {
1419 ar->power_2G_ht40[0]--;
1420 ar->power_2G_ht40[1]--;
1421 ar->power_2G_ht40[2]--;
1422 }
1423 if (ar->heavy_clip & 0xf) {
1424 ar->power_2G_ht20[0]++;
1425 ar->power_2G_ht20[1]++;
1426 ar->power_2G_ht20[2]++;
1427 }
1428
1429#undef EDGES
1430}
1431
1432static int carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
1433 enum carl9170_bw bw)
1434{
1435 struct ar9170_calibration_target_power_legacy *ctpl;
1436 struct ar9170_calibration_target_power_ht *ctph;
1437 u8 *ctpres;
1438 int ntargets;
1439 int idx, i, n;
1440 u8 ackpower, ackchains, f;
1441 u8 pwr_freqs[AR5416_MAX_NUM_TGT_PWRS];
1442
1443 if (freq < 3000)
1444 f = freq - 2300;
1445 else
1446 f = (freq - 4800)/5;
1447
1448 /*
1449 * cycle through the various modes
1450 *
1451 * legacy modes first: 5G, 2G CCK, 2G OFDM
1452 */
1453 for (i = 0; i < 3; i++) {
1454 switch (i) {
1455 case 0: /* 5 GHz legacy */
1456 ctpl = &ar->eeprom.cal_tgt_pwr_5G[0];
1457 ntargets = AR5416_NUM_5G_TARGET_PWRS;
1458 ctpres = ar->power_5G_leg;
1459 break;
1460 case 1: /* 2.4 GHz CCK */
1461 ctpl = &ar->eeprom.cal_tgt_pwr_2G_cck[0];
1462 ntargets = AR5416_NUM_2G_CCK_TARGET_PWRS;
1463 ctpres = ar->power_2G_cck;
1464 break;
1465 case 2: /* 2.4 GHz OFDM */
1466 ctpl = &ar->eeprom.cal_tgt_pwr_2G_ofdm[0];
1467 ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
1468 ctpres = ar->power_2G_ofdm;
1469 break;
1470 default:
1471 BUG();
1472 }
1473
1474 for (n = 0; n < ntargets; n++) {
1475 if (ctpl[n].freq == 0xff)
1476 break;
1477 pwr_freqs[n] = ctpl[n].freq;
1478 }
1479 ntargets = n;
1480 idx = carl9170_find_freq_idx(ntargets, pwr_freqs, f);
1481 for (n = 0; n < 4; n++)
1482 ctpres[n] = carl9170_interpolate_u8(f,
1483 ctpl[idx + 0].freq, ctpl[idx + 0].power[n],
1484 ctpl[idx + 1].freq, ctpl[idx + 1].power[n]);
1485 }
1486
 1487	/* HT modes now: 5G HT20, 5G HT40, 2G HT20, 2G HT40 */
1488 for (i = 0; i < 4; i++) {
1489 switch (i) {
1490 case 0: /* 5 GHz HT 20 */
1491 ctph = &ar->eeprom.cal_tgt_pwr_5G_ht20[0];
1492 ntargets = AR5416_NUM_5G_TARGET_PWRS;
1493 ctpres = ar->power_5G_ht20;
1494 break;
1495 case 1: /* 5 GHz HT 40 */
1496 ctph = &ar->eeprom.cal_tgt_pwr_5G_ht40[0];
1497 ntargets = AR5416_NUM_5G_TARGET_PWRS;
1498 ctpres = ar->power_5G_ht40;
1499 break;
1500 case 2: /* 2.4 GHz HT 20 */
1501 ctph = &ar->eeprom.cal_tgt_pwr_2G_ht20[0];
1502 ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
1503 ctpres = ar->power_2G_ht20;
1504 break;
1505 case 3: /* 2.4 GHz HT 40 */
1506 ctph = &ar->eeprom.cal_tgt_pwr_2G_ht40[0];
1507 ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
1508 ctpres = ar->power_2G_ht40;
1509 break;
1510 default:
1511 BUG();
1512 }
1513
1514 for (n = 0; n < ntargets; n++) {
1515 if (ctph[n].freq == 0xff)
1516 break;
1517 pwr_freqs[n] = ctph[n].freq;
1518 }
1519 ntargets = n;
1520 idx = carl9170_find_freq_idx(ntargets, pwr_freqs, f);
1521 for (n = 0; n < 8; n++)
1522 ctpres[n] = carl9170_interpolate_u8(f,
1523 ctph[idx + 0].freq, ctph[idx + 0].power[n],
1524 ctph[idx + 1].freq, ctph[idx + 1].power[n]);
1525 }
1526
1527 /* calc. conformance test limits and apply to ar->power*[] */
1528 carl9170_calc_ctl(ar, freq, bw);
1529
1530 /* set ACK/CTS TX power */
1531 carl9170_regwrite_begin(ar);
1532
1533 if (ar->eeprom.tx_mask != 1)
1534 ackchains = AR9170_TX_PHY_TXCHAIN_2;
1535 else
1536 ackchains = AR9170_TX_PHY_TXCHAIN_1;
1537
1538 if (freq < 3000)
1539 ackpower = ar->power_2G_ofdm[0] & 0x3f;
1540 else
1541 ackpower = ar->power_5G_leg[0] & 0x3f;
1542
1543 carl9170_regwrite(AR9170_MAC_REG_ACK_TPC,
1544 0x3c1e | ackpower << 20 | ackchains << 26);
1545 carl9170_regwrite(AR9170_MAC_REG_RTS_CTS_TPC,
1546 ackpower << 5 | ackchains << 11 |
1547 ackpower << 21 | ackchains << 27);
1548
1549 carl9170_regwrite(AR9170_MAC_REG_CFEND_QOSNULL_TPC,
1550 ackpower << 5 | ackchains << 11 |
1551 ackpower << 21 | ackchains << 27);
1552
1553 carl9170_regwrite_finish();
1554 return carl9170_regwrite_result();
1555}
1556
1557/* TODO: replace this with sign_extend32(noise, 8) */
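/*
 * Both forms sign-extend the 9-bit two's complement reading, e.g.
 * raw_noise = 0x1f0 gives ~0x1ff | 0x1f0 = -16, the same value as
 * sign_extend32(0x1f0, 8).
 */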
1558static int carl9170_calc_noise_dbm(u32 raw_noise)
1559{
1560 if (raw_noise & 0x100)
1561 return ~0x1ff | raw_noise;
1562 else
1563 return raw_noise;
1564}
1565
1566int carl9170_get_noisefloor(struct ar9170 *ar)
1567{
1568 static const u32 phy_regs[] = {
1569 AR9170_PHY_REG_CCA, AR9170_PHY_REG_CH2_CCA,
1570 AR9170_PHY_REG_EXT_CCA, AR9170_PHY_REG_CH2_EXT_CCA };
1571 u32 phy_res[ARRAY_SIZE(phy_regs)];
1572 int err, i;
1573
1574 BUILD_BUG_ON(ARRAY_SIZE(phy_regs) != ARRAY_SIZE(ar->noise));
1575
1576 err = carl9170_read_mreg(ar, ARRAY_SIZE(phy_regs), phy_regs, phy_res);
1577 if (err)
1578 return err;
1579
1580 for (i = 0; i < 2; i++) {
1581 ar->noise[i] = carl9170_calc_noise_dbm(
1582 (phy_res[i] >> 19) & 0x1ff);
1583
1584 ar->noise[i + 2] = carl9170_calc_noise_dbm(
1585 (phy_res[i + 2] >> 23) & 0x1ff);
1586 }
1587
1588 return 0;
1589}
1590
1591static enum carl9170_bw nl80211_to_carl(enum nl80211_channel_type type)
1592{
1593 switch (type) {
1594 case NL80211_CHAN_NO_HT:
1595 case NL80211_CHAN_HT20:
1596 return CARL9170_BW_20;
1597 case NL80211_CHAN_HT40MINUS:
1598 return CARL9170_BW_40_BELOW;
1599 case NL80211_CHAN_HT40PLUS:
1600 return CARL9170_BW_40_ABOVE;
1601 default:
1602 BUG();
1603 }
1604}
1605
1606int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1607 enum nl80211_channel_type _bw,
1608 enum carl9170_rf_init_mode rfi)
1609{
1610 const struct carl9170_phy_freq_params *freqpar;
1611 struct carl9170_rf_init_result rf_res;
1612 struct carl9170_rf_init rf;
1613 u32 cmd, tmp, offs = 0, new_ht = 0;
1614 int err;
1615 enum carl9170_bw bw;
1616 bool warm_reset;
1617 struct ieee80211_channel *old_channel = NULL;
1618
1619 bw = nl80211_to_carl(_bw);
1620
1621 if (conf_is_ht(&ar->hw->conf))
1622 new_ht |= CARL9170FW_PHY_HT_ENABLE;
1623
1624 if (conf_is_ht40(&ar->hw->conf))
1625 new_ht |= CARL9170FW_PHY_HT_DYN2040;
1626
1627 /* may be NULL at first setup */
1628 if (ar->channel) {
1629 old_channel = ar->channel;
1630 warm_reset = (old_channel->band != channel->band) ||
1631 (old_channel->center_freq ==
1632 channel->center_freq) ||
1633 (ar->ht_settings != new_ht);
1634
1635 ar->channel = NULL;
1636 } else {
1637 warm_reset = true;
1638 }
1639
1640 /* HW workaround */
1641 if (!ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] &&
1642 channel->center_freq <= 2417)
1643 warm_reset = true;
1644
1645 if (rfi != CARL9170_RFI_NONE || warm_reset) {
1646 u32 val;
1647
1648 if (rfi == CARL9170_RFI_COLD)
1649 val = AR9170_PWR_RESET_BB_COLD_RESET;
1650 else
1651 val = AR9170_PWR_RESET_BB_WARM_RESET;
1652
1653 /* warm/cold reset BB/ADDA */
1654 err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET, val);
1655 if (err)
1656 return err;
1657
1658 err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET, 0x0);
1659 if (err)
1660 return err;
1661
1662 err = carl9170_init_phy(ar, channel->band);
1663 if (err)
1664 return err;
1665
1666 err = carl9170_init_rf_banks_0_7(ar,
1667 channel->band == IEEE80211_BAND_5GHZ);
1668 if (err)
1669 return err;
1670
1671 cmd = CARL9170_CMD_RF_INIT;
1672
1673 msleep(100);
1674
1675 err = carl9170_echo_test(ar, 0xaabbccdd);
1676 if (err)
1677 return err;
1678 } else {
1679 cmd = CARL9170_CMD_FREQUENCY;
1680 }
1681
1682 err = carl9170_exec_cmd(ar, CARL9170_CMD_FREQ_START, 0, NULL, 0, NULL);
1683 if (err)
1684 return err;
1685
 1686	err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE,
 1687			0x200);
	if (err)
		return err;
1688
1689 err = carl9170_init_rf_bank4_pwr(ar,
1690 channel->band == IEEE80211_BAND_5GHZ,
1691 channel->center_freq, bw);
1692 if (err)
1693 return err;
1694
1695 tmp = AR9170_PHY_TURBO_FC_SINGLE_HT_LTF1 |
1696 AR9170_PHY_TURBO_FC_HT_EN;
1697
1698 switch (bw) {
1699 case CARL9170_BW_20:
1700 break;
1701 case CARL9170_BW_40_BELOW:
1702 tmp |= AR9170_PHY_TURBO_FC_DYN2040_EN |
1703 AR9170_PHY_TURBO_FC_SHORT_GI_40;
1704 offs = 3;
1705 break;
1706 case CARL9170_BW_40_ABOVE:
1707 tmp |= AR9170_PHY_TURBO_FC_DYN2040_EN |
1708 AR9170_PHY_TURBO_FC_SHORT_GI_40 |
1709 AR9170_PHY_TURBO_FC_DYN2040_PRI_CH;
1710 offs = 1;
1711 break;
1712 default:
1713 BUG();
1714 return -ENOSYS;
1715 }
1716
1717 if (ar->eeprom.tx_mask != 1)
1718 tmp |= AR9170_PHY_TURBO_FC_WALSH;
1719
1720 err = carl9170_write_reg(ar, AR9170_PHY_REG_TURBO, tmp);
1721 if (err)
1722 return err;
1723
1724 err = carl9170_set_freq_cal_data(ar, channel);
1725 if (err)
1726 return err;
1727
1728 err = carl9170_set_power_cal(ar, channel->center_freq, bw);
1729 if (err)
1730 return err;
1731
1732 freqpar = carl9170_get_hw_dyn_params(channel, bw);
1733
1734 rf.ht_settings = new_ht;
1735 if (conf_is_ht40(&ar->hw->conf))
1736 SET_VAL(CARL9170FW_PHY_HT_EXT_CHAN_OFF, rf.ht_settings, offs);
1737
1738 rf.freq = cpu_to_le32(channel->center_freq * 1000);
1739 rf.delta_slope_coeff_exp = cpu_to_le32(freqpar->coeff_exp);
1740 rf.delta_slope_coeff_man = cpu_to_le32(freqpar->coeff_man);
1741 rf.delta_slope_coeff_exp_shgi = cpu_to_le32(freqpar->coeff_exp_shgi);
1742 rf.delta_slope_coeff_man_shgi = cpu_to_le32(freqpar->coeff_man_shgi);
1743
1744 if (rfi != CARL9170_RFI_NONE)
1745 rf.finiteLoopCount = cpu_to_le32(2000);
1746 else
1747 rf.finiteLoopCount = cpu_to_le32(1000);
1748
1749 err = carl9170_exec_cmd(ar, cmd, sizeof(rf), &rf,
1750 sizeof(rf_res), &rf_res);
1751 if (err)
1752 return err;
1753
1754 err = le32_to_cpu(rf_res.ret);
1755 if (err != 0) {
1756 ar->chan_fail++;
1757 ar->total_chan_fail++;
1758
1759 wiphy_err(ar->hw->wiphy, "channel change: %d -> %d "
1760 "failed (%d).\n", old_channel ?
1761 old_channel->center_freq : -1, channel->center_freq,
1762 err);
1763
1764 if ((rfi == CARL9170_RFI_COLD) || (ar->chan_fail > 3)) {
1765 /*
1766 * We have tried very hard to change to _another_
1767 * channel and we've failed to do so!
1768 * Chances are that the PHY/RF is no longer
1769 * operable (due to corruptions/fatal events/bugs?)
1770 * and we need to reset at a higher level.
1771 */
1772 carl9170_restart(ar, CARL9170_RR_TOO_MANY_PHY_ERRORS);
1773 return 0;
1774 }
1775
1776 err = carl9170_set_channel(ar, channel, _bw,
1777 CARL9170_RFI_COLD);
1778 if (err)
1779 return err;
1780 } else {
1781 ar->chan_fail = 0;
1782 }
1783
1784 err = carl9170_get_noisefloor(ar);
1785 if (err)
1786 return err;
1787
1788 if (ar->heavy_clip) {
1789 err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE,
1790 0x200 | ar->heavy_clip);
1791 if (err) {
1792 if (net_ratelimit()) {
1793 wiphy_err(ar->hw->wiphy, "failed to set "
1794 "heavy clip\n");
1795 }
1796
1797 return err;
1798 }
1799 }
1800
1801 /* FIXME: PSM does not work in 5GHz Band */
1802 if (channel->band == IEEE80211_BAND_5GHZ)
1803 ar->ps.off_override |= PS_OFF_5GHZ;
1804 else
1805 ar->ps.off_override &= ~PS_OFF_5GHZ;
1806
1807 ar->channel = channel;
1808 ar->ht_settings = new_ht;
1809 return 0;
1810}
diff --git a/drivers/net/wireless/ath/carl9170/phy.h b/drivers/net/wireless/ath/carl9170/phy.h
new file mode 100644
index 000000000000..53c18d34ffcc
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/phy.h
@@ -0,0 +1,567 @@
1/*
2 * Shared Atheros AR9170 Header
3 *
4 * PHY register map
5 *
6 * Copyright (c) 2008-2009 Atheros Communications Inc.
7 *
8 * Permission to use, copy, modify, and/or distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21#ifndef __CARL9170_SHARED_PHY_H
22#define __CARL9170_SHARED_PHY_H
23
24#define AR9170_PHY_REG_BASE (0x1bc000 + 0x9800)
25#define AR9170_PHY_REG(_n) (AR9170_PHY_REG_BASE + \
26 ((_n) << 2))
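/*
 * AR9170_PHY_REG() maps a 32-bit-word register index to its address,
 * e.g. AR9170_PHY_REG(0x19) == AR9170_PHY_REG_BASE + 0x64, which is
 * AR9170_PHY_REG_CCA below.
 */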
27
28#define AR9170_PHY_REG_TEST (AR9170_PHY_REG_BASE + 0x0000)
29#define AR9170_PHY_TEST_AGC_CLR 0x10000000
30#define AR9170_PHY_TEST_RFSILENT_BB 0x00002000
31
32#define AR9170_PHY_REG_TURBO (AR9170_PHY_REG_BASE + 0x0004)
33#define AR9170_PHY_TURBO_FC_TURBO_MODE 0x00000001
34#define AR9170_PHY_TURBO_FC_TURBO_SHORT 0x00000002
35#define AR9170_PHY_TURBO_FC_DYN2040_EN 0x00000004
36#define AR9170_PHY_TURBO_FC_DYN2040_PRI_ONLY 0x00000008
37#define AR9170_PHY_TURBO_FC_DYN2040_PRI_CH 0x00000010
38/* For 25 MHz channel spacing -- not used but supported by hw */
39#define AR9170_PHY_TURBO_FC_DYN2040_EXT_CH 0x00000020
40#define AR9170_PHY_TURBO_FC_HT_EN 0x00000040
41#define AR9170_PHY_TURBO_FC_SHORT_GI_40 0x00000080
42#define AR9170_PHY_TURBO_FC_WALSH 0x00000100
43#define AR9170_PHY_TURBO_FC_SINGLE_HT_LTF1 0x00000200
44#define AR9170_PHY_TURBO_FC_ENABLE_DAC_FIFO 0x00000800
45
46#define AR9170_PHY_REG_TEST2 (AR9170_PHY_REG_BASE + 0x0008)
47
48#define AR9170_PHY_REG_TIMING2 (AR9170_PHY_REG_BASE + 0x0010)
49#define AR9170_PHY_TIMING2_USE_FORCE 0x00001000
50#define AR9170_PHY_TIMING2_FORCE 0x00000fff
51#define AR9170_PHY_TIMING2_FORCE_S 0
52
53#define AR9170_PHY_REG_TIMING3 (AR9170_PHY_REG_BASE + 0x0014)
54#define AR9170_PHY_TIMING3_DSC_EXP 0x0001e000
55#define AR9170_PHY_TIMING3_DSC_EXP_S 13
56#define AR9170_PHY_TIMING3_DSC_MAN 0xfffe0000
57#define AR9170_PHY_TIMING3_DSC_MAN_S 17
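/*
 * The _S constants hold the shift that pairs with each mask, e.g. the
 * delta slope exponent is extracted with
 * (reg & AR9170_PHY_TIMING3_DSC_EXP) >> AR9170_PHY_TIMING3_DSC_EXP_S.
 */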
58
59#define AR9170_PHY_REG_CHIP_ID (AR9170_PHY_REG_BASE + 0x0018)
60#define AR9170_PHY_CHIP_ID_REV_0 0x80
61#define AR9170_PHY_CHIP_ID_REV_1 0x81
62#define AR9170_PHY_CHIP_ID_9160_REV_0 0xb0
63
64#define AR9170_PHY_REG_ACTIVE (AR9170_PHY_REG_BASE + 0x001c)
65#define AR9170_PHY_ACTIVE_EN 0x00000001
66#define AR9170_PHY_ACTIVE_DIS 0x00000000
67
68#define AR9170_PHY_REG_RF_CTL2 (AR9170_PHY_REG_BASE + 0x0024)
69#define AR9170_PHY_RF_CTL2_TX_END_DATA_START 0x000000ff
70#define AR9170_PHY_RF_CTL2_TX_END_DATA_START_S 0
71#define AR9170_PHY_RF_CTL2_TX_END_PA_ON 0x0000ff00
72#define AR9170_PHY_RF_CTL2_TX_END_PA_ON_S 8
73
74#define AR9170_PHY_REG_RF_CTL3 (AR9170_PHY_REG_BASE + 0x0028)
75#define AR9170_PHY_RF_CTL3_TX_END_TO_A2_RX_ON 0x00ff0000
76#define AR9170_PHY_RF_CTL3_TX_END_TO_A2_RX_ON_S 16
77
78#define AR9170_PHY_REG_ADC_CTL (AR9170_PHY_REG_BASE + 0x002c)
79#define AR9170_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
80#define AR9170_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
81#define AR9170_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
82#define AR9170_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
83#define AR9170_PHY_ADC_CTL_OFF_PWDADC 0x00008000
84#define AR9170_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
85#define AR9170_PHY_ADC_CTL_ON_INBUFGAIN_S 16
86
87#define AR9170_PHY_REG_ADC_SERIAL_CTL (AR9170_PHY_REG_BASE + 0x0030)
88#define AR9170_PHY_ADC_SCTL_SEL_INTERNAL_ADDAC 0x00000000
89#define AR9170_PHY_ADC_SCTL_SEL_EXTERNAL_RADIO 0x00000001
90
91#define AR9170_PHY_REG_RF_CTL4 (AR9170_PHY_REG_BASE + 0x0034)
92#define AR9170_PHY_RF_CTL4_TX_END_XPAB_OFF 0xff000000
93#define AR9170_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
94#define AR9170_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00ff0000
95#define AR9170_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
96#define AR9170_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000ff00
97#define AR9170_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
98#define AR9170_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000ff
99#define AR9170_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
100
101#define AR9170_PHY_REG_TSTDAC_CONST (AR9170_PHY_REG_BASE + 0x003c)
102
103#define AR9170_PHY_REG_SETTLING (AR9170_PHY_REG_BASE + 0x0044)
104#define AR9170_PHY_SETTLING_SWITCH 0x00003f80
105#define AR9170_PHY_SETTLING_SWITCH_S 7
106
107#define AR9170_PHY_REG_RXGAIN (AR9170_PHY_REG_BASE + 0x0048)
108#define AR9170_PHY_REG_RXGAIN_CHAIN_2 (AR9170_PHY_REG_BASE + 0x2048)
109#define AR9170_PHY_RXGAIN_TXRX_ATTEN 0x0003f000
110#define AR9170_PHY_RXGAIN_TXRX_ATTEN_S 12
111#define AR9170_PHY_RXGAIN_TXRX_RF_MAX 0x007c0000
112#define AR9170_PHY_RXGAIN_TXRX_RF_MAX_S 18
113
114#define AR9170_PHY_REG_DESIRED_SZ (AR9170_PHY_REG_BASE + 0x0050)
115#define AR9170_PHY_DESIRED_SZ_ADC 0x000000ff
116#define AR9170_PHY_DESIRED_SZ_ADC_S 0
117#define AR9170_PHY_DESIRED_SZ_PGA 0x0000ff00
118#define AR9170_PHY_DESIRED_SZ_PGA_S 8
119#define AR9170_PHY_DESIRED_SZ_TOT_DES 0x0ff00000
120#define AR9170_PHY_DESIRED_SZ_TOT_DES_S 20
121
122#define AR9170_PHY_REG_FIND_SIG (AR9170_PHY_REG_BASE + 0x0058)
123#define AR9170_PHY_FIND_SIG_FIRSTEP 0x0003f000
124#define AR9170_PHY_FIND_SIG_FIRSTEP_S 12
125#define AR9170_PHY_FIND_SIG_FIRPWR 0x03fc0000
126#define AR9170_PHY_FIND_SIG_FIRPWR_S 18
127
128#define AR9170_PHY_REG_AGC_CTL1 (AR9170_PHY_REG_BASE + 0x005c)
129#define AR9170_PHY_AGC_CTL1_COARSE_LOW 0x00007f80
130#define AR9170_PHY_AGC_CTL1_COARSE_LOW_S 7
131#define AR9170_PHY_AGC_CTL1_COARSE_HIGH 0x003f8000
132#define AR9170_PHY_AGC_CTL1_COARSE_HIGH_S 15
133
134#define AR9170_PHY_REG_AGC_CONTROL (AR9170_PHY_REG_BASE + 0x0060)
135#define AR9170_PHY_AGC_CONTROL_CAL 0x00000001
136#define AR9170_PHY_AGC_CONTROL_NF 0x00000002
137#define AR9170_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
138#define AR9170_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
139#define AR9170_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
140
141#define AR9170_PHY_REG_CCA (AR9170_PHY_REG_BASE + 0x0064)
142#define AR9170_PHY_CCA_MINCCA_PWR 0x0ff80000
143#define AR9170_PHY_CCA_MINCCA_PWR_S 19
144#define AR9170_PHY_CCA_THRESH62 0x0007f000
145#define AR9170_PHY_CCA_THRESH62_S 12
146
147#define AR9170_PHY_REG_SFCORR (AR9170_PHY_REG_BASE + 0x0068)
148#define AR9170_PHY_SFCORR_M2COUNT_THR 0x0000001f
149#define AR9170_PHY_SFCORR_M2COUNT_THR_S 0
150#define AR9170_PHY_SFCORR_M1_THRESH 0x00fe0000
151#define AR9170_PHY_SFCORR_M1_THRESH_S 17
152#define AR9170_PHY_SFCORR_M2_THRESH 0x7f000000
153#define AR9170_PHY_SFCORR_M2_THRESH_S 24
154
155#define AR9170_PHY_REG_SFCORR_LOW (AR9170_PHY_REG_BASE + 0x006c)
156#define AR9170_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
157#define AR9170_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003f00
158#define AR9170_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
159#define AR9170_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001fc000
160#define AR9170_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
161#define AR9170_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0fe00000
162#define AR9170_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
163
164#define AR9170_PHY_REG_SLEEP_CTR_CONTROL (AR9170_PHY_REG_BASE + 0x0070)
165#define AR9170_PHY_REG_SLEEP_CTR_LIMIT (AR9170_PHY_REG_BASE + 0x0074)
166#define AR9170_PHY_REG_SLEEP_SCAL (AR9170_PHY_REG_BASE + 0x0078)
167
168#define AR9170_PHY_REG_PLL_CTL (AR9170_PHY_REG_BASE + 0x007c)
169#define AR9170_PHY_PLL_CTL_40 0xaa
170#define AR9170_PHY_PLL_CTL_40_5413 0x04
171#define AR9170_PHY_PLL_CTL_44 0xab
172#define AR9170_PHY_PLL_CTL_44_2133 0xeb
173#define AR9170_PHY_PLL_CTL_40_2133 0xea
174
175#define AR9170_PHY_REG_BIN_MASK_1 (AR9170_PHY_REG_BASE + 0x0100)
176#define AR9170_PHY_REG_BIN_MASK_2 (AR9170_PHY_REG_BASE + 0x0104)
177#define AR9170_PHY_REG_BIN_MASK_3 (AR9170_PHY_REG_BASE + 0x0108)
178#define AR9170_PHY_REG_MASK_CTL (AR9170_PHY_REG_BASE + 0x010c)
179
180/* analogue power on time (100ns) */
181#define AR9170_PHY_REG_RX_DELAY (AR9170_PHY_REG_BASE + 0x0114)
182#define AR9170_PHY_REG_SEARCH_START_DELAY (AR9170_PHY_REG_BASE + 0x0118)
183#define AR9170_PHY_RX_DELAY_DELAY 0x00003fff
184
185#define AR9170_PHY_REG_TIMING_CTRL4(_i) (AR9170_PHY_REG_BASE + \
186 (0x0120 + ((_i) << 12)))
187#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01f
188#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
189#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7e0
190#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
191#define AR9170_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
192#define AR9170_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xf000
193#define AR9170_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
194#define AR9170_PHY_TIMING_CTRL4_DO_IQCAL 0x10000
195#define AR9170_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
196#define AR9170_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
197#define AR9170_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
198#define AR9170_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
199
200#define AR9170_PHY_REG_TIMING5 (AR9170_PHY_REG_BASE + 0x0124)
201#define AR9170_PHY_TIMING5_CYCPWR_THR1 0x000000fe
202#define AR9170_PHY_TIMING5_CYCPWR_THR1_S 1
203
204#define AR9170_PHY_REG_POWER_TX_RATE1 (AR9170_PHY_REG_BASE + 0x0134)
205#define AR9170_PHY_REG_POWER_TX_RATE2 (AR9170_PHY_REG_BASE + 0x0138)
206#define AR9170_PHY_REG_POWER_TX_RATE_MAX (AR9170_PHY_REG_BASE + 0x013c)
207#define AR9170_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
208
209#define AR9170_PHY_REG_FRAME_CTL (AR9170_PHY_REG_BASE + 0x0144)
210#define AR9170_PHY_FRAME_CTL_TX_CLIP 0x00000038
211#define AR9170_PHY_FRAME_CTL_TX_CLIP_S 3
212
213#define AR9170_PHY_REG_SPUR_REG (AR9170_PHY_REG_BASE + 0x014c)
214#define AR9170_PHY_SPUR_REG_MASK_RATE_CNTL (0xff << 18)
215#define AR9170_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
216#define AR9170_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
217#define AR9170_PHY_SPUR_REG_MASK_RATE_SELECT (0xff << 9)
218#define AR9170_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
219#define AR9170_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
220#define AR9170_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7f
221#define AR9170_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
222
223#define AR9170_PHY_REG_RADAR_EXT (AR9170_PHY_REG_BASE + 0x0140)
224#define AR9170_PHY_RADAR_EXT_ENA 0x00004000
225
226#define AR9170_PHY_REG_RADAR_0 (AR9170_PHY_REG_BASE + 0x0154)
227#define AR9170_PHY_RADAR_0_ENA 0x00000001
228#define AR9170_PHY_RADAR_0_FFT_ENA 0x80000000
229/* inband pulse threshold */
230#define AR9170_PHY_RADAR_0_INBAND 0x0000003e
231#define AR9170_PHY_RADAR_0_INBAND_S 1
232/* pulse RSSI threshold */
233#define AR9170_PHY_RADAR_0_PRSSI 0x00000fc0
234#define AR9170_PHY_RADAR_0_PRSSI_S 6
235/* pulse height threshold */
236#define AR9170_PHY_RADAR_0_HEIGHT 0x0003f000
237#define AR9170_PHY_RADAR_0_HEIGHT_S 12
238/* radar RSSI threshold */
239#define AR9170_PHY_RADAR_0_RRSSI 0x00fc0000
240#define AR9170_PHY_RADAR_0_RRSSI_S 18
241/* radar firepower threshold */
242#define AR9170_PHY_RADAR_0_FIRPWR 0x7f000000
243#define AR9170_PHY_RADAR_0_FIRPWR_S 24
244
245#define AR9170_PHY_REG_RADAR_1 (AR9170_PHY_REG_BASE + 0x0158)
246#define AR9170_PHY_RADAR_1_RELPWR_ENA 0x00800000
247#define AR9170_PHY_RADAR_1_USE_FIR128 0x00400000
248#define AR9170_PHY_RADAR_1_RELPWR_THRESH 0x003f0000
249#define AR9170_PHY_RADAR_1_RELPWR_THRESH_S 16
250#define AR9170_PHY_RADAR_1_BLOCK_CHECK 0x00008000
251#define AR9170_PHY_RADAR_1_MAX_RRSSI 0x00004000
252#define AR9170_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
253#define AR9170_PHY_RADAR_1_RELSTEP_THRESH 0x00001f00
254#define AR9170_PHY_RADAR_1_RELSTEP_THRESH_S 8
255#define AR9170_PHY_RADAR_1_MAXLEN 0x000000ff
256#define AR9170_PHY_RADAR_1_MAXLEN_S 0
257
258#define AR9170_PHY_REG_SWITCH_CHAIN_0 (AR9170_PHY_REG_BASE + 0x0160)
259#define AR9170_PHY_REG_SWITCH_CHAIN_2 (AR9170_PHY_REG_BASE + 0x2160)
260
261#define AR9170_PHY_REG_SWITCH_COM (AR9170_PHY_REG_BASE + 0x0164)
262
263#define AR9170_PHY_REG_CCA_THRESHOLD (AR9170_PHY_REG_BASE + 0x0168)
264
265#define AR9170_PHY_REG_SIGMA_DELTA (AR9170_PHY_REG_BASE + 0x016c)
266#define AR9170_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
267#define AR9170_PHY_SIGMA_DELTA_ADC_SEL_S 0
268#define AR9170_PHY_SIGMA_DELTA_FILT2 0x000000f8
269#define AR9170_PHY_SIGMA_DELTA_FILT2_S 3
270#define AR9170_PHY_SIGMA_DELTA_FILT1 0x00001f00
271#define AR9170_PHY_SIGMA_DELTA_FILT1_S 8
272#define AR9170_PHY_SIGMA_DELTA_ADC_CLIP 0x01ffe000
273#define AR9170_PHY_SIGMA_DELTA_ADC_CLIP_S 13
274
275#define AR9170_PHY_REG_RESTART (AR9170_PHY_REG_BASE + 0x0170)
276#define AR9170_PHY_RESTART_DIV_GC 0x001c0000
277#define AR9170_PHY_RESTART_DIV_GC_S 18
278
279#define AR9170_PHY_REG_RFBUS_REQ (AR9170_PHY_REG_BASE + 0x017c)
280#define AR9170_PHY_RFBUS_REQ_EN 0x00000001
281
282#define AR9170_PHY_REG_TIMING7 (AR9170_PHY_REG_BASE + 0x0180)
283#define AR9170_PHY_REG_TIMING8 (AR9170_PHY_REG_BASE + 0x0184)
284#define AR9170_PHY_TIMING8_PILOT_MASK_2 0x000fffff
285#define AR9170_PHY_TIMING8_PILOT_MASK_2_S 0
286
287#define AR9170_PHY_REG_BIN_MASK2_1 (AR9170_PHY_REG_BASE + 0x0188)
288#define AR9170_PHY_REG_BIN_MASK2_2 (AR9170_PHY_REG_BASE + 0x018c)
289#define AR9170_PHY_REG_BIN_MASK2_3 (AR9170_PHY_REG_BASE + 0x0190)
290#define AR9170_PHY_REG_BIN_MASK2_4 (AR9170_PHY_REG_BASE + 0x0194)
291#define AR9170_PHY_BIN_MASK2_4_MASK_4 0x00003fff
292#define AR9170_PHY_BIN_MASK2_4_MASK_4_S 0
293
294#define AR9170_PHY_REG_TIMING9 (AR9170_PHY_REG_BASE + 0x0198)
295#define AR9170_PHY_REG_TIMING10 (AR9170_PHY_REG_BASE + 0x019c)
296#define AR9170_PHY_TIMING10_PILOT_MASK_2 0x000fffff
297#define AR9170_PHY_TIMING10_PILOT_MASK_2_S 0
298
299#define AR9170_PHY_REG_TIMING11 (AR9170_PHY_REG_BASE + 0x01a0)
300#define AR9170_PHY_TIMING11_SPUR_DELTA_PHASE 0x000fffff
301#define AR9170_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
302#define AR9170_PHY_TIMING11_SPUR_FREQ_SD 0x3ff00000
303#define AR9170_PHY_TIMING11_SPUR_FREQ_SD_S 20
304#define AR9170_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
305#define AR9170_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
306
307#define AR9170_PHY_REG_RX_CHAINMASK (AR9170_PHY_REG_BASE + 0x01a4)
308#define AR9170_PHY_REG_NEW_ADC_DC_GAIN_CORR(_i) (AR9170_PHY_REG_BASE + \
309 0x01b4 + ((_i) << 12))
310#define AR9170_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
311#define AR9170_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
312
313#define AR9170_PHY_REG_MULTICHAIN_GAIN_CTL (AR9170_PHY_REG_BASE + 0x01ac)
314#define AR9170_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
315#define AR9170_PHY_9285_ANT_DIV_CTL 0x01000000
316#define AR9170_PHY_9285_ANT_DIV_CTL_S 24
317#define AR9170_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
318#define AR9170_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
319#define AR9170_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
320#define AR9170_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
321#define AR9170_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
322#define AR9170_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
323#define AR9170_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
324#define AR9170_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
325#define AR9170_PHY_9285_ANT_DIV_LNA1 2
326#define AR9170_PHY_9285_ANT_DIV_LNA2 1
327#define AR9170_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
328#define AR9170_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
329#define AR9170_PHY_9285_ANT_DIV_GAINTB_0 0
330#define AR9170_PHY_9285_ANT_DIV_GAINTB_1 1
331
332#define AR9170_PHY_REG_EXT_CCA0 (AR9170_PHY_REG_BASE + 0x01b8)
333#define AR9170_PHY_REG_EXT_CCA0_THRESH62 0x000000ff
334#define AR9170_PHY_REG_EXT_CCA0_THRESH62_S 0
335
336#define AR9170_PHY_REG_EXT_CCA (AR9170_PHY_REG_BASE + 0x01bc)
337#define AR9170_PHY_EXT_CCA_CYCPWR_THR1 0x0000fe00
338#define AR9170_PHY_EXT_CCA_CYCPWR_THR1_S 9
339#define AR9170_PHY_EXT_CCA_THRESH62 0x007f0000
340#define AR9170_PHY_EXT_CCA_THRESH62_S 16
341#define AR9170_PHY_EXT_MINCCA_PWR 0xff800000
342#define AR9170_PHY_EXT_MINCCA_PWR_S 23
343
344#define AR9170_PHY_REG_SFCORR_EXT (AR9170_PHY_REG_BASE + 0x01c0)
345#define AR9170_PHY_SFCORR_EXT_M1_THRESH 0x0000007f
346#define AR9170_PHY_SFCORR_EXT_M1_THRESH_S 0
347#define AR9170_PHY_SFCORR_EXT_M2_THRESH 0x00003f80
348#define AR9170_PHY_SFCORR_EXT_M2_THRESH_S 7
349#define AR9170_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001fc000
350#define AR9170_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
351#define AR9170_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0fe00000
352#define AR9170_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
353#define AR9170_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
354
355#define AR9170_PHY_REG_HALFGI (AR9170_PHY_REG_BASE + 0x01d0)
356#define AR9170_PHY_HALFGI_DSC_MAN 0x0007fff0
357#define AR9170_PHY_HALFGI_DSC_MAN_S 4
358#define AR9170_PHY_HALFGI_DSC_EXP 0x0000000f
359#define AR9170_PHY_HALFGI_DSC_EXP_S 0
360
361#define AR9170_PHY_REG_CHANNEL_MASK_01_30 (AR9170_PHY_REG_BASE + 0x01d4)
362#define AR9170_PHY_REG_CHANNEL_MASK_31_60 (AR9170_PHY_REG_BASE + 0x01d8)
363
364#define AR9170_PHY_REG_CHAN_INFO_MEMORY (AR9170_PHY_REG_BASE + 0x01dc)
365#define AR9170_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
366
367#define AR9170_PHY_REG_HEAVY_CLIP_ENABLE (AR9170_PHY_REG_BASE + 0x01e0)
368#define AR9170_PHY_REG_HEAVY_CLIP_FACTOR_RIFS (AR9170_PHY_REG_BASE + 0x01ec)
369#define AR9170_PHY_RIFS_INIT_DELAY 0x03ff0000
370
371#define AR9170_PHY_REG_CALMODE (AR9170_PHY_REG_BASE + 0x01f0)
372#define AR9170_PHY_CALMODE_IQ 0x00000000
373#define AR9170_PHY_CALMODE_ADC_GAIN 0x00000001
374#define AR9170_PHY_CALMODE_ADC_DC_PER 0x00000002
375#define AR9170_PHY_CALMODE_ADC_DC_INIT 0x00000003
376
377#define AR9170_PHY_REG_REFCLKDLY (AR9170_PHY_REG_BASE + 0x01f4)
378#define AR9170_PHY_REG_REFCLKPD (AR9170_PHY_REG_BASE + 0x01f8)
379
380
381#define AR9170_PHY_REG_CAL_MEAS_0(_i) (AR9170_PHY_REG_BASE + \
382 0x0410 + ((_i) << 12))
383#define AR9170_PHY_REG_CAL_MEAS_1(_i) (AR9170_PHY_REG_BASE + \
 384						 0x0414 + ((_i) << 12))
385#define AR9170_PHY_REG_CAL_MEAS_2(_i) (AR9170_PHY_REG_BASE + \
386 0x0418 + ((_i) << 12))
387#define AR9170_PHY_REG_CAL_MEAS_3(_i) (AR9170_PHY_REG_BASE + \
388 0x041c + ((_i) << 12))
389
390#define AR9170_PHY_REG_CURRENT_RSSI (AR9170_PHY_REG_BASE + 0x041c)
391
392#define AR9170_PHY_REG_RFBUS_GRANT (AR9170_PHY_REG_BASE + 0x0420)
393#define AR9170_PHY_RFBUS_GRANT_EN 0x00000001
394
395#define AR9170_PHY_REG_CHAN_INFO_GAIN_DIFF (AR9170_PHY_REG_BASE + 0x04f4)
396#define AR9170_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
397
398#define AR9170_PHY_REG_CHAN_INFO_GAIN (AR9170_PHY_REG_BASE + 0x04fc)
399
400#define AR9170_PHY_REG_MODE (AR9170_PHY_REG_BASE + 0x0a00)
401#define AR9170_PHY_MODE_ASYNCFIFO 0x80
402#define AR9170_PHY_MODE_AR2133 0x08
403#define AR9170_PHY_MODE_AR5111 0x00
404#define AR9170_PHY_MODE_AR5112 0x08
405#define AR9170_PHY_MODE_DYNAMIC 0x04
406#define AR9170_PHY_MODE_RF2GHZ 0x02
407#define AR9170_PHY_MODE_RF5GHZ 0x00
408#define AR9170_PHY_MODE_CCK 0x01
409#define AR9170_PHY_MODE_OFDM 0x00
410#define AR9170_PHY_MODE_DYN_CCK_DISABLE 0x100
411
412#define AR9170_PHY_REG_CCK_TX_CTRL (AR9170_PHY_REG_BASE + 0x0a04)
413#define AR9170_PHY_CCK_TX_CTRL_JAPAN 0x00000010
414#define AR9170_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000c
415#define AR9170_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
416
417#define AR9170_PHY_REG_CCK_DETECT (AR9170_PHY_REG_BASE + 0x0a08)
418#define AR9170_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003f
419#define AR9170_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
420/* [12:6] settling time for antenna switch */
421#define AR9170_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001fc0
422#define AR9170_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
423#define AR9170_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
424#define AR9170_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
425
426#define AR9170_PHY_REG_GAIN_2GHZ_CHAIN_2 (AR9170_PHY_REG_BASE + 0x2a0c)
427#define AR9170_PHY_REG_GAIN_2GHZ (AR9170_PHY_REG_BASE + 0x0a0c)
428#define AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00fc0000
429#define AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
430#define AR9170_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003c00
431#define AR9170_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
432#define AR9170_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001f
433#define AR9170_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
434#define AR9170_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003e0000
435#define AR9170_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
436#define AR9170_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001f000
437#define AR9170_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
438#define AR9170_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000fc0
439#define AR9170_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
440#define AR9170_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003f
441#define AR9170_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
442
443#define AR9170_PHY_REG_CCK_RXCTRL4 (AR9170_PHY_REG_BASE + 0x0a1c)
444#define AR9170_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01f80000
445#define AR9170_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
446
447#define AR9170_PHY_REG_DAG_CTRLCCK (AR9170_PHY_REG_BASE + 0x0a28)
448#define AR9170_REG_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
449#define AR9170_REG_DAG_CTRLCCK_RSSI_THR 0x0001fc00
450#define AR9170_REG_DAG_CTRLCCK_RSSI_THR_S 10
451
452#define AR9170_PHY_REG_FORCE_CLKEN_CCK (AR9170_PHY_REG_BASE + 0x0a2c)
453#define AR9170_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
454
455#define AR9170_PHY_REG_POWER_TX_RATE3 (AR9170_PHY_REG_BASE + 0x0a34)
456#define AR9170_PHY_REG_POWER_TX_RATE4 (AR9170_PHY_REG_BASE + 0x0a38)
457
458#define AR9170_PHY_REG_SCRM_SEQ_XR (AR9170_PHY_REG_BASE + 0x0a3c)
459#define AR9170_PHY_REG_HEADER_DETECT_XR (AR9170_PHY_REG_BASE + 0x0a40)
460#define AR9170_PHY_REG_CHIRP_DETECTED_XR (AR9170_PHY_REG_BASE + 0x0a44)
461#define AR9170_PHY_REG_BLUETOOTH (AR9170_PHY_REG_BASE + 0x0a54)
462
463#define AR9170_PHY_REG_TPCRG1 (AR9170_PHY_REG_BASE + 0x0a58)
464#define AR9170_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
465#define AR9170_PHY_TPCRG1_NUM_PD_GAIN_S 14
466#define AR9170_PHY_TPCRG1_PD_GAIN_1 0x00030000
467#define AR9170_PHY_TPCRG1_PD_GAIN_1_S 16
468#define AR9170_PHY_TPCRG1_PD_GAIN_2 0x000c0000
469#define AR9170_PHY_TPCRG1_PD_GAIN_2_S 18
470#define AR9170_PHY_TPCRG1_PD_GAIN_3 0x00300000
471#define AR9170_PHY_TPCRG1_PD_GAIN_3_S 20
472#define AR9170_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
473#define AR9170_PHY_TPCRG1_PD_CAL_ENABLE_S 22
474
475#define AR9170_PHY_REG_TX_PWRCTRL4 (AR9170_PHY_REG_BASE + 0x0a64)
476#define AR9170_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
477#define AR9170_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
478#define AR9170_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001fe
479#define AR9170_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
480
481#define AR9170_PHY_REG_ANALOG_SWAP (AR9170_PHY_REG_BASE + 0x0a68)
482#define AR9170_PHY_ANALOG_SWAP_AB 0x0001
483#define AR9170_PHY_ANALOG_SWAP_ALT_CHAIN 0x00000040
484
485#define AR9170_PHY_REG_TPCRG5 (AR9170_PHY_REG_BASE + 0x0a6c)
486#define AR9170_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000f
487#define AR9170_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
488#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003f0
489#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
490#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000fc00
491#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
492#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003f0000
493#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
494#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0fc00000
495#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
496
497#define AR9170_PHY_REG_TX_PWRCTRL6_0 (AR9170_PHY_REG_BASE + 0x0a70)
498#define AR9170_PHY_REG_TX_PWRCTRL6_1 (AR9170_PHY_REG_BASE + 0x1a70)
499#define AR9170_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
500#define AR9170_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
501
502#define AR9170_PHY_REG_TX_PWRCTRL7 (AR9170_PHY_REG_BASE + 0x0a74)
503#define AR9170_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01f80000
504#define AR9170_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
505
506#define AR9170_PHY_REG_TX_PWRCTRL9 (AR9170_PHY_REG_BASE + 0x0a7c)
507#define AR9170_PHY_TX_DESIRED_SCALE_CCK 0x00007c00
508#define AR9170_PHY_TX_DESIRED_SCALE_CCK_S 10
509#define AR9170_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
510#define AR9170_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
511
512#define AR9170_PHY_REG_TX_GAIN_TBL1 (AR9170_PHY_REG_BASE + 0x0b00)
513#define AR9170_PHY_TX_GAIN 0x0007f000
514#define AR9170_PHY_TX_GAIN_S 12
515
516/* Carrier leak calibration control, do it after AGC calibration */
517#define AR9170_PHY_REG_CL_CAL_CTL (AR9170_PHY_REG_BASE + 0x0b58)
518#define AR9170_PHY_CL_CAL_ENABLE 0x00000002
519#define AR9170_PHY_CL_CAL_PARALLEL_CAL_ENABLE 0x00000001
520
521#define AR9170_PHY_REG_POWER_TX_RATE5 (AR9170_PHY_REG_BASE + 0x0b8c)
522#define AR9170_PHY_REG_POWER_TX_RATE6 (AR9170_PHY_REG_BASE + 0x0b90)
523
524#define AR9170_PHY_REG_CH0_TX_PWRCTRL11 (AR9170_PHY_REG_BASE + 0x0b98)
525#define AR9170_PHY_REG_CH1_TX_PWRCTRL11 (AR9170_PHY_REG_BASE + 0x1b98)
526#define AR9170_PHY_TX_CHX_PWRCTRL_OLPC_TEMP_COMP 0x0000fc00
527#define AR9170_PHY_TX_CHX_PWRCTRL_OLPC_TEMP_COMP_S 10
528
529#define AR9170_PHY_REG_CAL_CHAINMASK (AR9170_PHY_REG_BASE + 0x0b9c)
530#define AR9170_PHY_REG_VIT_MASK2_M_46_61 (AR9170_PHY_REG_BASE + 0x0ba0)
531#define AR9170_PHY_REG_MASK2_M_31_45 (AR9170_PHY_REG_BASE + 0x0ba4)
532#define AR9170_PHY_REG_MASK2_M_16_30 (AR9170_PHY_REG_BASE + 0x0ba8)
533#define AR9170_PHY_REG_MASK2_M_00_15 (AR9170_PHY_REG_BASE + 0x0bac)
534#define AR9170_PHY_REG_PILOT_MASK_01_30 (AR9170_PHY_REG_BASE + 0x0bb0)
535#define AR9170_PHY_REG_PILOT_MASK_31_60 (AR9170_PHY_REG_BASE + 0x0bb4)
536#define AR9170_PHY_REG_MASK2_P_15_01 (AR9170_PHY_REG_BASE + 0x0bb8)
537#define AR9170_PHY_REG_MASK2_P_30_16 (AR9170_PHY_REG_BASE + 0x0bbc)
538#define AR9170_PHY_REG_MASK2_P_45_31 (AR9170_PHY_REG_BASE + 0x0bc0)
539#define AR9170_PHY_REG_MASK2_P_61_45 (AR9170_PHY_REG_BASE + 0x0bc4)
540#define AR9170_PHY_REG_POWER_TX_SUB (AR9170_PHY_REG_BASE + 0x0bc8)
541#define AR9170_PHY_REG_POWER_TX_RATE7 (AR9170_PHY_REG_BASE + 0x0bcc)
542#define AR9170_PHY_REG_POWER_TX_RATE8 (AR9170_PHY_REG_BASE + 0x0bd0)
543#define AR9170_PHY_REG_POWER_TX_RATE9 (AR9170_PHY_REG_BASE + 0x0bd4)
544#define AR9170_PHY_REG_XPA_CFG (AR9170_PHY_REG_BASE + 0x0bd8)
 545#define	AR9170_PHY_FORCE_XPA_CFG		0x00000001
546#define AR9170_PHY_FORCE_XPA_CFG_S 0
547
548#define AR9170_PHY_REG_CH1_CCA (AR9170_PHY_REG_BASE + 0x1064)
549#define AR9170_PHY_CH1_MINCCA_PWR 0x0ff80000
550#define AR9170_PHY_CH1_MINCCA_PWR_S 19
551
552#define AR9170_PHY_REG_CH2_CCA (AR9170_PHY_REG_BASE + 0x2064)
553#define AR9170_PHY_CH2_MINCCA_PWR 0x0ff80000
554#define AR9170_PHY_CH2_MINCCA_PWR_S 19
555
556#define AR9170_PHY_REG_CH1_EXT_CCA (AR9170_PHY_REG_BASE + 0x11bc)
557#define AR9170_PHY_CH1_EXT_MINCCA_PWR 0xff800000
558#define AR9170_PHY_CH1_EXT_MINCCA_PWR_S 23
559
560#define AR9170_PHY_REG_CH2_EXT_CCA (AR9170_PHY_REG_BASE + 0x21bc)
561#define AR9170_PHY_CH2_EXT_MINCCA_PWR 0xff800000
562#define AR9170_PHY_CH2_EXT_MINCCA_PWR_S 23
563
564#define REDUCE_CHAIN_0 0x00000050
565#define REDUCE_CHAIN_1 0x00000051
566
567#endif /* __CARL9170_SHARED_PHY_H */
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
new file mode 100644
index 000000000000..671dbc429547
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -0,0 +1,909 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * 802.11 & command trap routines
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/etherdevice.h>
44#include <linux/crc32.h>
45#include <net/mac80211.h>
46#include "carl9170.h"
47#include "hw.h"
48#include "cmd.h"
49
50static void carl9170_dbg_message(struct ar9170 *ar, const char *buf, u32 len)
51{
52 bool restart = false;
53 enum carl9170_restart_reasons reason = CARL9170_RR_NO_REASON;
54
55 if (len > 3) {
56 if (memcmp(buf, CARL9170_ERR_MAGIC, 3) == 0) {
57 ar->fw.err_counter++;
58 if (ar->fw.err_counter > 3) {
59 restart = true;
60 reason = CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS;
61 }
62 }
63
64 if (memcmp(buf, CARL9170_BUG_MAGIC, 3) == 0) {
65 ar->fw.bug_counter++;
66 restart = true;
67 reason = CARL9170_RR_FATAL_FIRMWARE_ERROR;
68 }
69 }
70
71 wiphy_info(ar->hw->wiphy, "FW: %.*s\n", len, buf);
72
73 if (restart)
74 carl9170_restart(ar, reason);
75}
76
77static void carl9170_handle_ps(struct ar9170 *ar, struct carl9170_rsp *rsp)
78{
79 u32 ps;
80 bool new_ps;
81
82 ps = le32_to_cpu(rsp->psm.state);
83
84 new_ps = (ps & CARL9170_PSM_COUNTER) != CARL9170_PSM_WAKE;
85 if (ar->ps.state != new_ps) {
86 if (!new_ps) {
87 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
88 ar->ps.last_action);
89 }
90
91 ar->ps.last_action = jiffies;
92
93 ar->ps.state = new_ps;
94 }
95}
96
97static int carl9170_check_sequence(struct ar9170 *ar, unsigned int seq)
98{
99 if (ar->cmd_seq < -1)
100 return 0;
101
102 /*
103 * Initialize Counter
104 */
105 if (ar->cmd_seq < 0)
106 ar->cmd_seq = seq;
107
108 /*
 110	 * The sequence is strictly monotonically increasing and never skips!
110 *
111 * Therefore we can safely assume that whenever we received an
112 * unexpected sequence we have lost some valuable data.
113 */
114 if (seq != ar->cmd_seq) {
115 int count;
116
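		/*
		 * e.g. with 4 command response buffers, expecting
		 * seq 2 but receiving seq 5 means three responses
		 * were lost: (5 - 2) % 4 = 3.
		 */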
117 count = (seq - ar->cmd_seq) % ar->fw.cmd_bufs;
118
119 wiphy_err(ar->hw->wiphy, "lost %d command responses/traps! "
120 "w:%d g:%d\n", count, ar->cmd_seq, seq);
121
122 carl9170_restart(ar, CARL9170_RR_LOST_RSP);
123 return -EIO;
124 }
125
126 ar->cmd_seq = (ar->cmd_seq + 1) % ar->fw.cmd_bufs;
127 return 0;
128}
129
130static void carl9170_cmd_callback(struct ar9170 *ar, u32 len, void *buffer)
131{
132 /*
133 * Some commands may have a variable response length
134 * and we cannot predict the correct length in advance.
135 * So we only check if we provided enough space for the data.
136 */
137 if (unlikely(ar->readlen != (len - 4))) {
 138		dev_warn(&ar->udev->dev, "received invalid command response: "
 139			 "got %d, instead of %d\n", len - 4, ar->readlen);
140 print_hex_dump_bytes("carl9170 cmd:", DUMP_PREFIX_OFFSET,
141 ar->cmd_buf, (ar->cmd.hdr.len + 4) & 0x3f);
142 print_hex_dump_bytes("carl9170 rsp:", DUMP_PREFIX_OFFSET,
143 buffer, len);
144 /*
145 * Do not complete. The command times out,
146 * and we get a stack trace from there.
147 */
148 carl9170_restart(ar, CARL9170_RR_INVALID_RSP);
149 }
150
151 spin_lock(&ar->cmd_lock);
152 if (ar->readbuf) {
153 if (len >= 4)
154 memcpy(ar->readbuf, buffer + 4, len - 4);
155
156 ar->readbuf = NULL;
157 }
158 complete(&ar->cmd_wait);
159 spin_unlock(&ar->cmd_lock);
160}
161
162void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
163{
164 struct carl9170_rsp *cmd = (void *) buf;
165 struct ieee80211_vif *vif;
166
167 if (carl9170_check_sequence(ar, cmd->hdr.seq))
168 return;
169
170 if ((cmd->hdr.cmd & CARL9170_RSP_FLAG) != CARL9170_RSP_FLAG) {
171 if (!(cmd->hdr.cmd & CARL9170_CMD_ASYNC_FLAG))
172 carl9170_cmd_callback(ar, len, buf);
173
174 return;
175 }
176
177 if (unlikely(cmd->hdr.len != (len - 4))) {
178 if (net_ratelimit()) {
179 wiphy_err(ar->hw->wiphy, "FW: received over-/under"
180 "sized event %x (%d, but should be %d).\n",
181 cmd->hdr.cmd, cmd->hdr.len, len - 4);
182
183 print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE,
184 buf, len);
185 }
186
187 return;
188 }
189
190 /* hardware event handlers */
191 switch (cmd->hdr.cmd) {
192 case CARL9170_RSP_PRETBTT:
193 /* pre-TBTT event */
194 rcu_read_lock();
195 vif = carl9170_get_main_vif(ar);
196
197 if (!vif) {
198 rcu_read_unlock();
199 break;
200 }
201
202 switch (vif->type) {
203 case NL80211_IFTYPE_STATION:
204 carl9170_handle_ps(ar, cmd);
205 break;
206
207 case NL80211_IFTYPE_AP:
208 case NL80211_IFTYPE_ADHOC:
209 carl9170_update_beacon(ar, true);
210 break;
211
212 default:
213 break;
214 }
215 rcu_read_unlock();
216
217 break;
218
219
220 case CARL9170_RSP_TXCOMP:
221 /* TX status notification */
222 carl9170_tx_process_status(ar, cmd);
223 break;
224
225 case CARL9170_RSP_BEACON_CONFIG:
226 /*
227 * (IBSS) beacon send notification
228 * bytes: 04 c2 XX YY B4 B3 B2 B1
229 *
230 * XX always 80
231 * YY always 00
 232	 * B1-B4 "should" be the number of sent out beacons.
233 */
234 break;
235
236 case CARL9170_RSP_ATIM:
 237	/* end of ATIM window */
238 break;
239
240 case CARL9170_RSP_WATCHDOG:
241 /* Watchdog Interrupt */
242 carl9170_restart(ar, CARL9170_RR_WATCHDOG);
243 break;
244
245 case CARL9170_RSP_TEXT:
246 /* firmware debug */
247 carl9170_dbg_message(ar, (char *)buf + 4, len - 4);
248 break;
249
250 case CARL9170_RSP_HEXDUMP:
251 wiphy_dbg(ar->hw->wiphy, "FW: HD %d\n", len - 4);
252 print_hex_dump_bytes("FW:", DUMP_PREFIX_NONE,
253 (char *)buf + 4, len - 4);
254 break;
255
256 case CARL9170_RSP_RADAR:
257 if (!net_ratelimit())
258 break;
259
260 wiphy_info(ar->hw->wiphy, "FW: RADAR! Please report this "
261 "incident to linux-wireless@vger.kernel.org !\n");
262 break;
263
264 case CARL9170_RSP_GPIO:
265#ifdef CONFIG_CARL9170_WPC
266 if (ar->wps.pbc) {
267 bool state = !!(cmd->gpio.gpio & cpu_to_le32(
268 AR9170_GPIO_PORT_WPS_BUTTON_PRESSED));
269
270 if (state != ar->wps.pbc_state) {
271 ar->wps.pbc_state = state;
272 input_report_key(ar->wps.pbc, KEY_WPS_BUTTON,
273 state);
274 input_sync(ar->wps.pbc);
275 }
276 }
277#endif /* CONFIG_CARL9170_WPC */
278 break;
279
280 case CARL9170_RSP_BOOT:
281 complete(&ar->fw_boot_wait);
282 break;
283
284 default:
285 wiphy_err(ar->hw->wiphy, "FW: received unhandled event %x\n",
286 cmd->hdr.cmd);
287 print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
288 break;
289 }
290}
291
292static int carl9170_rx_mac_status(struct ar9170 *ar,
293 struct ar9170_rx_head *head, struct ar9170_rx_macstatus *mac,
294 struct ieee80211_rx_status *status)
295{
296 struct ieee80211_channel *chan;
297 u8 error, decrypt;
298
299 BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
300 BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
301
302 error = mac->error;
303
304 if (error & AR9170_RX_ERROR_WRONG_RA) {
305 if (!ar->sniffer_enabled)
306 return -EINVAL;
307 }
308
309 if (error & AR9170_RX_ERROR_PLCP) {
310 if (!(ar->filter_state & FIF_PLCPFAIL))
311 return -EINVAL;
312
313 status->flag |= RX_FLAG_FAILED_PLCP_CRC;
314 }
315
316 if (error & AR9170_RX_ERROR_FCS) {
317 ar->tx_fcs_errors++;
318
319 if (!(ar->filter_state & FIF_FCSFAIL))
320 return -EINVAL;
321
322 status->flag |= RX_FLAG_FAILED_FCS_CRC;
323 }
324
325 decrypt = ar9170_get_decrypt_type(mac);
326 if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
327 decrypt != AR9170_ENC_ALG_NONE) {
328 if ((decrypt == AR9170_ENC_ALG_TKIP) &&
329 (error & AR9170_RX_ERROR_MMIC))
330 status->flag |= RX_FLAG_MMIC_ERROR;
331
332 status->flag |= RX_FLAG_DECRYPTED;
333 }
334
335 if (error & AR9170_RX_ERROR_DECRYPT && !ar->sniffer_enabled)
336 return -ENODATA;
337
338 error &= ~(AR9170_RX_ERROR_MMIC |
339 AR9170_RX_ERROR_FCS |
340 AR9170_RX_ERROR_WRONG_RA |
341 AR9170_RX_ERROR_DECRYPT |
342 AR9170_RX_ERROR_PLCP);
343
344 /* drop any other error frames */
345 if (unlikely(error)) {
346 /* TODO: update netdevice's RX dropped/errors statistics */
347
348 if (net_ratelimit())
349 wiphy_dbg(ar->hw->wiphy, "received frame with "
350 "suspicious error code (%#x).\n", error);
351
352 return -EINVAL;
353 }
354
355 chan = ar->channel;
356 if (chan) {
357 status->band = chan->band;
358 status->freq = chan->center_freq;
359 }
360
361 switch (mac->status & AR9170_RX_STATUS_MODULATION) {
362 case AR9170_RX_STATUS_MODULATION_CCK:
363 if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
364 status->flag |= RX_FLAG_SHORTPRE;
365 switch (head->plcp[0]) {
366 case AR9170_RX_PHY_RATE_CCK_1M:
367 status->rate_idx = 0;
368 break;
369 case AR9170_RX_PHY_RATE_CCK_2M:
370 status->rate_idx = 1;
371 break;
372 case AR9170_RX_PHY_RATE_CCK_5M:
373 status->rate_idx = 2;
374 break;
375 case AR9170_RX_PHY_RATE_CCK_11M:
376 status->rate_idx = 3;
377 break;
378 default:
379 if (net_ratelimit()) {
380 wiphy_err(ar->hw->wiphy, "invalid plcp cck "
381 "rate (%x).\n", head->plcp[0]);
382 }
383
384 return -EINVAL;
385 }
386 break;
387
388 case AR9170_RX_STATUS_MODULATION_DUPOFDM:
389 case AR9170_RX_STATUS_MODULATION_OFDM:
390 switch (head->plcp[0] & 0xf) {
391 case AR9170_TXRX_PHY_RATE_OFDM_6M:
392 status->rate_idx = 0;
393 break;
394 case AR9170_TXRX_PHY_RATE_OFDM_9M:
395 status->rate_idx = 1;
396 break;
397 case AR9170_TXRX_PHY_RATE_OFDM_12M:
398 status->rate_idx = 2;
399 break;
400 case AR9170_TXRX_PHY_RATE_OFDM_18M:
401 status->rate_idx = 3;
402 break;
403 case AR9170_TXRX_PHY_RATE_OFDM_24M:
404 status->rate_idx = 4;
405 break;
406 case AR9170_TXRX_PHY_RATE_OFDM_36M:
407 status->rate_idx = 5;
408 break;
409 case AR9170_TXRX_PHY_RATE_OFDM_48M:
410 status->rate_idx = 6;
411 break;
412 case AR9170_TXRX_PHY_RATE_OFDM_54M:
413 status->rate_idx = 7;
414 break;
415 default:
416 if (net_ratelimit()) {
417 wiphy_err(ar->hw->wiphy, "invalid plcp ofdm "
418 "rate (%x).\n", head->plcp[0]);
419 }
420
421 return -EINVAL;
422 }
423 if (status->band == IEEE80211_BAND_2GHZ)
424 status->rate_idx += 4;
425 break;
426
427 case AR9170_RX_STATUS_MODULATION_HT:
428 if (head->plcp[3] & 0x80)
429 status->flag |= RX_FLAG_40MHZ;
430 if (head->plcp[6] & 0x80)
431 status->flag |= RX_FLAG_SHORT_GI;
432
433		status->rate_idx = clamp(head->plcp[3] & 0x7f, 0, 75);
434 status->flag |= RX_FLAG_HT;
435 break;
436
437 default:
438 BUG();
439 return -ENOSYS;
440 }
441
442 return 0;
443}
444
445static void carl9170_rx_phy_status(struct ar9170 *ar,
446 struct ar9170_rx_phystatus *phy, struct ieee80211_rx_status *status)
447{
448 int i;
449
450 BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);
451
452 for (i = 0; i < 3; i++)
453 if (phy->rssi[i] != 0x80)
454 status->antenna |= BIT(i);
455
456 /* post-process RSSI */
457 for (i = 0; i < 7; i++)
458 if (phy->rssi[i] & 0x80)
459 phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;
460
461 /* TODO: we could do something with phy_errors */
462 status->signal = ar->noise[0] + phy->rssi_combined;
463}
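/*
 * Illustration: the RSSI post-processing above folds the raw
 * firmware values back into a 7-bit range; a raw rssi byte of
 * 0xff becomes 0x00 and 0x81 becomes 0x02, for example. The
 * antenna bitmap is derived from the first three per-chain
 * values, where a reading of 0x80 marks an unused chain.
 */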
464
465static struct sk_buff *carl9170_rx_copy_data(u8 *buf, int len)
466{
467 struct sk_buff *skb;
468 int reserved = 0;
469 struct ieee80211_hdr *hdr = (void *) buf;
470
471 if (ieee80211_is_data_qos(hdr->frame_control)) {
472 u8 *qc = ieee80211_get_qos_ctl(hdr);
473 reserved += NET_IP_ALIGN;
474
475 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
476 reserved += NET_IP_ALIGN;
477 }
478
479 if (ieee80211_has_a4(hdr->frame_control))
480 reserved += NET_IP_ALIGN;
481
482 reserved = 32 + (reserved & NET_IP_ALIGN);
483
484 skb = dev_alloc_skb(len + reserved);
485 if (likely(skb)) {
486 skb_reserve(skb, reserved);
487 memcpy(skb_put(skb, len), buf, len);
488 }
489
490 return skb;
491}
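/*
 * Worked example (illustrative, assuming NET_IP_ALIGN == 2):
 * a plain QoS data frame carries a 26 byte 802.11 header, so
 * reserved starts out as 2 and the headroom becomes
 * 32 + (2 & 2) = 34. The payload then begins at offset
 * 34 + 26 = 60, which keeps the IP header 4-byte aligned.
 * A non-QoS frame (24 byte header) gets the plain 32 bytes:
 * 32 + 24 = 56 is aligned as well.
 */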
492
493static u8 *carl9170_find_ie(u8 *data, unsigned int len, u8 ie)
494{
495 struct ieee80211_mgmt *mgmt = (void *)data;
496 u8 *pos, *end;
497
498 pos = (u8 *)mgmt->u.beacon.variable;
499 end = data + len;
500 while (pos < end) {
501 if (pos + 2 + pos[1] > end)
502 return NULL;
503
504 if (pos[0] == ie)
505 return pos;
506
507 pos += 2 + pos[1];
508 }
509 return NULL;
510}
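/*
 * For reference: management frame IEs are plain TLVs, one byte
 * element id, one byte length, then the payload. A (purely
 * illustrative) TIM element in a beacon:
 *
 *	05 04 00 02 00 00
 *	|  |  |  |  |  `- partial virtual bitmap
 *	|  |  |  |  `---- bitmap control
 *	|  |  |  `------- DTIM period
 *	|  |  `---------- DTIM count
 *	|  `------------- length (4)
 *	`---------------- element id (WLAN_EID_TIM == 5)
 */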
511
512/*
513 * NOTE:
514 *
515 * The firmware is in charge of waking up the device just before
516 * the AP is expected to transmit the next beacon.
517 *
518 * This leaves the driver with the important task of deciding when
519 * to set the PHY back to bed again.
520 */
521static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
522{
523 struct ieee80211_hdr *hdr = (void *) data;
524 struct ieee80211_tim_ie *tim_ie;
525 u8 *tim;
526 u8 tim_len;
527 bool cam;
528
529 if (likely(!(ar->hw->conf.flags & IEEE80211_CONF_PS)))
530 return;
531
532 /* check if this really is a beacon */
533 if (!ieee80211_is_beacon(hdr->frame_control))
534 return;
535
536 /* min. beacon length + FCS_LEN */
537 if (len <= 40 + FCS_LEN)
538 return;
539
540 /* and only beacons from the associated BSSID, please */
541 if (compare_ether_addr(hdr->addr3, ar->common.curbssid) ||
542 !ar->common.curaid)
543 return;
544
545 ar->ps.last_beacon = jiffies;
546
547 tim = carl9170_find_ie(data, len - FCS_LEN, WLAN_EID_TIM);
548 if (!tim)
549 return;
550
551 if (tim[1] < sizeof(*tim_ie))
552 return;
553
554 tim_len = tim[1];
555 tim_ie = (struct ieee80211_tim_ie *) &tim[2];
556
557 if (!WARN_ON_ONCE(!ar->hw->conf.ps_dtim_period))
558 ar->ps.dtim_counter = (tim_ie->dtim_count - 1) %
559 ar->hw->conf.ps_dtim_period;
560
561	/* Check whether the PHY can be turned off again. */
562
563 /* 1. What about buffered unicast traffic for our AID? */
564 cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
565
566 /* 2. Maybe the AP wants to send multicast/broadcast data? */
567	cam |= !!(tim_ie->bitmap_ctrl & 0x01);
568
569 if (!cam) {
570 /* back to low-power land. */
571 ar->ps.off_override &= ~PS_OFF_BCN;
572 carl9170_ps_check(ar);
573 } else {
574 /* force CAM */
575 ar->ps.off_override |= PS_OFF_BCN;
576 }
577}
578
579/*
580 * If the frame alignment is right (or the kernel has
581 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
582 * is only a single MPDU in the USB frame, then we could
583 * submit the SKB directly to mac80211. However, since
584 * there may be multiple packets in one SKB in stream
585 * mode, and we need to observe the proper ordering,
586 * this is non-trivial.
587 */
588
589static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
590{
591 struct ar9170_rx_head *head;
592 struct ar9170_rx_macstatus *mac;
593 struct ar9170_rx_phystatus *phy = NULL;
594 struct ieee80211_rx_status status;
595 struct sk_buff *skb;
596 int mpdu_len;
597
598 if (!IS_STARTED(ar))
599 return;
600
601 if (unlikely(len < sizeof(*mac))) {
602 ar->rx_dropped++;
603 return;
604 }
605
606 mpdu_len = len - sizeof(*mac);
607
608 mac = (void *)(buf + mpdu_len);
609 if (unlikely(mac->error & AR9170_RX_ERROR_FATAL)) {
610 ar->rx_dropped++;
611 return;
612 }
613
614 switch (mac->status & AR9170_RX_STATUS_MPDU) {
615 case AR9170_RX_STATUS_MPDU_FIRST:
616		/* Aggregated MPDUs start with a PLCP header */
617 if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
618 head = (void *) buf;
619
620 /*
621 * The PLCP header needs to be cached for the
622 * following MIDDLE + LAST A-MPDU packets.
623 *
624 * So, if you are wondering why all frames seem
625 * to share a common RX status information,
626 * then you have the answer right here...
627 */
628 memcpy(&ar->rx_plcp, (void *) buf,
629 sizeof(struct ar9170_rx_head));
630
631 mpdu_len -= sizeof(struct ar9170_rx_head);
632 buf += sizeof(struct ar9170_rx_head);
633
634 ar->rx_has_plcp = true;
635 } else {
636 if (net_ratelimit()) {
637 wiphy_err(ar->hw->wiphy, "plcp info "
638 "is clipped.\n");
639 }
640
641 ar->rx_dropped++;
642 return;
643 }
644 break;
645
646 case AR9170_RX_STATUS_MPDU_LAST:
647 /*
648 * The last frame of an A-MPDU has an extra tail
649		 * which contains the phy status of the whole
650 * aggregate.
651 */
652
653 if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
654 mpdu_len -= sizeof(struct ar9170_rx_phystatus);
655 phy = (void *)(buf + mpdu_len);
656 } else {
657 if (net_ratelimit()) {
658 wiphy_err(ar->hw->wiphy, "frame tail "
659 "is clipped.\n");
660 }
661
662 ar->rx_dropped++;
663 return;
664 }
665		/* fall through: LAST also needs the cached plcp head */
666 case AR9170_RX_STATUS_MPDU_MIDDLE:
667 /* These are just data + mac status */
668		if (unlikely(!ar->rx_has_plcp)) {
669			if (net_ratelimit()) {
670				wiphy_err(ar->hw->wiphy, "rx stream does not "
671					"start with a first_mpdu frame tag.\n");
672			}
673
674			/* count the drop even when the warning is suppressed */
675			ar->rx_dropped++;
676			return;
677		}
678
679 head = &ar->rx_plcp;
680 break;
681
682 case AR9170_RX_STATUS_MPDU_SINGLE:
683 /* single mpdu has both: plcp (head) and phy status (tail) */
684 head = (void *) buf;
685
686 mpdu_len -= sizeof(struct ar9170_rx_head);
687 mpdu_len -= sizeof(struct ar9170_rx_phystatus);
688
689 buf += sizeof(struct ar9170_rx_head);
690 phy = (void *)(buf + mpdu_len);
691 break;
692
693 default:
694		BUG();
695 break;
696 }
697
698 /* FC + DU + RA + FCS */
699 if (unlikely(mpdu_len < (2 + 2 + 6 + FCS_LEN))) {
700 ar->rx_dropped++;
701 return;
702 }
703
704 memset(&status, 0, sizeof(status));
705 if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status))) {
706 ar->rx_dropped++;
707 return;
708 }
709
710 if (phy)
711 carl9170_rx_phy_status(ar, phy, &status);
712
713 carl9170_ps_beacon(ar, buf, mpdu_len);
714
715 skb = carl9170_rx_copy_data(buf, mpdu_len);
716 if (likely(skb)) {
717 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
718 ieee80211_rx(ar->hw, skb);
719 } else {
720 ar->rx_dropped++;
721 }
722}
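/*
 * The four MPDU layouts the switch above has to deal with; every
 * chunk ends with the 4 byte mac status:
 *
 *  SINGLE: [ plcp head (12) | frame | phy tail (20) | mac (4) ]
 *  FIRST:  [ plcp head (12) | frame |                 mac (4) ]
 *  MIDDLE: [                  frame |                 mac (4) ]
 *  LAST:   [                  frame | phy tail (20) | mac (4) ]
 *
 * MIDDLE and LAST frames reuse the plcp head cached by FIRST.
 */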
723
724static void carl9170_rx_untie_cmds(struct ar9170 *ar, const u8 *respbuf,
725 const unsigned int resplen)
726{
727 struct carl9170_rsp *cmd;
728 int i = 0;
729
730 while (i < resplen) {
731 cmd = (void *) &respbuf[i];
732
733 i += cmd->hdr.len + 4;
734 if (unlikely(i > resplen))
735 break;
736
737 carl9170_handle_command_response(ar, cmd, cmd->hdr.len + 4);
738 }
739
740 if (unlikely(i != resplen)) {
741 if (!net_ratelimit())
742 return;
743
744 wiphy_err(ar->hw->wiphy, "malformed firmware trap:\n");
745 print_hex_dump_bytes("rxcmd:", DUMP_PREFIX_OFFSET,
746 respbuf, resplen);
747 }
748}
749
750static void __carl9170_rx(struct ar9170 *ar, u8 *buf, unsigned int len)
751{
752 unsigned int i = 0;
753
754 /* weird thing, but this is the same in the original driver */
755 while (len > 2 && i < 12 && buf[0] == 0xff && buf[1] == 0xff) {
756 i += 2;
757 len -= 2;
758 buf += 2;
759 }
760
761 if (unlikely(len < 4))
762 return;
763
764 /* found the 6 * 0xffff marker? */
765 if (i == 12)
766 carl9170_rx_untie_cmds(ar, buf, len);
767 else
768 carl9170_handle_mpdu(ar, buf, len);
769}
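/*
 * Example: a buffer which starts with the full marker, such as
 *
 *	ff ff ff ff ff ff ff ff ff ff ff ff <response...>
 *
 * consumes all six 0xffff words (i == 12) and is handed over to
 * carl9170_rx_untie_cmds; everything else is treated as an MPDU.
 */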
770
771static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len)
772{
773 unsigned int tlen, wlen = 0, clen = 0;
774 struct ar9170_stream *rx_stream;
775 u8 *tbuf;
776
777 tbuf = buf;
778 tlen = len;
779
780 while (tlen >= 4) {
781 rx_stream = (void *) tbuf;
782 clen = le16_to_cpu(rx_stream->length);
783 wlen = ALIGN(clen, 4);
784
785		/* check if this stream has a valid tag. */
786 if (rx_stream->tag != cpu_to_le16(AR9170_RX_STREAM_TAG)) {
787 /*
788 * TODO: handle the highly unlikely event that the
789 * corrupted stream has the TAG at the right position.
790 */
791
792 /* check if the frame can be repaired. */
793 if (!ar->rx_failover_missing) {
794
795				/* this is not a "short read". */
796 if (net_ratelimit()) {
797 wiphy_err(ar->hw->wiphy,
798 "missing tag!\n");
799 }
800
801 __carl9170_rx(ar, tbuf, tlen);
802 return;
803 }
804
805 if (ar->rx_failover_missing > tlen) {
806 if (net_ratelimit()) {
807 wiphy_err(ar->hw->wiphy,
808						"possible multi-"
809 "stream corruption!\n");
810 goto err_telluser;
811 } else {
812 goto err_silent;
813 }
814 }
815
816 memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
817 ar->rx_failover_missing -= tlen;
818
819 if (ar->rx_failover_missing <= 0) {
820 /*
821 * nested carl9170_rx_stream call!
822 *
823				 * termination is guaranteed, even when the
824				 * combined frame also has an element with
825				 * a bad tag.
826 */
827
828 ar->rx_failover_missing = 0;
829 carl9170_rx_stream(ar, ar->rx_failover->data,
830 ar->rx_failover->len);
831
832 skb_reset_tail_pointer(ar->rx_failover);
833 skb_trim(ar->rx_failover, 0);
834 }
835
836 return;
837 }
838
839 /* check if stream is clipped */
840 if (wlen > tlen - 4) {
841 if (ar->rx_failover_missing) {
842 /* TODO: handle double stream corruption. */
843 if (net_ratelimit()) {
844 wiphy_err(ar->hw->wiphy, "double rx "
845 "stream corruption!\n");
846 goto err_telluser;
847 } else {
848 goto err_silent;
849 }
850 }
851
852 /*
853			 * save the incomplete data set.
854			 * the firmware will resend the missing bits when
855			 * the rx descriptor comes around again.
856 */
857
858 memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
859 ar->rx_failover_missing = clen - tlen;
860 return;
861 }
862 __carl9170_rx(ar, rx_stream->payload, clen);
863
864 tbuf += wlen + 4;
865 tlen -= wlen + 4;
866 }
867
868 if (tlen) {
869 if (net_ratelimit()) {
870 wiphy_err(ar->hw->wiphy, "%d bytes of unprocessed "
871 "data left in rx stream!\n", tlen);
872 }
873
874 goto err_telluser;
875 }
876
877 return;
878
879err_telluser:
880 wiphy_err(ar->hw->wiphy, "damaged RX stream data [want:%d, "
881 "data:%d, rx:%d, pending:%d ]\n", clen, wlen, tlen,
882 ar->rx_failover_missing);
883
884 if (ar->rx_failover_missing)
885 print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
886 ar->rx_failover->data,
887 ar->rx_failover->len);
888
889 print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
890 buf, len);
891
892	wiphy_err(ar->hw->wiphy, "please check your hardware and cables if "
893 "you see this message frequently.\n");
894
895err_silent:
896 if (ar->rx_failover_missing) {
897 skb_reset_tail_pointer(ar->rx_failover);
898 skb_trim(ar->rx_failover, 0);
899 ar->rx_failover_missing = 0;
900 }
901}
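/*
 * Stream framing recap: every chunk begins with a 4 byte header
 * that holds the __le16 payload length and the __le16
 * AR9170_RX_STREAM_TAG. The payload is padded up to the next
 * 4 byte boundary, so a chunk with clen == 6 occupies
 * 4 + ALIGN(6, 4) = 12 bytes and the walker advances by
 * wlen + 4 to the next header.
 */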
902
903void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len)
904{
905 if (ar->fw.rx_stream)
906 carl9170_rx_stream(ar, buf, len);
907 else
908 __carl9170_rx(ar, buf, len);
909}
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
new file mode 100644
index 000000000000..e0d2374e0c77
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -0,0 +1,1373 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * 802.11 xmit & status routines
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/etherdevice.h>
44#include <net/mac80211.h>
45#include "carl9170.h"
46#include "hw.h"
47#include "cmd.h"
48
49static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
50 unsigned int queue)
51{
52 if (unlikely(modparam_noht)) {
53 return queue;
54 } else {
55 /*
56 * This is just another workaround, until
57 * someone figures out how to get QoS and
58 * AMPDU to play nicely together.
59 */
60
61 return 2; /* AC_BE */
62 }
63}
64
65static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
66 struct sk_buff *skb)
67{
68 return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
69}
70
71static bool is_mem_full(struct ar9170 *ar)
72{
73 return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
74 atomic_read(&ar->mem_free_blocks));
75}
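/*
 * Example (with an illustrative mem_block_size of 256 bytes):
 * a worst-case frame of IEEE80211_MAX_FRAME_LEN (2352) bytes
 * needs DIV_ROUND_UP(2352, 256) = 10 blocks. "full" therefore
 * means that the device could no longer swallow one more
 * maximum-sized frame.
 */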
76
77static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
78{
79 int queue, i;
80 bool mem_full;
81
82 atomic_inc(&ar->tx_total_queued);
83
84 queue = skb_get_queue_mapping(skb);
85 spin_lock_bh(&ar->tx_stats_lock);
86
87 /*
88	 * The driver has to accept the frame, regardless of whether the
89	 * queue is full to the brim or not. We have to do the queuing internally,
90 * since mac80211 assumes that a driver which can operate with
91 * aggregated frames does not reject frames for this reason.
92 */
93 ar->tx_stats[queue].len++;
94 ar->tx_stats[queue].count++;
95
96 mem_full = is_mem_full(ar);
97 for (i = 0; i < ar->hw->queues; i++) {
98 if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
99 ieee80211_stop_queue(ar->hw, i);
100 ar->queue_stop_timeout[i] = jiffies;
101 }
102 }
103
104 spin_unlock_bh(&ar->tx_stats_lock);
105}
106
107static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
108{
109 struct ieee80211_tx_info *txinfo;
110 int queue;
111
112 txinfo = IEEE80211_SKB_CB(skb);
113 queue = skb_get_queue_mapping(skb);
114
115 spin_lock_bh(&ar->tx_stats_lock);
116
117 ar->tx_stats[queue].len--;
118
119 if (!is_mem_full(ar)) {
120 unsigned int i;
121 for (i = 0; i < ar->hw->queues; i++) {
122 if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
123 continue;
124
125 if (ieee80211_queue_stopped(ar->hw, i)) {
126 unsigned long tmp;
127
128 tmp = jiffies - ar->queue_stop_timeout[i];
129 if (tmp > ar->max_queue_stop_timeout[i])
130 ar->max_queue_stop_timeout[i] = tmp;
131 }
132
133 ieee80211_wake_queue(ar->hw, i);
134 }
135 }
136
137 spin_unlock_bh(&ar->tx_stats_lock);
138 if (atomic_dec_and_test(&ar->tx_total_queued))
139 complete(&ar->tx_flush);
140}
141
142static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
143{
144 struct _carl9170_tx_superframe *super = (void *) skb->data;
145 unsigned int chunks;
146 int cookie = -1;
147
148 atomic_inc(&ar->mem_allocs);
149
150 chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
151 if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
152 atomic_add(chunks, &ar->mem_free_blocks);
153 return -ENOSPC;
154 }
155
156 spin_lock_bh(&ar->mem_lock);
157 cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
158 spin_unlock_bh(&ar->mem_lock);
159
160 if (unlikely(cookie < 0)) {
161 atomic_add(chunks, &ar->mem_free_blocks);
162 return -ENOSPC;
163 }
164
165 super = (void *) skb->data;
166
167 /*
168 * Cookie #0 serves two special purposes:
169	 * 1. The firmware might use it to generate BlockACK frames
170	 *    in response to incoming BlockAckReqs.
171 *
172 * 2. Prevent double-free bugs.
173 */
174 super->s.cookie = (u8) cookie + 1;
175 return 0;
176}
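/*
 * Example: the very first allocation usually gets bit 0 from
 * bitmap_find_free_region (order 0 => a single block region),
 * which goes on the air as cookie 1. carl9170_release_dev_space
 * undoes the +1 before releasing the bitmap bit, so cookie 0
 * keeps its "unaccounted frame" meaning.
 */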
177
178static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
179{
180 struct _carl9170_tx_superframe *super = (void *) skb->data;
181 int cookie;
182
183 /* make a local copy of the cookie */
184 cookie = super->s.cookie;
185 /* invalidate cookie */
186 super->s.cookie = 0;
187
188 /*
189	 * Do an out-of-bounds check on the cookie:
190	 *
191	 * * cookie "0" is reserved and won't be assigned to any
192	 *   out-going frame. Internally however, it is used to
193	 *   mark no-longer/un-accounted frames and serves as a
194	 *   cheap way of preventing frames from being freed
195	 *   twice by _accident_. NB: There is a tiny race...
196	 *
197	 * * obviously, the cookie number is limited by the amount
198	 *   of available memory blocks, so the number can
199	 *   never exceed the mem_blocks count.
200 */
201 if (unlikely(WARN_ON_ONCE(cookie == 0) ||
202 WARN_ON_ONCE(cookie > ar->fw.mem_blocks)))
203 return;
204
205 atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
206 &ar->mem_free_blocks);
207
208 spin_lock_bh(&ar->mem_lock);
209 bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
210 spin_unlock_bh(&ar->mem_lock);
211}
212
213/* Called from any context */
214static void carl9170_tx_release(struct kref *ref)
215{
216 struct ar9170 *ar;
217 struct carl9170_tx_info *arinfo;
218 struct ieee80211_tx_info *txinfo;
219 struct sk_buff *skb;
220
221 arinfo = container_of(ref, struct carl9170_tx_info, ref);
222 txinfo = container_of((void *) arinfo, struct ieee80211_tx_info,
223 rate_driver_data);
224 skb = container_of((void *) txinfo, struct sk_buff, cb);
225
226 ar = arinfo->ar;
227 if (WARN_ON_ONCE(!ar))
228 return;
229
230 BUILD_BUG_ON(
231 offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);
232
233 memset(&txinfo->status.ampdu_ack_len, 0,
234 sizeof(struct ieee80211_tx_info) -
235 offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
236
237 if (atomic_read(&ar->tx_total_queued))
238 ar->tx_schedule = true;
239
240 if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) {
241 if (!atomic_read(&ar->tx_ampdu_upload))
242 ar->tx_ampdu_schedule = true;
243
244 if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
245 txinfo->status.ampdu_len = txinfo->pad[0];
246 txinfo->status.ampdu_ack_len = txinfo->pad[1];
247 txinfo->pad[0] = txinfo->pad[1] = 0;
248 } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
249 /*
250 * drop redundant tx_status reports:
251 *
252 * 1. ampdu_ack_len of the final tx_status does
253 * include the feedback of this particular frame.
254 *
255 * 2. tx_status_irqsafe only queues up to 128
256 * tx feedback reports and discards the rest.
257 *
258 * 3. minstrel_ht is picky, it only accepts
259 * reports of frames with the TX_STATUS_AMPDU flag.
260 */
261
262 dev_kfree_skb_any(skb);
263 return;
264 } else {
265 /*
266 * Frame has failed, but we want to keep it in
267 * case it was lost due to a power-state
268 * transition.
269 */
270 }
271 }
272
273 skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
274 ieee80211_tx_status_irqsafe(ar->hw, skb);
275}
276
277void carl9170_tx_get_skb(struct sk_buff *skb)
278{
279 struct carl9170_tx_info *arinfo = (void *)
280 (IEEE80211_SKB_CB(skb))->rate_driver_data;
281 kref_get(&arinfo->ref);
282}
283
284int carl9170_tx_put_skb(struct sk_buff *skb)
285{
286 struct carl9170_tx_info *arinfo = (void *)
287 (IEEE80211_SKB_CB(skb))->rate_driver_data;
288
289 return kref_put(&arinfo->ref, carl9170_tx_release);
290}
291
292/* Caller must hold the tid_info->lock & rcu_read_lock */
293static void carl9170_tx_shift_bm(struct ar9170 *ar,
294 struct carl9170_sta_tid *tid_info, u16 seq)
295{
296 u16 off;
297
298 off = SEQ_DIFF(seq, tid_info->bsn);
299
300 if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
301 return;
302
303 /*
304 * Sanity check. For each MPDU we set the bit in bitmap and
305 * clear it once we received the tx_status.
306 * But if the bit is already cleared then we've been bitten
307 * by a bug.
308 */
309 WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap));
310
311 off = SEQ_DIFF(tid_info->snx, tid_info->bsn);
312 if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
313 return;
314
315 if (!bitmap_empty(tid_info->bitmap, off))
316 off = find_first_bit(tid_info->bitmap, off);
317
318 tid_info->bsn += off;
319 tid_info->bsn &= 0x0fff;
320
321 bitmap_shift_right(tid_info->bitmap, tid_info->bitmap,
322 off, CARL9170_BAW_BITS);
323}
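/*
 * Worked example (assuming SEQ_DIFF() is the modulo-4096
 * distance): bsn = 0x0ffe, snx = 0x0002 and the tx_status for
 * seq 0x0ffe arrives. Bit 0 is cleared; with bits 1..3 still
 * pending, find_first_bit() yields off = 1, so bsn advances to
 * 0x0fff and the bitmap slides right by a single position.
 */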
324
325static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
326 struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
327{
328 struct _carl9170_tx_superframe *super = (void *) skb->data;
329 struct ieee80211_hdr *hdr = (void *) super->frame_data;
330 struct ieee80211_tx_info *tx_info;
331 struct carl9170_tx_info *ar_info;
332 struct carl9170_sta_info *sta_info;
333 struct ieee80211_sta *sta;
334 struct carl9170_sta_tid *tid_info;
335 struct ieee80211_vif *vif;
336 unsigned int vif_id;
337 u8 tid;
338
339 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
340 txinfo->flags & IEEE80211_TX_CTL_INJECTED)
341 return;
342
343 tx_info = IEEE80211_SKB_CB(skb);
344 ar_info = (void *) tx_info->rate_driver_data;
345
346 vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
347 CARL9170_TX_SUPER_MISC_VIF_ID_S;
348
349 if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
350 return;
351
352 rcu_read_lock();
353 vif = rcu_dereference(ar->vif_priv[vif_id].vif);
354 if (unlikely(!vif))
355 goto out_rcu;
356
357 /*
358 * Normally we should use wrappers like ieee80211_get_DA to get
359 * the correct peer ieee80211_sta.
360 *
361 * But there is a problem with indirect traffic (broadcasts, or
362 * data which is designated for other stations) in station mode.
363 * The frame will be directed to the AP for distribution and not
364 * to the actual destination.
365 */
366 sta = ieee80211_find_sta(vif, hdr->addr1);
367 if (unlikely(!sta))
368 goto out_rcu;
369
370 tid = get_tid_h(hdr);
371
372 sta_info = (void *) sta->drv_priv;
373 tid_info = rcu_dereference(sta_info->agg[tid]);
374 if (!tid_info)
375 goto out_rcu;
376
377 spin_lock_bh(&tid_info->lock);
378 if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE))
379 carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));
380
381 if (sta_info->stats[tid].clear) {
382 sta_info->stats[tid].clear = false;
383 sta_info->stats[tid].ampdu_len = 0;
384 sta_info->stats[tid].ampdu_ack_len = 0;
385 }
386
387 sta_info->stats[tid].ampdu_len++;
388 if (txinfo->status.rates[0].count == 1)
389 sta_info->stats[tid].ampdu_ack_len++;
390
391 if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
392 txinfo->pad[0] = sta_info->stats[tid].ampdu_len;
393 txinfo->pad[1] = sta_info->stats[tid].ampdu_ack_len;
394 txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
395 sta_info->stats[tid].clear = true;
396 }
397 spin_unlock_bh(&tid_info->lock);
398
399out_rcu:
400 rcu_read_unlock();
401}
402
403void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
404 const bool success)
405{
406 struct ieee80211_tx_info *txinfo;
407
408 carl9170_tx_accounting_free(ar, skb);
409
410 txinfo = IEEE80211_SKB_CB(skb);
411
412 if (success)
413 txinfo->flags |= IEEE80211_TX_STAT_ACK;
414 else
415 ar->tx_ack_failures++;
416
417 if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
418 carl9170_tx_status_process_ampdu(ar, skb, txinfo);
419
420 carl9170_tx_put_skb(skb);
421}
422
423/* This function may be called from any context */
424void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
425{
426 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
427
428 atomic_dec(&ar->tx_total_pending);
429
430 if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
431 atomic_dec(&ar->tx_ampdu_upload);
432
433 if (carl9170_tx_put_skb(skb))
434 tasklet_hi_schedule(&ar->usb_tasklet);
435}
436
437static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
438 struct sk_buff_head *queue)
439{
440 struct sk_buff *skb;
441
442 spin_lock_bh(&queue->lock);
443 skb_queue_walk(queue, skb) {
444 struct _carl9170_tx_superframe *txc = (void *) skb->data;
445
446 if (txc->s.cookie != cookie)
447 continue;
448
449 __skb_unlink(skb, queue);
450 spin_unlock_bh(&queue->lock);
451
452 carl9170_release_dev_space(ar, skb);
453 return skb;
454 }
455 spin_unlock_bh(&queue->lock);
456
457 return NULL;
458}
459
460static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
461 unsigned int tries, struct ieee80211_tx_info *txinfo)
462{
463 unsigned int i;
464
465 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
466 if (txinfo->status.rates[i].idx < 0)
467 break;
468
469 if (i == rix) {
470 txinfo->status.rates[i].count = tries;
471 i++;
472 break;
473 }
474 }
475
476 for (; i < IEEE80211_TX_MAX_RATES; i++) {
477 txinfo->status.rates[i].idx = -1;
478 txinfo->status.rates[i].count = 0;
479 }
480}
481
482static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
483{
484 int i;
485 struct sk_buff *skb;
486 struct ieee80211_tx_info *txinfo;
487 struct carl9170_tx_info *arinfo;
488 bool restart = false;
489
490 for (i = 0; i < ar->hw->queues; i++) {
491 spin_lock_bh(&ar->tx_status[i].lock);
492
493 skb = skb_peek(&ar->tx_status[i]);
494
495 if (!skb)
496 goto next;
497
498 txinfo = IEEE80211_SKB_CB(skb);
499 arinfo = (void *) txinfo->rate_driver_data;
500
501 if (time_is_before_jiffies(arinfo->timeout +
502			msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)))
503 restart = true;
504
505next:
506 spin_unlock_bh(&ar->tx_status[i].lock);
507 }
508
509 if (restart) {
510 /*
511 * At least one queue has been stuck for long enough.
512 * Give the device a kick and hope it gets back to
513 * work.
514 *
515 * possible reasons may include:
516 * - frames got lost/corrupted (bad connection to the device)
517 * - stalled rx processing/usb controller hiccups
518 * - firmware errors/bugs
519 * - every bug you can think of.
520 * - all bugs you can't...
521 * - ...
522 */
523 carl9170_restart(ar, CARL9170_RR_STUCK_TX);
524 }
525}
526
527void carl9170_tx_janitor(struct work_struct *work)
528{
529 struct ar9170 *ar = container_of(work, struct ar9170,
530 tx_janitor.work);
531 if (!IS_STARTED(ar))
532 return;
533
534 ar->tx_janitor_last_run = jiffies;
535
536 carl9170_check_queue_stop_timeout(ar);
537
538 if (!atomic_read(&ar->tx_total_queued))
539 return;
540
541 ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
542 msecs_to_jiffies(CARL9170_TX_TIMEOUT));
543}
544
545static void __carl9170_tx_process_status(struct ar9170 *ar,
546 const uint8_t cookie, const uint8_t info)
547{
548 struct sk_buff *skb;
549 struct ieee80211_tx_info *txinfo;
550 struct carl9170_tx_info *arinfo;
551 unsigned int r, t, q;
552 bool success = true;
553
554 q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE];
555
556 skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
557 if (!skb) {
558 /*
559 * We have lost the race to another thread.
560 */
561
562		return;
563 }
564
565 txinfo = IEEE80211_SKB_CB(skb);
566 arinfo = (void *) txinfo->rate_driver_data;
567
568 if (!(info & CARL9170_TX_STATUS_SUCCESS))
569 success = false;
570
571 r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S;
572 t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;
573
574 carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
575 carl9170_tx_status(ar, skb, success);
576}
577
578void carl9170_tx_process_status(struct ar9170 *ar,
579 const struct carl9170_rsp *cmd)
580{
581 unsigned int i;
582
583 for (i = 0; i < cmd->hdr.ext; i++) {
584 if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) {
585 print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE,
586 (void *) cmd, cmd->hdr.len + 4);
587 break;
588 }
589
590 __carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
591 cmd->_tx_status[i].info);
592 }
593}
594
595static __le32 carl9170_tx_physet(struct ar9170 *ar,
596 struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
597{
598 struct ieee80211_rate *rate = NULL;
599 u32 power, chains;
600 __le32 tmp;
601
602 tmp = cpu_to_le32(0);
603
604 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
605 tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ <<
606 AR9170_TX_PHY_BW_S);
607 /* this works because 40 MHz is 2 and dup is 3 */
608 if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
609 tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP <<
610 AR9170_TX_PHY_BW_S);
611
612 if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
613 tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
614
615 if (txrate->flags & IEEE80211_TX_RC_MCS) {
616 u32 r = txrate->idx;
617 u8 *txpower;
618
619 /* heavy clip control */
620 tmp |= cpu_to_le32((r & 0x7) <<
621 AR9170_TX_PHY_TX_HEAVY_CLIP_S);
622
623 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
624 if (info->band == IEEE80211_BAND_5GHZ)
625 txpower = ar->power_5G_ht40;
626 else
627 txpower = ar->power_2G_ht40;
628 } else {
629 if (info->band == IEEE80211_BAND_5GHZ)
630 txpower = ar->power_5G_ht20;
631 else
632 txpower = ar->power_2G_ht20;
633 }
634
635 power = txpower[r & 7];
636
637		/* +1 dBm for HT40 (power values are in 0.5 dBm steps) */
638 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
639 power += 2;
640
641 r <<= AR9170_TX_PHY_MCS_S;
642 BUG_ON(r & ~AR9170_TX_PHY_MCS);
643
644 tmp |= cpu_to_le32(r & AR9170_TX_PHY_MCS);
645 tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
646
647 /*
648 * green field preamble does not work.
649 *
650 * if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
651 * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
652 */
653 } else {
654 u8 *txpower;
655 u32 mod;
656 u32 phyrate;
657 u8 idx = txrate->idx;
658
659 if (info->band != IEEE80211_BAND_2GHZ) {
660 idx += 4;
661 txpower = ar->power_5G_leg;
662 mod = AR9170_TX_PHY_MOD_OFDM;
663 } else {
664 if (idx < 4) {
665 txpower = ar->power_2G_cck;
666 mod = AR9170_TX_PHY_MOD_CCK;
667 } else {
668 mod = AR9170_TX_PHY_MOD_OFDM;
669 txpower = ar->power_2G_ofdm;
670 }
671 }
672
673 rate = &__carl9170_ratetable[idx];
674
675 phyrate = rate->hw_value & 0xF;
676 power = txpower[(rate->hw_value & 0x30) >> 4];
677 phyrate <<= AR9170_TX_PHY_MCS_S;
678
679 tmp |= cpu_to_le32(mod);
680 tmp |= cpu_to_le32(phyrate);
681
682 /*
683 * short preamble seems to be broken too.
684 *
685 * if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
686 * tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
687 */
688 }
689 power <<= AR9170_TX_PHY_TX_PWR_S;
690 power &= AR9170_TX_PHY_TX_PWR;
691 tmp |= cpu_to_le32(power);
692
693 /* set TX chains */
694 if (ar->eeprom.tx_mask == 1) {
695 chains = AR9170_TX_PHY_TXCHAIN_1;
696 } else {
697 chains = AR9170_TX_PHY_TXCHAIN_2;
698
699 /* >= 36M legacy OFDM - use only one chain */
700 if (rate && rate->bitrate >= 360 &&
701 !(txrate->flags & IEEE80211_TX_RC_MCS))
702 chains = AR9170_TX_PHY_TXCHAIN_1;
703 }
704 tmp |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_S);
705
706 return tmp;
707}
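/*
 * In short: the returned word packs bandwidth, guard interval,
 * modulation, rate/MCS index, per-rate tx power and the chain
 * mask into a single le32 phy control descriptor. An HT40
 * short-GI MCS7 frame, for instance, ends up with the 40MHZ
 * bandwidth bits, AR9170_TX_PHY_SHORT_GI, AR9170_TX_PHY_MOD_HT,
 * MCS index 7, the ht40 power value and, with both tx chains
 * enabled, AR9170_TX_PHY_TXCHAIN_2.
 */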
708
709static bool carl9170_tx_rts_check(struct ar9170 *ar,
710 struct ieee80211_tx_rate *rate,
711 bool ampdu, bool multi)
712{
713 switch (ar->erp_mode) {
714 case CARL9170_ERP_AUTO:
715 if (ampdu)
716 break;
717		/* fall through */
718 case CARL9170_ERP_MAC80211:
719 if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
720 break;
721		/* fall through */
722 case CARL9170_ERP_RTS:
723 if (likely(!multi))
724 return true;
725		/* fall through */
726 default:
727 break;
728 }
729
730 return false;
731}
732
733static bool carl9170_tx_cts_check(struct ar9170 *ar,
734 struct ieee80211_tx_rate *rate)
735{
736 switch (ar->erp_mode) {
737 case CARL9170_ERP_AUTO:
738 case CARL9170_ERP_MAC80211:
739 if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
740 break;
741		/* fall through */
742 case CARL9170_ERP_CTS:
743 return true;
744
745 default:
746 break;
747 }
748
749 return false;
750}
751
752static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
753{
754 struct ieee80211_hdr *hdr;
755 struct _carl9170_tx_superframe *txc;
756 struct carl9170_vif_info *cvif;
757 struct ieee80211_tx_info *info;
758 struct ieee80211_tx_rate *txrate;
759 struct ieee80211_sta *sta;
760 struct carl9170_tx_info *arinfo;
761 unsigned int hw_queue;
762 int i;
763 u16 keytype = 0;
764 u16 len, icv = 0;
765 bool ampdu, no_ack;
766
767 BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
768 BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
769 CARL9170_TX_SUPERDESC_LEN);
770
771 BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
772 AR9170_TX_HWDESC_LEN);
773
774 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
775
776 hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];
777
778 hdr = (void *)skb->data;
779 info = IEEE80211_SKB_CB(skb);
780 len = skb->len;
781
782 /*
783 * Note: If the frame was sent through a monitor interface,
784 * the ieee80211_vif pointer can be NULL.
785 */
786 if (likely(info->control.vif))
787 cvif = (void *) info->control.vif->drv_priv;
788 else
789 cvif = NULL;
790
791 sta = info->control.sta;
792
793 txc = (void *)skb_push(skb, sizeof(*txc));
794 memset(txc, 0, sizeof(*txc));
795
796 ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
797 no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
798
799 if (info->control.hw_key) {
800 icv = info->control.hw_key->icv_len;
801
802 switch (info->control.hw_key->cipher) {
803 case WLAN_CIPHER_SUITE_WEP40:
804 case WLAN_CIPHER_SUITE_WEP104:
805 case WLAN_CIPHER_SUITE_TKIP:
806 keytype = AR9170_TX_MAC_ENCR_RC4;
807 break;
808 case WLAN_CIPHER_SUITE_CCMP:
809 keytype = AR9170_TX_MAC_ENCR_AES;
810 break;
811 default:
812 WARN_ON(1);
813 goto err_out;
814 }
815 }
816
817 BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
818 ((CARL9170_TX_SUPER_MISC_VIF_ID >>
819 CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
820
821 txc->s.len = cpu_to_le16(len + sizeof(*txc));
822 txc->f.length = cpu_to_le16(len + icv + 4);
823 SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc,
824 cvif ? cvif->id : 0);
825
826 txc->f.mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
827 AR9170_TX_MAC_BACKOFF);
828
829 SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);
830
831 txc->f.mac_control |= cpu_to_le16(hw_queue << AR9170_TX_MAC_QOS_S);
832 txc->f.mac_control |= cpu_to_le16(keytype);
833 txc->f.phy_control = cpu_to_le32(0);
834
835 if (no_ack)
836 txc->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
837
838 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
839 txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
840
841 txrate = &info->control.rates[0];
842 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
843 txc->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
844 else if (carl9170_tx_cts_check(ar, txrate))
845 txc->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
846
847 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count);
848 txc->f.phy_control |= carl9170_tx_physet(ar, info, txrate);
849
850 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
851 for (i = 1; i < CARL9170_TX_MAX_RATES; i++) {
852 txrate = &info->control.rates[i];
853 if (txrate->idx >= 0)
854 continue;
855
856 txrate->idx = 0;
857 txrate->count = ar->hw->max_rate_tries;
858 }
859 }
860
861 /*
862 * NOTE: For the first rate, the ERP & AMPDU flags are directly
863	 * taken from mac_control. For all fallback rates, the firmware
864 * updates the mac_control flags from the rate info field.
865 */
866 for (i = 1; i < CARL9170_TX_MAX_RATES; i++) {
867 txrate = &info->control.rates[i];
868 if (txrate->idx < 0)
869 break;
870
871 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
872 txrate->count);
873
874 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
875 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
876 CARL9170_TX_SUPER_RI_ERP_PROT_S);
877 else if (carl9170_tx_cts_check(ar, txrate))
878 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
879 CARL9170_TX_SUPER_RI_ERP_PROT_S);
880
881 /*
882 * unaggregated fallback, in case aggregation
883 * proves to be unsuccessful and unreliable.
884 */
885 if (ampdu && i < 3)
886 txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
887
888 txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate);
889 }
890
891 if (ieee80211_is_probe_resp(hdr->frame_control))
892 txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
893
894 if (ampdu) {
895 unsigned int density, factor;
896
897 if (unlikely(!sta || !cvif))
898 goto err_out;
899
900 density = info->control.sta->ht_cap.ampdu_density;
901 factor = info->control.sta->ht_cap.ampdu_factor;
902
903 if (density) {
904 /*
905 * Watch out!
906 *
907 * Otus uses slightly different density values than
908 * those from the 802.11n spec.
909 */
910
911 density = max_t(unsigned int, density + 1, 7u);
912 }
913
914 factor = min_t(unsigned int, 1u, factor);
915
916 SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY,
917 txc->s.ampdu_settings, density);
918
919 SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
920 txc->s.ampdu_settings, factor);
921
922 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) {
923 txc->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
924 } else {
925 /*
926 * Not sure if it's even possible to aggregate
927 * non-ht rates with this HW.
928 */
929 WARN_ON_ONCE(1);
930 }
931 }
932
933 arinfo = (void *)info->rate_driver_data;
934 arinfo->timeout = jiffies;
935 arinfo->ar = ar;
936 kref_init(&arinfo->ref);
937 return 0;
938
939err_out:
940 skb_pull(skb, sizeof(*txc));
941 return -EINVAL;
942}
943
944static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
945{
946 struct _carl9170_tx_superframe *super;
947
948 super = (void *) skb->data;
949 super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA);
950}
951
952static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
953{
954 struct _carl9170_tx_superframe *super;
955 int tmp;
956
957 super = (void *) skb->data;
958
959 tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) <<
960 CARL9170_TX_SUPER_AMPDU_DENSITY_S;
961
962 /*
963	 * If you haven't noticed, carl9170_tx_prepare has already filled
964	 * in all ampdu spacing & factor parameters.
965	 * Now it's time to check whether the settings have to be
966	 * updated by the firmware, or if everything is still the same.
967 *
968 * There's no sane way to handle different density values with
969 * this hardware, so we may as well just do the compare in the
970 * driver.
971 */
972
973 if (tmp != ar->current_density) {
974 ar->current_density = tmp;
975 super->s.ampdu_settings |=
976 CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY;
977 }
978
979 tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) <<
980 CARL9170_TX_SUPER_AMPDU_FACTOR_S;
981
982 if (tmp != ar->current_factor) {
983 ar->current_factor = tmp;
984 super->s.ampdu_settings |=
985 CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR;
986 }
987}
988
989static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
990 struct sk_buff *_src)
991{
992 struct _carl9170_tx_superframe *dest, *src;
993
994 dest = (void *) _dest->data;
995 src = (void *) _src->data;
996
997 /*
998 * The mac80211 rate control algorithm expects that all MPDUs in
999 * an AMPDU share the same tx vectors.
1000 * This is not really obvious right now, because the hardware
1001 * does the AMPDU setup according to its own rulebook.
1002	 * Our nicely assembled, strictly monotonically increasing mpdu
1003 * chains will be broken up, mashed back together...
1004 */
1005
1006 return (dest->f.phy_control == src->f.phy_control);
1007}
1008
1009static void carl9170_tx_ampdu(struct ar9170 *ar)
1010{
1011 struct sk_buff_head agg;
1012 struct carl9170_sta_tid *tid_info;
1013 struct sk_buff *skb, *first;
1014 unsigned int i = 0, done_ampdus = 0;
1015 u16 seq, queue, tmpssn;
1016
1017 atomic_inc(&ar->tx_ampdu_scheduler);
1018 ar->tx_ampdu_schedule = false;
1019
1020 if (atomic_read(&ar->tx_ampdu_upload))
1021 return;
1022
1023 if (!ar->tx_ampdu_list_len)
1024 return;
1025
1026 __skb_queue_head_init(&agg);
1027
1028 rcu_read_lock();
1029 tid_info = rcu_dereference(ar->tx_ampdu_iter);
1030 if (WARN_ON_ONCE(!tid_info)) {
1031 rcu_read_unlock();
1032 return;
1033 }
1034
1035retry:
1036 list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
1037 i++;
1038
1039 if (tid_info->state < CARL9170_TID_STATE_PROGRESS)
1040 continue;
1041
1042 queue = TID_TO_WME_AC(tid_info->tid);
1043
1044 spin_lock_bh(&tid_info->lock);
1045 if (tid_info->state != CARL9170_TID_STATE_XMIT) {
1046 first = skb_peek(&tid_info->queue);
1047 if (first) {
1048 struct ieee80211_tx_info *txinfo;
1049 struct carl9170_tx_info *arinfo;
1050
1051 txinfo = IEEE80211_SKB_CB(first);
1052 arinfo = (void *) txinfo->rate_driver_data;
1053
1054				if (time_is_after_jiffies(
1055				    arinfo->timeout +
1056				    msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
1057 goto processed;
1058
1059 /*
1060 * We've been waiting for the frame which
1061 * matches "snx" (start sequence of the
1062 * next aggregate) for some time now.
1063 *
1064 * But it never arrived. Therefore
1065 * jump to the next available frame
1066 * and kick-start the transmission.
1067 *
1068 * Note: This might induce odd latency
1069 * spikes because the receiver will be
1070 * waiting for the lost frame too.
1071 */
1072 ar->tx_ampdu_timeout++;
1073
1074 tid_info->snx = carl9170_get_seq(first);
1075 tid_info->state = CARL9170_TID_STATE_XMIT;
1076 } else {
1077 goto processed;
1078 }
1079 }
1080
1081 tid_info->counter++;
1082 first = skb_peek(&tid_info->queue);
1083 tmpssn = carl9170_get_seq(first);
1084 seq = tid_info->snx;
1085
1086 if (unlikely(tmpssn != seq)) {
1087 tid_info->state = CARL9170_TID_STATE_IDLE;
1088
1089 goto processed;
1090 }
1091
1092 while ((skb = skb_peek(&tid_info->queue))) {
1093 /* strict 0, 1, ..., n - 1, n frame sequence order */
1094 if (unlikely(carl9170_get_seq(skb) != seq))
1095 break;
1096
1097 /* don't upload more than AMPDU FACTOR allows. */
1098 if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >=
1099 (tid_info->max - 1)))
1100 break;
1101
1102 if (!carl9170_tx_rate_check(ar, skb, first))
1103 break;
1104
1105 atomic_inc(&ar->tx_ampdu_upload);
1106 tid_info->snx = seq = SEQ_NEXT(seq);
1107 __skb_unlink(skb, &tid_info->queue);
1108
1109 __skb_queue_tail(&agg, skb);
1110
1111 if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX)
1112 break;
1113 }
1114
1115 if (skb_queue_empty(&tid_info->queue) ||
1116 carl9170_get_seq(skb_peek(&tid_info->queue)) !=
1117 tid_info->snx) {
1118 /*
1119			 * stop the TID if A-MPDU frames are still missing,
1120			 * or whenever the queue is empty.
1121 */
1122
1123 tid_info->state = CARL9170_TID_STATE_IDLE;
1124 }
1125 done_ampdus++;
1126
1127processed:
1128 spin_unlock_bh(&tid_info->lock);
1129
1130 if (skb_queue_empty(&agg))
1131 continue;
1132
1133 /* apply ampdu spacing & factor settings */
1134 carl9170_set_ampdu_params(ar, skb_peek(&agg));
1135
1136 /* set aggregation push bit */
1137 carl9170_set_immba(ar, skb_peek_tail(&agg));
1138
1139 spin_lock_bh(&ar->tx_pending[queue].lock);
1140 skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
1141 spin_unlock_bh(&ar->tx_pending[queue].lock);
1142 ar->tx_schedule = true;
1143 }
1144 if ((done_ampdus++ == 0) && (i++ == 0))
1145 goto retry;
1146
1147 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
1148 rcu_read_unlock();
1149}
1150
1151static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
1152 struct sk_buff_head *queue)
1153{
1154 struct sk_buff *skb;
1155 struct ieee80211_tx_info *info;
1156 struct carl9170_tx_info *arinfo;
1157
1158 BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
1159
1160 spin_lock_bh(&queue->lock);
1161 skb = skb_peek(queue);
1162 if (unlikely(!skb))
1163 goto err_unlock;
1164
1165 if (carl9170_alloc_dev_space(ar, skb))
1166 goto err_unlock;
1167
1168 __skb_unlink(skb, queue);
1169 spin_unlock_bh(&queue->lock);
1170
1171 info = IEEE80211_SKB_CB(skb);
1172 arinfo = (void *) info->rate_driver_data;
1173
1174 arinfo->timeout = jiffies;
1175
1176 /*
1177 * increase ref count to "2".
1178 * Ref counting is the easiest way to solve the race between
1179	 * the urb's completion routine: carl9170_tx_callback and
1180 * wlan tx status functions: carl9170_tx_status/janitor.
1181 */
1182 carl9170_tx_get_skb(skb);
1183
1184 return skb;
1185
1186err_unlock:
1187 spin_unlock_bh(&queue->lock);
1188 return NULL;
1189}
1190
1191void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
1192{
1193 struct _carl9170_tx_superframe *super;
1194 uint8_t q = 0;
1195
1196 ar->tx_dropped++;
1197
1198 super = (void *)skb->data;
1199 SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
1200 ar9170_qmap[carl9170_get_queue(ar, skb)]);
1201 __carl9170_tx_process_status(ar, super->s.cookie, q);
1202}
1203
1204static void carl9170_tx(struct ar9170 *ar)
1205{
1206 struct sk_buff *skb;
1207 unsigned int i, q;
1208 bool schedule_garbagecollector = false;
1209
1210 ar->tx_schedule = false;
1211
1212 if (unlikely(!IS_STARTED(ar)))
1213 return;
1214
1215 carl9170_usb_handle_tx_err(ar);
1216
1217 for (i = 0; i < ar->hw->queues; i++) {
1218 while (!skb_queue_empty(&ar->tx_pending[i])) {
1219 skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
1220 if (unlikely(!skb))
1221 break;
1222
1223 atomic_inc(&ar->tx_total_pending);
1224
1225 q = __carl9170_get_queue(ar, i);
1226 /*
1227 * NB: tx_status[i] vs. tx_status[q],
1228 * TODO: Move into pick_skb or alloc_dev_space.
1229 */
1230 skb_queue_tail(&ar->tx_status[q], skb);
1231
1232 carl9170_usb_tx(ar, skb);
1233 schedule_garbagecollector = true;
1234 }
1235 }
1236
1237 if (!schedule_garbagecollector)
1238 return;
1239
1240 ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
1241 msecs_to_jiffies(CARL9170_TX_TIMEOUT));
1242}
1243
1244static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
1245 struct ieee80211_sta *sta, struct sk_buff *skb)
1246{
1247 struct carl9170_sta_info *sta_info;
1248 struct carl9170_sta_tid *agg;
1249 struct sk_buff *iter;
1250 unsigned int max;
1251 u16 tid, seq, qseq, off;
1252 bool run = false;
1253
1254 tid = carl9170_get_tid(skb);
1255 seq = carl9170_get_seq(skb);
1256 sta_info = (void *) sta->drv_priv;
1257
1258 rcu_read_lock();
1259 agg = rcu_dereference(sta_info->agg[tid]);
1260 max = sta_info->ampdu_max_len;
1261
1262 if (!agg)
1263 goto err_unlock_rcu;
1264
1265 spin_lock_bh(&agg->lock);
1266 if (unlikely(agg->state < CARL9170_TID_STATE_IDLE))
1267 goto err_unlock;
1268
1269 /* check if sequence is within the BA window */
1270 if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq)))
1271 goto err_unlock;
1272
1273 if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq)))
1274 goto err_unlock;
1275
1276 off = SEQ_DIFF(seq, agg->bsn);
1277 if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap)))
1278 goto err_unlock;
1279
1280 if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) {
1281 __skb_queue_tail(&agg->queue, skb);
1282 agg->hsn = seq;
1283 goto queued;
1284 }
1285
1286 skb_queue_reverse_walk(&agg->queue, iter) {
1287 qseq = carl9170_get_seq(iter);
1288
1289 if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) {
1290 __skb_queue_after(&agg->queue, iter, skb);
1291 goto queued;
1292 }
1293 }
1294
1295 __skb_queue_head(&agg->queue, skb);
1296queued:
1297
1298 if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) {
1299 if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
1300 agg->state = CARL9170_TID_STATE_XMIT;
1301 run = true;
1302 }
1303 }
1304
1305 spin_unlock_bh(&agg->lock);
1306 rcu_read_unlock();
1307
1308 return run;
1309
1310err_unlock:
1311 spin_unlock_bh(&agg->lock);
1312
1313err_unlock_rcu:
1314 rcu_read_unlock();
1315 carl9170_tx_status(ar, skb, false);
1316 ar->tx_dropped++;
1317 return false;
1318}
1319
1320int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1321{
1322 struct ar9170 *ar = hw->priv;
1323 struct ieee80211_tx_info *info;
1324 struct ieee80211_sta *sta;
1325 bool run;
1326
1327 if (unlikely(!IS_STARTED(ar)))
1328 goto err_free;
1329
1330 info = IEEE80211_SKB_CB(skb);
1331 sta = info->control.sta;
1332
1333 if (unlikely(carl9170_tx_prepare(ar, skb)))
1334 goto err_free;
1335
1336 carl9170_tx_accounting(ar, skb);
1337 /*
1338 * from now on, one has to use carl9170_tx_status to free
1339	 * all resources which are associated with the frame.
1340 */
1341
1342 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1343 if (WARN_ON_ONCE(!sta))
1344 goto err_free;
1345
1346 run = carl9170_tx_ampdu_queue(ar, sta, skb);
1347 if (run)
1348 carl9170_tx_ampdu(ar);
1349
1350 } else {
1351 unsigned int queue = skb_get_queue_mapping(skb);
1352
1353 skb_queue_tail(&ar->tx_pending[queue], skb);
1354 }
1355
1356 carl9170_tx(ar);
1357 return NETDEV_TX_OK;
1358
1359err_free:
1360 ar->tx_dropped++;
1361 dev_kfree_skb_any(skb);
1362 return NETDEV_TX_OK;
1363}
1364
1365void carl9170_tx_scheduler(struct ar9170 *ar)
1366{
1367
1368 if (ar->tx_ampdu_schedule)
1369 carl9170_tx_ampdu(ar);
1370
1371 if (ar->tx_schedule)
1372 carl9170_tx(ar);
1373}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
new file mode 100644
index 000000000000..eb789a9e4f15
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -0,0 +1,1138 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * USB - frontend
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/module.h>
41#include <linux/slab.h>
42#include <linux/usb.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/device.h>
46#include <net/mac80211.h>
47#include "carl9170.h"
48#include "cmd.h"
49#include "hw.h"
50#include "fwcmd.h"
51
52MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
53MODULE_AUTHOR("Christian Lamparter <chunkeey@googlemail.com>");
54MODULE_LICENSE("GPL");
55MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
56MODULE_FIRMWARE(CARL9170FW_NAME);
57MODULE_ALIAS("ar9170usb");
58MODULE_ALIAS("arusb_lnx");
59
60/*
61 * Note:
62 *
63 * Always update our wiki's device list (located at:
64 * http://wireless.kernel.org/en/users/Drivers/ar9170/devices ),
65 * whenever you add a new device.
66 */
67static struct usb_device_id carl9170_usb_ids[] = {
68 /* Atheros 9170 */
69 { USB_DEVICE(0x0cf3, 0x9170) },
70 /* Atheros TG121N */
71 { USB_DEVICE(0x0cf3, 0x1001) },
72 /* TP-Link TL-WN821N v2 */
73 { USB_DEVICE(0x0cf3, 0x1002), .driver_info = CARL9170_WPS_BUTTON |
74 CARL9170_ONE_LED },
75 /* 3Com Dual Band 802.11n USB Adapter */
76 { USB_DEVICE(0x0cf3, 0x1010) },
77 /* H3C Dual Band 802.11n USB Adapter */
78 { USB_DEVICE(0x0cf3, 0x1011) },
79 /* Cace Airpcap NX */
80 { USB_DEVICE(0xcace, 0x0300) },
81 /* D-Link DWA 160 A1 */
82 { USB_DEVICE(0x07d1, 0x3c10) },
83 /* D-Link DWA 160 A2 */
84 { USB_DEVICE(0x07d1, 0x3a09) },
85 /* Netgear WNA1000 */
86 { USB_DEVICE(0x0846, 0x9040) },
87 /* Netgear WNDA3100 */
88 { USB_DEVICE(0x0846, 0x9010) },
89 /* Netgear WN111 v2 */
90 { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED },
91 /* Zydas ZD1221 */
92 { USB_DEVICE(0x0ace, 0x1221) },
93 /* Proxim ORiNOCO 802.11n USB */
94 { USB_DEVICE(0x1435, 0x0804) },
95 /* WNC Generic 11n USB Dongle */
96 { USB_DEVICE(0x1435, 0x0326) },
97 /* ZyXEL NWD271N */
98 { USB_DEVICE(0x0586, 0x3417) },
99 /* Z-Com UB81 BG */
100 { USB_DEVICE(0x0cde, 0x0023) },
101 /* Z-Com UB82 ABG */
102 { USB_DEVICE(0x0cde, 0x0026) },
103 /* Sphairon Homelink 1202 */
104 { USB_DEVICE(0x0cde, 0x0027) },
105 /* Arcadyan WN7512 */
106 { USB_DEVICE(0x083a, 0xf522) },
107 /* Planex GWUS300 */
108 { USB_DEVICE(0x2019, 0x5304) },
109 /* IO-Data WNGDNUS2 */
110 { USB_DEVICE(0x04bb, 0x093f) },
111 /* NEC WL300NU-G */
112 { USB_DEVICE(0x0409, 0x0249) },
113 /* AVM FRITZ!WLAN USB Stick N */
114 { USB_DEVICE(0x057c, 0x8401) },
115 /* AVM FRITZ!WLAN USB Stick N 2.4 */
116 { USB_DEVICE(0x057c, 0x8402) },
117 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
118 { USB_DEVICE(0x1668, 0x1200) },
119
120 /* terminate */
121 {}
122};
123MODULE_DEVICE_TABLE(usb, carl9170_usb_ids);
124
125static void carl9170_usb_submit_data_urb(struct ar9170 *ar)
126{
127 struct urb *urb;
128 int err;
129
130 if (atomic_inc_return(&ar->tx_anch_urbs) > AR9170_NUM_TX_URBS)
131 goto err_acc;
132
133 urb = usb_get_from_anchor(&ar->tx_wait);
134 if (!urb)
135 goto err_acc;
136
137 usb_anchor_urb(urb, &ar->tx_anch);
138
139 err = usb_submit_urb(urb, GFP_ATOMIC);
140 if (unlikely(err)) {
141 if (net_ratelimit()) {
142 dev_err(&ar->udev->dev, "tx submit failed (%d)\n",
143 err);
144 }
145
146 usb_unanchor_urb(urb);
147 usb_anchor_urb(urb, &ar->tx_err);
148 }
149
150 usb_free_urb(urb);
151
152 if (likely(err == 0))
153 return;
154
155err_acc:
156 atomic_dec(&ar->tx_anch_urbs);
157}
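
The helper above relies on an optimistic reserve-then-roll-back idiom: the in-flight counter is bumped first and only then compared against the limit, so a racer that overshoots simply undoes its increment and bails out. Distilled to its essentials (a sketch with illustrative names, not driver code):

    static bool reserve_tx_slot(atomic_t *active, int limit)
    {
            if (atomic_inc_return(active) > limit) {
                    /* lost the race; undo the speculative increment */
                    atomic_dec(active);
                    return false;
            }
            /* slot stays reserved until the urb completion decrements */
            return true;
    }

Note that err_acc is also taken when no urb was waiting in tx_wait, which is why the decrement is shared between both failure paths.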
158
159static void carl9170_usb_tx_data_complete(struct urb *urb)
160{
161 struct ar9170 *ar = (struct ar9170 *)
162 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
163
164 if (WARN_ON_ONCE(!ar)) {
165 dev_kfree_skb_irq(urb->context);
166 return;
167 }
168
169 atomic_dec(&ar->tx_anch_urbs);
170
171 switch (urb->status) {
172 /* everything is fine */
173 case 0:
174 carl9170_tx_callback(ar, (void *)urb->context);
175 break;
176
177 /* disconnect */
178 case -ENOENT:
179 case -ECONNRESET:
180 case -ENODEV:
181 case -ESHUTDOWN:
182 /*
183 * Defer the frame clean-up to the tasklet worker.
184 * This is necessary because carl9170_tx_drop
185 * cannot be called from an irqsave context.
186 */
187 usb_anchor_urb(urb, &ar->tx_err);
188 return;
189
190 /* an unexpected transmission error occurred */
191 default:
192 if (net_ratelimit()) {
193 dev_err(&ar->udev->dev, "tx failed (%d)\n",
194 urb->status);
195 }
196
197 usb_anchor_urb(urb, &ar->tx_err);
198 break;
199 }
200
201 if (likely(IS_STARTED(ar)))
202 carl9170_usb_submit_data_urb(ar);
203}
204
205static int carl9170_usb_submit_cmd_urb(struct ar9170 *ar)
206{
207 struct urb *urb;
208 int err;
209
210 if (atomic_inc_return(&ar->tx_cmd_urbs) != 1) {
211 atomic_dec(&ar->tx_cmd_urbs);
212 return 0;
213 }
214
215 urb = usb_get_from_anchor(&ar->tx_cmd);
216 if (!urb) {
217 atomic_dec(&ar->tx_cmd_urbs);
218 return 0;
219 }
220
221 usb_anchor_urb(urb, &ar->tx_anch);
222 err = usb_submit_urb(urb, GFP_ATOMIC);
223 if (unlikely(err)) {
224 usb_unanchor_urb(urb);
225 atomic_dec(&ar->tx_cmd_urbs);
226 }
227 usb_free_urb(urb);
228
229 return err;
230}
231
232static void carl9170_usb_cmd_complete(struct urb *urb)
233{
234 struct ar9170 *ar = urb->context;
235 int err = 0;
236
237 if (WARN_ON_ONCE(!ar))
238 return;
239
240 atomic_dec(&ar->tx_cmd_urbs);
241
242 switch (urb->status) {
243 /* everything is fine */
244 case 0:
245 break;
246
247 /* disconnect */
248 case -ENOENT:
249 case -ECONNRESET:
250 case -ENODEV:
251 case -ESHUTDOWN:
252 return;
253
254 default:
255 err = urb->status;
256 break;
257 }
258
259 if (!IS_INITIALIZED(ar))
260 return;
261
262 if (err)
263 dev_err(&ar->udev->dev, "submit cmd cb failed (%d).\n", err);
264
265 err = carl9170_usb_submit_cmd_urb(ar);
266 if (err)
267 dev_err(&ar->udev->dev, "submit cmd failed (%d).\n", err);
268}
269
270static void carl9170_usb_rx_irq_complete(struct urb *urb)
271{
272 struct ar9170 *ar = urb->context;
273
274 if (WARN_ON_ONCE(!ar))
275 return;
276
277 switch (urb->status) {
278 /* everything is fine */
279 case 0:
280 break;
281
282 /* disconnect */
283 case -ENOENT:
284 case -ECONNRESET:
285 case -ENODEV:
286 case -ESHUTDOWN:
287 return;
288
289 default:
290 goto resubmit;
291 }
292
293 carl9170_handle_command_response(ar, urb->transfer_buffer,
294 urb->actual_length);
295
296resubmit:
297 usb_anchor_urb(urb, &ar->rx_anch);
298 if (unlikely(usb_submit_urb(urb, GFP_ATOMIC)))
299 usb_unanchor_urb(urb);
300}
301
302static int carl9170_usb_submit_rx_urb(struct ar9170 *ar, gfp_t gfp)
303{
304 struct urb *urb;
305 int err = 0, runs = 0;
306
307 while ((atomic_read(&ar->rx_anch_urbs) < AR9170_NUM_RX_URBS) &&
308 (runs++ < AR9170_NUM_RX_URBS)) {
309 err = -ENOSPC;
310 urb = usb_get_from_anchor(&ar->rx_pool);
311 if (urb) {
312 usb_anchor_urb(urb, &ar->rx_anch);
313 err = usb_submit_urb(urb, gfp);
314 if (unlikely(err)) {
315 usb_unanchor_urb(urb);
316 usb_anchor_urb(urb, &ar->rx_pool);
317 } else {
318 atomic_dec(&ar->rx_pool_urbs);
319 atomic_inc(&ar->rx_anch_urbs);
320 }
321 usb_free_urb(urb);
322 }
323 }
324
325 return err;
326}
327
328static void carl9170_usb_rx_work(struct ar9170 *ar)
329{
330 struct urb *urb;
331 int i;
332
333 for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) {
334 urb = usb_get_from_anchor(&ar->rx_work);
335 if (!urb)
336 break;
337
338 atomic_dec(&ar->rx_work_urbs);
339 if (IS_INITIALIZED(ar)) {
340 carl9170_rx(ar, urb->transfer_buffer,
341 urb->actual_length);
342 }
343
344 usb_anchor_urb(urb, &ar->rx_pool);
345 atomic_inc(&ar->rx_pool_urbs);
346
347 usb_free_urb(urb);
348
349 carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC);
350 }
351}
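
Between them, the three counters partition the fixed-size rx urb pool: every buffer is in exactly one of the free pool (rx_pool), in flight (rx_anch), or pending processing (rx_work). When the completions and the tasklet are quiescent, an assertion along these lines should hold (a sketch; the driver itself does not carry this check):

    WARN_ON_ONCE(atomic_read(&ar->rx_pool_urbs) +
                 atomic_read(&ar->rx_anch_urbs) +
                 atomic_read(&ar->rx_work_urbs) != AR9170_NUM_RX_URBS_POOL);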
352
353void carl9170_usb_handle_tx_err(struct ar9170 *ar)
354{
355 struct urb *urb;
356
357 while ((urb = usb_get_from_anchor(&ar->tx_err))) {
358 struct sk_buff *skb = (void *)urb->context;
359
360 carl9170_tx_drop(ar, skb);
361 carl9170_tx_callback(ar, skb);
362 usb_free_urb(urb);
363 }
364}
365
366static void carl9170_usb_tasklet(unsigned long data)
367{
368 struct ar9170 *ar = (struct ar9170 *) data;
369
370 if (!IS_INITIALIZED(ar))
371 return;
372
373 carl9170_usb_rx_work(ar);
374
375 /*
376 * Strictly speaking: The tx scheduler is not part of the USB system.
377 * But the rx worker returns frames back to the mac80211-stack and
378 * this is the _perfect_ place to generate the next transmissions.
379 */
380 if (IS_STARTED(ar))
381 carl9170_tx_scheduler(ar);
382}
383
384static void carl9170_usb_rx_complete(struct urb *urb)
385{
386 struct ar9170 *ar = (struct ar9170 *)urb->context;
387 int err;
388
389 if (WARN_ON_ONCE(!ar))
390 return;
391
392 atomic_dec(&ar->rx_anch_urbs);
393
394 switch (urb->status) {
395 case 0:
396 /* rx path */
397 usb_anchor_urb(urb, &ar->rx_work);
398 atomic_inc(&ar->rx_work_urbs);
399 break;
400
401 case -ENOENT:
402 case -ECONNRESET:
403 case -ENODEV:
404 case -ESHUTDOWN:
405 /* handle disconnect events */
406 return;
407
408 default:
409 /* handle all other errors */
410 usb_anchor_urb(urb, &ar->rx_pool);
411 atomic_inc(&ar->rx_pool_urbs);
412 break;
413 }
414
415 err = carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC);
416 if (unlikely(err)) {
417 /*
418 * usb_submit_rx_urb reported a problem.
419 * In case this is due to a rx buffer shortage,
420 * elevate the tasklet worker priority to
421 * the highest available level.
422 */
423 tasklet_hi_schedule(&ar->usb_tasklet);
424
425 if (atomic_read(&ar->rx_anch_urbs) == 0) {
426 /*
427 * The system is too slow to cope with
428 * the enormous workload. We have simply
429 * run out of active rx urbs and this
430 * unfortunately leads to an unpredictable
431 * device.
432 */
433
434 carl9170_restart(ar, CARL9170_RR_SLOW_SYSTEM);
435 }
436 } else {
437 /*
438 * Using anything less than _high_ priority absolutely
439 * kills the rx performance on my UP system...
440 */
441 tasklet_hi_schedule(&ar->usb_tasklet);
442 }
443}
444
445static struct urb *carl9170_usb_alloc_rx_urb(struct ar9170 *ar, gfp_t gfp)
446{
447 struct urb *urb;
448 void *buf;
449
450 buf = kmalloc(ar->fw.rx_size, gfp);
451 if (!buf)
452 return NULL;
453
454 urb = usb_alloc_urb(0, gfp);
455 if (!urb) {
456 kfree(buf);
457 return NULL;
458 }
459
460 usb_fill_bulk_urb(urb, ar->udev, usb_rcvbulkpipe(ar->udev,
461 AR9170_USB_EP_RX), buf, ar->fw.rx_size,
462 carl9170_usb_rx_complete, ar);
463
464 urb->transfer_flags |= URB_FREE_BUFFER;
465
466 return urb;
467}
468
469static int carl9170_usb_send_rx_irq_urb(struct ar9170 *ar)
470{
471 struct urb *urb = NULL;
472 void *ibuf;
473 int err = -ENOMEM;
474
475 urb = usb_alloc_urb(0, GFP_KERNEL);
476 if (!urb)
477 goto out;
478
479 ibuf = kmalloc(AR9170_USB_EP_CTRL_MAX, GFP_KERNEL);
480 if (!ibuf)
481 goto out;
482
483 usb_fill_int_urb(urb, ar->udev, usb_rcvintpipe(ar->udev,
484 AR9170_USB_EP_IRQ), ibuf, AR9170_USB_EP_CTRL_MAX,
485 carl9170_usb_rx_irq_complete, ar, 1);
486
487 urb->transfer_flags |= URB_FREE_BUFFER;
488
489 usb_anchor_urb(urb, &ar->rx_anch);
490 err = usb_submit_urb(urb, GFP_KERNEL);
491 if (err)
492 usb_unanchor_urb(urb);
493
494out:
495 usb_free_urb(urb);
496 return err;
497}
498
499static int carl9170_usb_init_rx_bulk_urbs(struct ar9170 *ar)
500{
501 struct urb *urb;
502 int i, err = -EINVAL;
503
504 /*
505 * The driver actively maintains a second shadow
506 * pool for inactive, but fully-prepared rx urbs.
507 *
508 * The pool should help the driver to master huge
509 * workload spikes without running the risk of
510 * undersupplying the hardware or wasting time by
511 * processing rx data (streams) inside the urb
512 * completion (hardirq context).
513 */
514 for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) {
515 urb = carl9170_usb_alloc_rx_urb(ar, GFP_KERNEL);
516 if (!urb) {
517 err = -ENOMEM;
518 goto err_out;
519 }
520
521 usb_anchor_urb(urb, &ar->rx_pool);
522 atomic_inc(&ar->rx_pool_urbs);
523 usb_free_urb(urb);
524 }
525
526 err = carl9170_usb_submit_rx_urb(ar, GFP_KERNEL);
527 if (err)
528 goto err_out;
529
530 /* the device is now waiting for the firmware. */
531 carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE);
532 return 0;
533
534err_out:
535
536 usb_scuttle_anchored_urbs(&ar->rx_pool);
537 usb_scuttle_anchored_urbs(&ar->rx_work);
538 usb_kill_anchored_urbs(&ar->rx_anch);
539 return err;
540}
541
542static int carl9170_usb_flush(struct ar9170 *ar)
543{
544 struct urb *urb;
545 int ret, err = 0;
546
547 while ((urb = usb_get_from_anchor(&ar->tx_wait))) {
548 struct sk_buff *skb = (void *)urb->context;
549 carl9170_tx_drop(ar, skb);
550 carl9170_tx_callback(ar, skb);
551 usb_free_urb(urb);
552 }
553
554 ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, HZ);
555 if (ret == 0)
556 err = -ETIMEDOUT;
557
558 /* let's wait a while until the tx queues have dried out */
559 ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, HZ);
560 if (ret == 0)
561 err = -ETIMEDOUT;
562
563 usb_kill_anchored_urbs(&ar->tx_anch);
564 carl9170_usb_handle_tx_err(ar);
565
566 return err;
567}
568
569static void carl9170_usb_cancel_urbs(struct ar9170 *ar)
570{
571 int err;
572
573 carl9170_set_state(ar, CARL9170_UNKNOWN_STATE);
574
575 err = carl9170_usb_flush(ar);
576 if (err)
577 dev_err(&ar->udev->dev, "stuck tx urbs!\n");
578
579 usb_poison_anchored_urbs(&ar->tx_anch);
580 carl9170_usb_handle_tx_err(ar);
581 usb_poison_anchored_urbs(&ar->rx_anch);
582
583 tasklet_kill(&ar->usb_tasklet);
584
585 usb_scuttle_anchored_urbs(&ar->rx_work);
586 usb_scuttle_anchored_urbs(&ar->rx_pool);
587 usb_scuttle_anchored_urbs(&ar->tx_cmd);
588}
589
590int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
591 const bool free_buf)
592{
593 struct urb *urb;
594
595 if (!IS_INITIALIZED(ar))
596 return -EPERM;
597
598 if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4))
599 return -EINVAL;
600
601 urb = usb_alloc_urb(0, GFP_ATOMIC);
602 if (!urb)
603 return -ENOMEM;
604
605 usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
606 AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
607 carl9170_usb_cmd_complete, ar, 1);
608
609 urb->transfer_flags |= URB_ZERO_PACKET;
610
611 if (free_buf)
612 urb->transfer_flags |= URB_FREE_BUFFER;
613
614 usb_anchor_urb(urb, &ar->tx_cmd);
615 usb_free_urb(urb);
616
617 return carl9170_usb_submit_cmd_urb(ar);
618}
619
620int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
621 unsigned int plen, void *payload, unsigned int outlen, void *out)
622{
623 int err = -ENOMEM;
624
625 if (!IS_ACCEPTING_CMD(ar))
626 return -EIO;
627
628 if (!(cmd & CARL9170_CMD_ASYNC_FLAG))
629 might_sleep();
630
631 ar->cmd.hdr.len = plen;
632 ar->cmd.hdr.cmd = cmd;
633 /* writing multiple regs fills this buffer already */
634 if (plen && payload != (u8 *)(ar->cmd.data))
635 memcpy(ar->cmd.data, payload, plen);
636
637 spin_lock_bh(&ar->cmd_lock);
638 ar->readbuf = (u8 *)out;
639 ar->readlen = outlen;
640 spin_unlock_bh(&ar->cmd_lock);
641
642 err = __carl9170_exec_cmd(ar, &ar->cmd, false);
643
644 if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
645 err = wait_for_completion_timeout(&ar->cmd_wait, HZ);
646 if (err == 0) {
647 err = -ETIMEDOUT;
648 goto err_unbuf;
649 }
650
651 if (ar->readlen != outlen) {
652 err = -EMSGSIZE;
653 goto err_unbuf;
654 }
655 }
656
657 return 0;
658
659err_unbuf:
660 /* Maybe the device was removed while we were waiting? */
661 if (IS_STARTED(ar)) {
662 dev_err(&ar->udev->dev, "no command feedback "
663 "received (%d).\n", err);
664
665 /* provide some potentially useful debug information */
666 print_hex_dump_bytes("carl9170 cmd: ", DUMP_PREFIX_NONE,
667 &ar->cmd, plen + 4);
668
669 carl9170_restart(ar, CARL9170_RR_COMMAND_TIMEOUT);
670 }
671
672 /* invalidate to avoid completing the next command prematurely */
673 spin_lock_bh(&ar->cmd_lock);
674 ar->readbuf = NULL;
675 ar->readlen = 0;
676 spin_unlock_bh(&ar->cmd_lock);
677
678 return err;
679}
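
For the synchronous case the caller's payload and response buffers must stay valid until the completion fires or the HZ timeout expires. A hypothetical caller, modelled on the echo test used during firmware bring-up (CARL9170_CMD_ECHO is assumed to be one of the carl9170_cmd_oids from fwcmd.h):

    static int example_echo(struct ar9170 *ar, u32 pattern)
    {
            u32 echoed;
            int err;

            err = carl9170_exec_cmd(ar, CARL9170_CMD_ECHO, sizeof(pattern),
                                    &pattern, sizeof(echoed), &echoed);
            if (err)
                    return err;

            return (echoed == pattern) ? 0 : -EIO;
    }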
680
681void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb)
682{
683 struct urb *urb;
684 struct ar9170_stream *tx_stream;
685 void *data;
686 unsigned int len;
687
688 if (!IS_STARTED(ar))
689 goto err_drop;
690
691 urb = usb_alloc_urb(0, GFP_ATOMIC);
692 if (!urb)
693 goto err_drop;
694
695 if (ar->fw.tx_stream) {
696 tx_stream = (void *) (skb->data - sizeof(*tx_stream));
697
698 len = skb->len + sizeof(*tx_stream);
699 tx_stream->length = cpu_to_le16(len);
700 tx_stream->tag = cpu_to_le16(AR9170_TX_STREAM_TAG);
701 data = tx_stream;
702 } else {
703 data = skb->data;
704 len = skb->len;
705 }
706
707 usb_fill_bulk_urb(urb, ar->udev, usb_sndbulkpipe(ar->udev,
708 AR9170_USB_EP_TX), data, len,
709 carl9170_usb_tx_data_complete, skb);
710
711 urb->transfer_flags |= URB_ZERO_PACKET;
712
713 usb_anchor_urb(urb, &ar->tx_wait);
714
715 usb_free_urb(urb);
716
717 carl9170_usb_submit_data_urb(ar);
718 return;
719
720err_drop:
721 carl9170_tx_drop(ar, skb);
722 carl9170_tx_callback(ar, skb);
723}
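
With stream framing enabled, the ar9170_stream header is written into the skb's headroom immediately in front of the frame, so no copy is needed. For a 60-byte frame the device would receive the following (assuming a four-byte header laid out length-then-tag, as the assignments above suggest):

    /* illustrative values */
    len = 60 + sizeof(*tx_stream);                  /* 64 */
    tx_stream->length = cpu_to_le16(len);
    tx_stream->tag = cpu_to_le16(AR9170_TX_STREAM_TAG);
    /* on the wire: | le16 length = 64 | le16 tag | 60-byte frame | */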
724
725static void carl9170_release_firmware(struct ar9170 *ar)
726{
727 if (ar->fw.fw) {
728 release_firmware(ar->fw.fw);
729 memset(&ar->fw, 0, sizeof(ar->fw));
730 }
731}
732
733void carl9170_usb_stop(struct ar9170 *ar)
734{
735 int ret;
736
737 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STOPPED);
738
739 ret = carl9170_usb_flush(ar);
740 if (ret)
741 dev_err(&ar->udev->dev, "kill pending tx urbs.\n");
742
743 usb_poison_anchored_urbs(&ar->tx_anch);
744 carl9170_usb_handle_tx_err(ar);
745
746 /* kill any pending command */
747 spin_lock_bh(&ar->cmd_lock);
748 ar->readlen = 0;
749 spin_unlock_bh(&ar->cmd_lock);
750 complete_all(&ar->cmd_wait);
751
752 /* This is required to prevent an early completion on _start */
753 INIT_COMPLETION(ar->cmd_wait);
754
755 /*
756 * Note:
757 * So far we have freed all tx urbs, but we dare not touch any rx urbs.
758 * Otherwise we would end up with an unresponsive device...
759 */
760}
761
762int carl9170_usb_open(struct ar9170 *ar)
763{
764 usb_unpoison_anchored_urbs(&ar->tx_anch);
765
766 carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE);
767 return 0;
768}
769
770static int carl9170_usb_load_firmware(struct ar9170 *ar)
771{
772 const u8 *data;
773 u8 *buf;
774 unsigned int transfer;
775 size_t len;
776 u32 addr;
777 int err = 0;
778
779 buf = kmalloc(4096, GFP_KERNEL);
780 if (!buf) {
781 err = -ENOMEM;
782 goto err_out;
783 }
784
785 data = ar->fw.fw->data;
786 len = ar->fw.fw->size;
787 addr = ar->fw.address;
788
789 /* this removes the miniboot image */
790 data += ar->fw.offset;
791 len -= ar->fw.offset;
792
793 while (len) {
794 transfer = min_t(unsigned int, len, 4096u);
795 memcpy(buf, data, transfer);
796
797 err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0),
798 0x30 /* FW DL */, 0x40 | USB_DIR_OUT,
799 addr >> 8, 0, buf, transfer, 100);
800
801 if (err < 0) {
802 kfree(buf);
803 goto err_out;
804 }
805
806 len -= transfer;
807 data += transfer;
808 addr += transfer;
809 }
810 kfree(buf);
811
812 err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0),
813 0x31 /* FW DL COMPLETE */,
814 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 200);
815
816 if (wait_for_completion_timeout(&ar->fw_boot_wait, HZ) == 0) {
817 err = -ETIMEDOUT;
818 goto err_out;
819 }
820
821 err = carl9170_echo_test(ar, 0x4a110123);
822 if (err)
823 goto err_out;
824
825 /* firmware restarts cmd counter */
826 ar->cmd_seq = -1;
827
828 return 0;
829
830err_out:
831 dev_err(&ar->udev->dev, "firmware upload failed (%d).\n", err);
832 return err;
833}
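
Since the vendor request passes addr >> 8 as wValue, the upload address must stay 256-byte aligned, which the fixed 4096-byte chunking guarantees. Worked through with an illustrative start address:

    /* ar->fw.address = 0x200000 (assumed), 10000 bytes after the
     * miniboot offset:
     *   chunk 1: 4096 bytes, wValue = 0x200000 >> 8 = 0x2000
     *   chunk 2: 4096 bytes, wValue = 0x2010
     *   chunk 3: 1808 bytes, wValue = 0x2020
     * each full chunk advances wValue by 4096 >> 8 = 0x10.
     */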
834
835int carl9170_usb_restart(struct ar9170 *ar)
836{
837 int err = 0;
838
839 if (ar->intf->condition != USB_INTERFACE_BOUND)
840 return 0;
841
842 /* Disable command response sequence counter. */
843 ar->cmd_seq = -2;
844
845 err = carl9170_reboot(ar);
846
847 carl9170_usb_stop(ar);
848
849 if (err)
850 goto err_out;
851
852 tasklet_schedule(&ar->usb_tasklet);
853
854 /* The reboot procedure can take quite a while to complete. */
855 msleep(1100);
856
857 err = carl9170_usb_open(ar);
858 if (err)
859 goto err_out;
860
861 err = carl9170_usb_load_firmware(ar);
862 if (err)
863 goto err_out;
864
865 return 0;
866
867err_out:
868 carl9170_usb_cancel_urbs(ar);
869 return err;
870}
871
872void carl9170_usb_reset(struct ar9170 *ar)
873{
874 /*
875 * This is the last resort to get the device going again
876 * without any *user replugging action*.
877 *
878 * But there is a catch: usb_reset really is like a physical
879 * *reconnect*. The mac80211 state will be lost in the process.
880 * Therefore a userspace application that is monitoring
881 * the link must step in.
882 */
883 carl9170_usb_cancel_urbs(ar);
884
885 carl9170_usb_stop(ar);
886
887 usb_queue_reset_device(ar->intf);
888}
889
890static int carl9170_usb_init_device(struct ar9170 *ar)
891{
892 int err;
893
894 err = carl9170_usb_send_rx_irq_urb(ar);
895 if (err)
896 goto err_out;
897
898 err = carl9170_usb_init_rx_bulk_urbs(ar);
899 if (err)
900 goto err_unrx;
901
902 mutex_lock(&ar->mutex);
903 err = carl9170_usb_load_firmware(ar);
904 mutex_unlock(&ar->mutex);
905 if (err)
906 goto err_unrx;
907
908 return 0;
909
910err_unrx:
911 carl9170_usb_cancel_urbs(ar);
912
913err_out:
914 return err;
915}
916
917static void carl9170_usb_firmware_failed(struct ar9170 *ar)
918{
919 struct device *parent = ar->udev->dev.parent;
920 struct usb_device *udev;
921
922 /*
923 * Store a copy of the usb_device pointer locally.
924 * This is because device_release_driver initiates
925 * carl9170_usb_disconnect, which in turn frees our
926 * driver context (ar).
927 */
928 udev = ar->udev;
929
930 complete(&ar->fw_load_wait);
931
932 /* the firmware failed to load; unbind from the device */
933 if (parent)
934 device_lock(parent);
935
936 device_release_driver(&udev->dev);
937 if (parent)
938 device_unlock(parent);
939
940 usb_put_dev(udev);
941}
942
943static void carl9170_usb_firmware_finish(struct ar9170 *ar)
944{
945 int err;
946
947 err = carl9170_parse_firmware(ar);
948 if (err)
949 goto err_freefw;
950
951 err = carl9170_usb_init_device(ar);
952 if (err)
953 goto err_freefw;
954
955 err = carl9170_usb_open(ar);
956 if (err)
957 goto err_unrx;
958
959 err = carl9170_register(ar);
960
961 carl9170_usb_stop(ar);
962 if (err)
963 goto err_unrx;
964
965 complete(&ar->fw_load_wait);
966 usb_put_dev(ar->udev);
967 return;
968
969err_unrx:
970 carl9170_usb_cancel_urbs(ar);
971
972err_freefw:
973 carl9170_release_firmware(ar);
974 carl9170_usb_firmware_failed(ar);
975}
976
977static void carl9170_usb_firmware_step2(const struct firmware *fw,
978 void *context)
979{
980 struct ar9170 *ar = context;
981
982 if (fw) {
983 ar->fw.fw = fw;
984 carl9170_usb_firmware_finish(ar);
985 return;
986 }
987
988 dev_err(&ar->udev->dev, "firmware not found.\n");
989 carl9170_usb_firmware_failed(ar);
990}
991
992static int carl9170_usb_probe(struct usb_interface *intf,
993 const struct usb_device_id *id)
994{
995 struct ar9170 *ar;
996 struct usb_device *udev;
997 int err;
998
999 err = usb_reset_device(interface_to_usbdev(intf));
1000 if (err)
1001 return err;
1002
1003 ar = carl9170_alloc(sizeof(*ar));
1004 if (IS_ERR(ar))
1005 return PTR_ERR(ar);
1006
1007 udev = interface_to_usbdev(intf);
1008 usb_get_dev(udev);
1009 ar->udev = udev;
1010 ar->intf = intf;
1011 ar->features = id->driver_info;
1012
1013 usb_set_intfdata(intf, ar);
1014 SET_IEEE80211_DEV(ar->hw, &intf->dev);
1015
1016 init_usb_anchor(&ar->rx_anch);
1017 init_usb_anchor(&ar->rx_pool);
1018 init_usb_anchor(&ar->rx_work);
1019 init_usb_anchor(&ar->tx_wait);
1020 init_usb_anchor(&ar->tx_anch);
1021 init_usb_anchor(&ar->tx_cmd);
1022 init_usb_anchor(&ar->tx_err);
1023 init_completion(&ar->cmd_wait);
1024 init_completion(&ar->fw_boot_wait);
1025 init_completion(&ar->fw_load_wait);
1026 tasklet_init(&ar->usb_tasklet, carl9170_usb_tasklet,
1027 (unsigned long)ar);
1028
1029 atomic_set(&ar->tx_cmd_urbs, 0);
1030 atomic_set(&ar->tx_anch_urbs, 0);
1031 atomic_set(&ar->rx_work_urbs, 0);
1032 atomic_set(&ar->rx_anch_urbs, 0);
1033 atomic_set(&ar->rx_pool_urbs, 0);
1034 ar->cmd_seq = -2;
1035
1036 usb_get_dev(ar->udev);
1037
1038 carl9170_set_state(ar, CARL9170_STOPPED);
1039
1040 return request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
1041 &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
1042}
1043
1044static void carl9170_usb_disconnect(struct usb_interface *intf)
1045{
1046 struct ar9170 *ar = usb_get_intfdata(intf);
1047 struct usb_device *udev;
1048
1049 if (WARN_ON(!ar))
1050 return;
1051
1052 udev = ar->udev;
1053 wait_for_completion(&ar->fw_load_wait);
1054
1055 if (IS_INITIALIZED(ar)) {
1056 carl9170_reboot(ar);
1057 carl9170_usb_stop(ar);
1058 }
1059
1060 carl9170_usb_cancel_urbs(ar);
1061 carl9170_unregister(ar);
1062
1063 usb_set_intfdata(intf, NULL);
1064
1065 carl9170_release_firmware(ar);
1066 carl9170_free(ar);
1067 usb_put_dev(udev);
1068}
1069
1070#ifdef CONFIG_PM
1071static int carl9170_usb_suspend(struct usb_interface *intf,
1072 pm_message_t message)
1073{
1074 struct ar9170 *ar = usb_get_intfdata(intf);
1075
1076 if (!ar)
1077 return -ENODEV;
1078
1079 carl9170_usb_cancel_urbs(ar);
1080
1081 /*
1082 * The firmware automatically reboots on usb suspend.
1083 */
1084
1085 return 0;
1086}
1087
1088static int carl9170_usb_resume(struct usb_interface *intf)
1089{
1090 struct ar9170 *ar = usb_get_intfdata(intf);
1091 int err;
1092
1093 if (!ar)
1094 return -ENODEV;
1095
1096 usb_unpoison_anchored_urbs(&ar->rx_anch);
1097
1098 err = carl9170_usb_init_device(ar);
1099 if (err)
1100 goto err_unrx;
1101
1102 err = carl9170_usb_open(ar);
1103 if (err)
1104 goto err_unrx;
1105
1106 return 0;
1107
1108err_unrx:
1109 carl9170_usb_cancel_urbs(ar);
1110
1111 return err;
1112}
1113#endif /* CONFIG_PM */
1114
1115static struct usb_driver carl9170_driver = {
1116 .name = KBUILD_MODNAME,
1117 .probe = carl9170_usb_probe,
1118 .disconnect = carl9170_usb_disconnect,
1119 .id_table = carl9170_usb_ids,
1120 .soft_unbind = 1,
1121#ifdef CONFIG_PM
1122 .suspend = carl9170_usb_suspend,
1123 .resume = carl9170_usb_resume,
1124#endif /* CONFIG_PM */
1125};
1126
1127static int __init carl9170_usb_init(void)
1128{
1129 return usb_register(&carl9170_driver);
1130}
1131
1132static void __exit carl9170_usb_exit(void)
1133{
1134 usb_deregister(&carl9170_driver);
1135}
1136
1137module_init(carl9170_usb_init);
1138module_exit(carl9170_usb_exit);
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
new file mode 100644
index 000000000000..0e917f80eab4
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -0,0 +1,7 @@
1#ifndef __CARL9170_SHARED_VERSION_H
2#define __CARL9170_SHARED_VERSION_H
3#define CARL9170FW_VERSION_YEAR 10
4#define CARL9170FW_VERSION_MONTH 8
5#define CARL9170FW_VERSION_DAY 30
6#define CARL9170FW_VERSION_GIT "1.8.8.1"
7#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
new file mode 100644
index 000000000000..48ead2268f50
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -0,0 +1,412 @@
1/*
2 * Shared Atheros AR9170 Header
3 *
4 * RX/TX meta descriptor format
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#ifndef __CARL9170_SHARED_WLAN_H
40#define __CARL9170_SHARED_WLAN_H
41
42#include "fwcmd.h"
43
44#define AR9170_RX_PHY_RATE_CCK_1M 0x0a
45#define AR9170_RX_PHY_RATE_CCK_2M 0x14
46#define AR9170_RX_PHY_RATE_CCK_5M 0x37
47#define AR9170_RX_PHY_RATE_CCK_11M 0x6e
48
49#define AR9170_ENC_ALG_NONE 0x0
50#define AR9170_ENC_ALG_WEP64 0x1
51#define AR9170_ENC_ALG_TKIP 0x2
52#define AR9170_ENC_ALG_AESCCMP 0x4
53#define AR9170_ENC_ALG_WEP128 0x5
54#define AR9170_ENC_ALG_WEP256 0x6
55#define AR9170_ENC_ALG_CENC 0x7
56
57#define AR9170_RX_ENC_SOFTWARE 0x8
58
59#define AR9170_RX_STATUS_MODULATION 0x03
60#define AR9170_RX_STATUS_MODULATION_S 0
61#define AR9170_RX_STATUS_MODULATION_CCK 0x00
62#define AR9170_RX_STATUS_MODULATION_OFDM 0x01
63#define AR9170_RX_STATUS_MODULATION_HT 0x02
64#define AR9170_RX_STATUS_MODULATION_DUPOFDM 0x03
65
66/* depends on modulation */
67#define AR9170_RX_STATUS_SHORT_PREAMBLE 0x08
68#define AR9170_RX_STATUS_GREENFIELD 0x08
69
70#define AR9170_RX_STATUS_MPDU 0x30
71#define AR9170_RX_STATUS_MPDU_S 4
72#define AR9170_RX_STATUS_MPDU_SINGLE 0x00
73#define AR9170_RX_STATUS_MPDU_FIRST 0x20
74#define AR9170_RX_STATUS_MPDU_MIDDLE 0x30
75#define AR9170_RX_STATUS_MPDU_LAST 0x10
76
77#define AR9170_RX_ERROR_RXTO 0x01
78#define AR9170_RX_ERROR_OVERRUN 0x02
79#define AR9170_RX_ERROR_DECRYPT 0x04
80#define AR9170_RX_ERROR_FCS 0x08
81#define AR9170_RX_ERROR_WRONG_RA 0x10
82#define AR9170_RX_ERROR_PLCP 0x20
83#define AR9170_RX_ERROR_MMIC 0x40
84#define AR9170_RX_ERROR_FATAL 0x80
85
86/* these are either-or */
87#define AR9170_TX_MAC_PROT_RTS 0x0001
88#define AR9170_TX_MAC_PROT_CTS 0x0002
89#define AR9170_TX_MAC_PROT 0x0003
90
91#define AR9170_TX_MAC_NO_ACK 0x0004
92/* if unset, MAC will only do SIFS space before frame */
93#define AR9170_TX_MAC_BACKOFF 0x0008
94#define AR9170_TX_MAC_BURST 0x0010
95#define AR9170_TX_MAC_AGGR 0x0020
96
97/* encryption is a two-bit field */
98#define AR9170_TX_MAC_ENCR_NONE 0x0000
99#define AR9170_TX_MAC_ENCR_RC4 0x0040
100#define AR9170_TX_MAC_ENCR_CENC 0x0080
101#define AR9170_TX_MAC_ENCR_AES 0x00c0
102
103#define AR9170_TX_MAC_MMIC 0x0100
104#define AR9170_TX_MAC_HW_DURATION 0x0200
105#define AR9170_TX_MAC_QOS_S 10
106#define AR9170_TX_MAC_QOS 0x0c00
107#define AR9170_TX_MAC_DISABLE_TXOP 0x1000
108#define AR9170_TX_MAC_TXOP_RIFS 0x2000
109#define AR9170_TX_MAC_IMM_BA 0x4000
110
111/* either-or */
112#define AR9170_TX_PHY_MOD_CCK 0x00000000
113#define AR9170_TX_PHY_MOD_OFDM 0x00000001
114#define AR9170_TX_PHY_MOD_HT 0x00000002
115
116/* depends on modulation */
117#define AR9170_TX_PHY_SHORT_PREAMBLE 0x00000004
118#define AR9170_TX_PHY_GREENFIELD 0x00000004
119
120#define AR9170_TX_PHY_BW_S 3
121#define AR9170_TX_PHY_BW (3 << AR9170_TX_PHY_BW_S)
122#define AR9170_TX_PHY_BW_20MHZ 0
123#define AR9170_TX_PHY_BW_40MHZ 2
124#define AR9170_TX_PHY_BW_40MHZ_DUP 3
125
126#define AR9170_TX_PHY_TX_HEAVY_CLIP_S 6
127#define AR9170_TX_PHY_TX_HEAVY_CLIP (7 << \
128 AR9170_TX_PHY_TX_HEAVY_CLIP_S)
129
130#define AR9170_TX_PHY_TX_PWR_S 9
131#define AR9170_TX_PHY_TX_PWR (0x3f << \
132 AR9170_TX_PHY_TX_PWR_S)
133
134#define AR9170_TX_PHY_TXCHAIN_S 15
135#define AR9170_TX_PHY_TXCHAIN (7 << \
136 AR9170_TX_PHY_TXCHAIN_S)
137#define AR9170_TX_PHY_TXCHAIN_1 1
138/* use for cck, ofdm 6/9/12/18/24 and HT if capable */
139#define AR9170_TX_PHY_TXCHAIN_2 5
140
141#define AR9170_TX_PHY_MCS_S 18
142#define AR9170_TX_PHY_MCS (0x7f << \
143 AR9170_TX_PHY_MCS_S)
144
145#define AR9170_TX_PHY_RATE_CCK_1M 0x0
146#define AR9170_TX_PHY_RATE_CCK_2M 0x1
147#define AR9170_TX_PHY_RATE_CCK_5M 0x2
148#define AR9170_TX_PHY_RATE_CCK_11M 0x3
149
150/* same as AR9170_RX_PHY_RATE */
151#define AR9170_TXRX_PHY_RATE_OFDM_6M 0xb
152#define AR9170_TXRX_PHY_RATE_OFDM_9M 0xf
153#define AR9170_TXRX_PHY_RATE_OFDM_12M 0xa
154#define AR9170_TXRX_PHY_RATE_OFDM_18M 0xe
155#define AR9170_TXRX_PHY_RATE_OFDM_24M 0x9
156#define AR9170_TXRX_PHY_RATE_OFDM_36M 0xd
157#define AR9170_TXRX_PHY_RATE_OFDM_48M 0x8
158#define AR9170_TXRX_PHY_RATE_OFDM_54M 0xc
159
160#define AR9170_TXRX_PHY_RATE_HT_MCS0 0x0
161#define AR9170_TXRX_PHY_RATE_HT_MCS1 0x1
162#define AR9170_TXRX_PHY_RATE_HT_MCS2 0x2
163#define AR9170_TXRX_PHY_RATE_HT_MCS3 0x3
164#define AR9170_TXRX_PHY_RATE_HT_MCS4 0x4
165#define AR9170_TXRX_PHY_RATE_HT_MCS5 0x5
166#define AR9170_TXRX_PHY_RATE_HT_MCS6 0x6
167#define AR9170_TXRX_PHY_RATE_HT_MCS7 0x7
168#define AR9170_TXRX_PHY_RATE_HT_MCS8 0x8
169#define AR9170_TXRX_PHY_RATE_HT_MCS9 0x9
170#define AR9170_TXRX_PHY_RATE_HT_MCS10 0xa
171#define AR9170_TXRX_PHY_RATE_HT_MCS11 0xb
172#define AR9170_TXRX_PHY_RATE_HT_MCS12 0xc
173#define AR9170_TXRX_PHY_RATE_HT_MCS13 0xd
174#define AR9170_TXRX_PHY_RATE_HT_MCS14 0xe
175#define AR9170_TXRX_PHY_RATE_HT_MCS15 0xf
176
177#define AR9170_TX_PHY_SHORT_GI 0x80000000
178
179#ifdef __CARL9170FW__
180struct ar9170_tx_hw_mac_control {
181 union {
182 struct {
183 /*
184 * Beware of compiler bugs in all gcc pre 4.4!
185 */
186
187 u8 erp_prot:2;
188 u8 no_ack:1;
189 u8 backoff:1;
190 u8 burst:1;
191 u8 ampdu:1;
192
193 u8 enc_mode:2;
194
195 u8 hw_mmic:1;
196 u8 hw_duration:1;
197
198 u8 qos_queue:2;
199
200 u8 disable_txop:1;
201 u8 txop_rifs:1;
202
203 u8 ba_end:1;
204 u8 probe:1;
205 } __packed;
206
207 __le16 set;
208 } __packed;
209} __packed;
210
211struct ar9170_tx_hw_phy_control {
212 union {
213 struct {
214 /*
215 * Beware of compiler bugs in all gcc pre 4.4!
216 */
217
218 u8 modulation:2;
219 u8 preamble:1;
220 u8 bandwidth:2;
221 u8:1;
222 u8 heavy_clip:3;
223 u8 tx_power:6;
224 u8 chains:3;
225 u8 mcs:7;
226 u8:6;
227 u8 short_gi:1;
228 } __packed;
229
230 __le32 set;
231 } __packed;
232} __packed;
233
234struct ar9170_tx_rate_info {
235 u8 tries:3;
236 u8 erp_prot:2;
237 u8 ampdu:1;
238 u8 free:2; /* free for use (e.g. RIFS/TXOP/AMPDU) */
239} __packed;
240
241struct carl9170_tx_superdesc {
242 __le16 len;
243 u8 rix;
244 u8 cnt;
245 u8 cookie;
246 u8 ampdu_density:3;
247 u8 ampdu_factor:2;
248 u8 ampdu_commit_density:1;
249 u8 ampdu_commit_factor:1;
250 u8 ampdu_unused_bit:1;
251 u8 queue:2;
252 u8 reserved:1;
253 u8 vif_id:3;
254 u8 fill_in_tsf:1;
255 u8 cab:1;
256 u8 padding2;
257 struct ar9170_tx_rate_info ri[CARL9170_TX_MAX_RATES];
258 struct ar9170_tx_hw_phy_control rr[CARL9170_TX_MAX_RETRY_RATES];
259} __packed;
260
261struct ar9170_tx_hwdesc {
262 __le16 length;
263 struct ar9170_tx_hw_mac_control mac;
264 struct ar9170_tx_hw_phy_control phy;
265} __packed;
266
267struct ar9170_tx_frame {
268 struct ar9170_tx_hwdesc hdr;
269
270 union {
271 struct ieee80211_hdr i3e;
272 u8 payload[0];
273 } data;
274} __packed;
275
276struct carl9170_tx_superframe {
277 struct carl9170_tx_superdesc s;
278 struct ar9170_tx_frame f;
279} __packed;
280
281#endif /* __CARL9170FW__ */
282
283struct _ar9170_tx_hwdesc {
284 __le16 length;
285 __le16 mac_control;
286 __le32 phy_control;
287} __packed;
288
289#define CARL9170_TX_SUPER_AMPDU_DENSITY_S 0
290#define CARL9170_TX_SUPER_AMPDU_DENSITY 0x7
291#define CARL9170_TX_SUPER_AMPDU_FACTOR 0x18
292#define CARL9170_TX_SUPER_AMPDU_FACTOR_S 3
293#define CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY 0x20
294#define CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY_S 5
295#define CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR 0x40
296#define CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR_S 6
297
298#define CARL9170_TX_SUPER_MISC_QUEUE 0x3
299#define CARL9170_TX_SUPER_MISC_QUEUE_S 0
300#define CARL9170_TX_SUPER_MISC_VIF_ID 0x38
301#define CARL9170_TX_SUPER_MISC_VIF_ID_S 3
302#define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40
303#define CARL9170_TX_SUPER_MISC_CAB 0x80
304
305#define CARL9170_TX_SUPER_RI_TRIES 0x7
306#define CARL9170_TX_SUPER_RI_TRIES_S 0
307#define CARL9170_TX_SUPER_RI_ERP_PROT 0x18
308#define CARL9170_TX_SUPER_RI_ERP_PROT_S 3
309#define CARL9170_TX_SUPER_RI_AMPDU 0x20
310#define CARL9170_TX_SUPER_RI_AMPDU_S 5
311
312struct _carl9170_tx_superdesc {
313 __le16 len;
314 u8 rix;
315 u8 cnt;
316 u8 cookie;
317 u8 ampdu_settings;
318 u8 misc;
319 u8 padding;
320 u8 ri[CARL9170_TX_MAX_RATES];
321 __le32 rr[CARL9170_TX_MAX_RETRY_RATES];
322} __packed;
323
324struct _carl9170_tx_superframe {
325 struct _carl9170_tx_superdesc s;
326 struct _ar9170_tx_hwdesc f;
327 u8 frame_data[0];
328} __packed;
329
330#define CARL9170_TX_SUPERDESC_LEN 24
331#define AR9170_TX_HWDESC_LEN 8
332#define AR9170_TX_SUPERFRAME_LEN (AR9170_TX_HWDESC_LEN + \
333 CARL9170_TX_SUPERDESC_LEN)
334
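
The packed structs above are expected to match these byte counts exactly, so a compile-time cross-check is essentially free. A sketch (the helper name is made up; the driver may assert this elsewhere):

    static inline void __carl9170_check_wire_sizes(void)
    {
            BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
                         CARL9170_TX_SUPERDESC_LEN);
            BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
                         AR9170_TX_HWDESC_LEN);
    }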
335struct ar9170_rx_head {
336 u8 plcp[12];
337} __packed;
338
339struct ar9170_rx_phystatus {
340 union {
341 struct {
342 u8 rssi_ant0, rssi_ant1, rssi_ant2,
343 rssi_ant0x, rssi_ant1x, rssi_ant2x,
344 rssi_combined;
345 } __packed;
346 u8 rssi[7];
347 } __packed;
348
349 u8 evm_stream0[6], evm_stream1[6];
350 u8 phy_err;
351} __packed;
352
353struct ar9170_rx_macstatus {
354 u8 SAidx, DAidx;
355 u8 error;
356 u8 status;
357} __packed;
358
359struct ar9170_rx_frame_single {
360 struct ar9170_rx_head phy_head;
361 struct ieee80211_hdr i3e;
362 struct ar9170_rx_phystatus phy_tail;
363 struct ar9170_rx_macstatus macstatus;
364} __packed;
365
366struct ar9170_rx_frame_head {
367 struct ar9170_rx_head phy_head;
368 struct ieee80211_hdr i3e;
369 struct ar9170_rx_macstatus macstatus;
370} __packed;
371
372struct ar9170_rx_frame_middle {
373 struct ieee80211_hdr i3e;
374 struct ar9170_rx_macstatus macstatus;
375} __packed;
376
377struct ar9170_rx_frame_tail {
378 struct ieee80211_hdr i3e;
379 struct ar9170_rx_phystatus phy_tail;
380 struct ar9170_rx_macstatus macstatus;
381} __packed;
382
383struct ar9170_rx_frame {
384 union {
385 struct ar9170_rx_frame_single single;
386 struct ar9170_rx_frame_head head;
387 struct ar9170_rx_frame_middle middle;
388 struct ar9170_rx_frame_tail tail;
389 } __packed;
390} __packed;
391
392static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t)
393{
394 return (t->SAidx & 0xc0) >> 4 |
395 (t->DAidx & 0xc0) >> 6;
396}
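
The hardware reuses the two spare top bits of SAidx and DAidx to report which cipher decrypted the frame, and the helper stitches them back into one 4-bit value. A worked example with illustrative register contents:

    struct ar9170_rx_macstatus ms = { .SAidx = 0x80, .DAidx = 0x40 };

    /* (0x80 & 0xc0) >> 4 = 0x8 and (0x40 & 0xc0) >> 6 = 0x1 */
    u8 type = ar9170_get_decrypt_type(&ms);     /* 0x8 | 0x1 = 0x9 */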
397
398enum ar9170_txq {
399 AR9170_TXQ_BE,
400
401 AR9170_TXQ_VI,
402 AR9170_TXQ_VO,
403 AR9170_TXQ_BK,
404
405 __AR9170_NUM_TXQ,
406};
407
408static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 };
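
mac80211 numbers its queues VO, VI, BE, BK (0 through 3), while the hardware uses the enum order above; ar9170_qmap translates the mac80211 index into the hardware one. A typical lookup on the tx path (a sketch, assuming the standard mac80211 queue ordering):

    u16 hw_queue = ar9170_qmap[skb_get_queue_mapping(skb)];     /* VO -> 2 */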
409
410#define AR9170_TXQ_DEPTH 32
411
412#endif /* __CARL9170_SHARED_WLAN_H */
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index 873bf526e11f..fd3a020682dc 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -36,6 +36,7 @@
36 * @ATH_DBG_PS: power save processing 36 * @ATH_DBG_PS: power save processing
37 * @ATH_DBG_HWTIMER: hardware timer handling 37 * @ATH_DBG_HWTIMER: hardware timer handling
38 * @ATH_DBG_BTCOEX: bluetooth coexistance 38 * @ATH_DBG_BTCOEX: bluetooth coexistance
39 * @ATH_DBG_BSTUCK: stuck beacons
39 * @ATH_DBG_ANY: enable all debugging 40 * @ATH_DBG_ANY: enable all debugging
40 * 41 *
41 * The debug level is used to control the amount and type of debugging output 42 * The debug level is used to control the amount and type of debugging output
@@ -60,6 +61,7 @@ enum ATH_DEBUG {
60 ATH_DBG_HWTIMER = 0x00001000, 61 ATH_DBG_HWTIMER = 0x00001000,
61 ATH_DBG_BTCOEX = 0x00002000, 62 ATH_DBG_BTCOEX = 0x00002000,
62 ATH_DBG_WMI = 0x00004000, 63 ATH_DBG_WMI = 0x00004000,
64 ATH_DBG_BSTUCK = 0x00008000,
63 ATH_DBG_ANY = 0xffffffff 65 ATH_DBG_ANY = 0xffffffff
64}; 66};
65 67
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
new file mode 100644
index 000000000000..bd21a4d82085
--- /dev/null
+++ b/drivers/net/wireless/ath/key.c
@@ -0,0 +1,568 @@
1/*
2 * Copyright (c) 2009 Atheros Communications Inc.
3 * Copyright (c) 2010 Bruno Randolf <br1@einfach.org>
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <asm/unaligned.h>
19#include <net/mac80211.h>
20
21#include "ath.h"
22#include "reg.h"
23#include "debug.h"
24
25#define REG_READ (common->ops->read)
26#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
27
28#define IEEE80211_WEP_NKID 4 /* number of key ids */
29
30/************************/
31/* Key Cache Management */
32/************************/
33
34bool ath_hw_keyreset(struct ath_common *common, u16 entry)
35{
36 u32 keyType;
37 void *ah = common->ah;
38
39 if (entry >= common->keymax) {
40 ath_print(common, ATH_DBG_FATAL,
41 "keychache entry %u out of range\n", entry);
42 return false;
43 }
44
45 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
46
47 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
48 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
49 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
50 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
51 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
52 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
53 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
54 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
55
56 if (keyType == AR_KEYTABLE_TYPE_TKIP) {
57 u16 micentry = entry + 64;
58
59 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
60 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
61 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
62 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
63
64 }
65
66 return true;
67}
68EXPORT_SYMBOL(ath_hw_keyreset);
69
70bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
71{
72 u32 macHi, macLo;
73 u32 unicast_flag = AR_KEYTABLE_VALID;
74 void *ah = common->ah;
75
76 if (entry >= common->keymax) {
77 ath_print(common, ATH_DBG_FATAL,
78 "keychache entry %u out of range\n", entry);
79 return false;
80 }
81
82 if (mac != NULL) {
83 /*
84 * AR_KEYTABLE_VALID indicates that the address is a unicast
85 * address, which must match the transmitter address for
86 * decrypting frames.
87 * Not setting this bit allows the hardware to use the key
88 * for multicast frame decryption.
89 */
90 if (mac[0] & 0x01)
91 unicast_flag = 0;
92
93 macHi = (mac[5] << 8) | mac[4];
94 macLo = (mac[3] << 24) |
95 (mac[2] << 16) |
96 (mac[1] << 8) |
97 mac[0];
98 macLo >>= 1;
99 macLo |= (macHi & 1) << 31;
100 macHi >>= 1;
101 } else {
102 macLo = macHi = 0;
103 }
104 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
105 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);
106
107 return true;
108}
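
The key cache stores the 48-bit address shifted right by one bit; the discarded low bit is implicit and the freed-up register bit holds AR_KEYTABLE_VALID. Worked through for the illustrative address 00:11:22:33:44:55:

    /* mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}
     *   macHi = (0x55 << 8) | 0x44          = 0x5544
     *   macLo                               = 0x33221100
     *   macLo >>= 1                         -> 0x19910880
     *   macLo |= (macHi & 1) << 31          (bit 0 of macHi is 0 here)
     *   macHi >>= 1                         -> 0x2aa2
     * so MAC0 is written with 0x19910880 and MAC1 with
     * 0x2aa2 | AR_KEYTABLE_VALID (the address is unicast).
     */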
109
110bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
111 const struct ath_keyval *k,
112 const u8 *mac)
113{
114 void *ah = common->ah;
115 u32 key0, key1, key2, key3, key4;
116 u32 keyType;
117
118 if (entry >= common->keymax) {
119 ath_print(common, ATH_DBG_FATAL,
120 "keycache entry %u out of range\n", entry);
121 return false;
122 }
123
124 switch (k->kv_type) {
125 case ATH_CIPHER_AES_OCB:
126 keyType = AR_KEYTABLE_TYPE_AES;
127 break;
128 case ATH_CIPHER_AES_CCM:
129 if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) {
130 ath_print(common, ATH_DBG_ANY,
131 "AES-CCM not supported by this mac rev\n");
132 return false;
133 }
134 keyType = AR_KEYTABLE_TYPE_CCM;
135 break;
136 case ATH_CIPHER_TKIP:
137 keyType = AR_KEYTABLE_TYPE_TKIP;
138 if (entry + 64 >= common->keymax) {
139 ath_print(common, ATH_DBG_ANY,
140 "entry %u inappropriate for TKIP\n", entry);
141 return false;
142 }
143 break;
144 case ATH_CIPHER_WEP:
145 if (k->kv_len < WLAN_KEY_LEN_WEP40) {
146 ath_print(common, ATH_DBG_ANY,
147 "WEP key length %u too small\n", k->kv_len);
148 return false;
149 }
150 if (k->kv_len <= WLAN_KEY_LEN_WEP40)
151 keyType = AR_KEYTABLE_TYPE_40;
152 else if (k->kv_len <= WLAN_KEY_LEN_WEP104)
153 keyType = AR_KEYTABLE_TYPE_104;
154 else
155 keyType = AR_KEYTABLE_TYPE_128;
156 break;
157 case ATH_CIPHER_CLR:
158 keyType = AR_KEYTABLE_TYPE_CLR;
159 break;
160 default:
161 ath_print(common, ATH_DBG_FATAL,
162 "cipher %u not supported\n", k->kv_type);
163 return false;
164 }
165
166 key0 = get_unaligned_le32(k->kv_val + 0);
167 key1 = get_unaligned_le16(k->kv_val + 4);
168 key2 = get_unaligned_le32(k->kv_val + 6);
169 key3 = get_unaligned_le16(k->kv_val + 10);
170 key4 = get_unaligned_le32(k->kv_val + 12);
171 if (k->kv_len <= WLAN_KEY_LEN_WEP104)
172 key4 &= 0xff;
173
174 /*
175 * Note: Key cache registers access special memory area that requires
176 * two 32-bit writes to actually update the values in the internal
177 * memory. Consequently, the exact order and pairs used here must be
178 * maintained.
179 */
180
181 if (keyType == AR_KEYTABLE_TYPE_TKIP) {
182 u16 micentry = entry + 64;
183
184 /*
185 * Write inverted key[47:0] first to avoid Michael MIC errors
186 * on frames that could be sent or received at the same time.
187 * The correct key will be written in the end once everything
188 * else is ready.
189 */
190 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
191 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
192
193 /* Write key[95:48] */
194 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
195 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
196
197 /* Write key[127:96] and key type */
198 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
199 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
200
201 /* Write MAC address for the entry */
202 (void) ath_hw_keysetmac(common, entry, mac);
203
204 if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
205 /*
206 * TKIP uses two key cache entries:
207 * Michael MIC TX/RX keys in the same key cache entry
208 * (idx = main index + 64):
209 * key0 [31:0] = RX key [31:0]
210 * key1 [15:0] = TX key [31:16]
211 * key1 [31:16] = reserved
212 * key2 [31:0] = RX key [63:32]
213 * key3 [15:0] = TX key [15:0]
214 * key3 [31:16] = reserved
215 * key4 [31:0] = TX key [63:32]
216 */
217 u32 mic0, mic1, mic2, mic3, mic4;
218
219 mic0 = get_unaligned_le32(k->kv_mic + 0);
220 mic2 = get_unaligned_le32(k->kv_mic + 4);
221 mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
222 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
223 mic4 = get_unaligned_le32(k->kv_txmic + 4);
224
225 /* Write RX[31:0] and TX[31:16] */
226 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
227 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
228
229 /* Write RX[63:32] and TX[15:0] */
230 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
231 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
232
233 /* Write TX[63:32] and keyType(reserved) */
234 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
235 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
236 AR_KEYTABLE_TYPE_CLR);
237
238 } else {
239 /*
240 * TKIP uses four key cache entries (two for group
241 * keys):
242 * Michael MIC TX/RX keys are in different key cache
243 * entries (idx = main index + 64 for TX and
244 * main index + 32 + 96 for RX):
245 * key0 [31:0] = TX/RX MIC key [31:0]
246 * key1 [31:0] = reserved
247 * key2 [31:0] = TX/RX MIC key [63:32]
248 * key3 [31:0] = reserved
249 * key4 [31:0] = reserved
250 *
251 * Upper layer code will call this function separately
252 * for TX and RX keys when these registers offsets are
253 * used.
254 */
255 u32 mic0, mic2;
256
257 mic0 = get_unaligned_le32(k->kv_mic + 0);
258 mic2 = get_unaligned_le32(k->kv_mic + 4);
259
260 /* Write MIC key[31:0] */
261 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
262 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
263
264 /* Write MIC key[63:32] */
265 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
266 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
267
268 /* Write TX[63:32] and keyType(reserved) */
269 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
270 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
271 AR_KEYTABLE_TYPE_CLR);
272 }
273
274 /* MAC address registers are reserved for the MIC entry */
275 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
276 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
277
278 /*
279 * Write the correct (un-inverted) key[47:0] last to enable
280 * TKIP now that all other registers are set with correct
281 * values.
282 */
283 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
284 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
285 } else {
286 /* Write key[47:0] */
287 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
288 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
289
290 /* Write key[95:48] */
291 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
292 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
293
294 /* Write key[127:96] and key type */
295 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
296 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
297
298 /* Write MAC address for the entry */
299 (void) ath_hw_keysetmac(common, entry, mac);
300 }
301
302 return true;
303}
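
For reference, the five key registers split the 128-bit key value 32/16/32/16/32, which is why the reads above alternate between get_unaligned_le32 and get_unaligned_le16 (a sketch of the packing):

    /* key0 = kv_val[ 0.. 3]  bits   0..31
     * key1 = kv_val[ 4.. 5]  bits  32..47
     * key2 = kv_val[ 6.. 9]  bits  48..79
     * key3 = kv_val[10..11]  bits  80..95
     * key4 = kv_val[12..15]  bits  96..127 (masked down to one byte
     *                                       for WEP-40/104 keys)
     */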
304
305static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
306 struct ath_keyval *hk, const u8 *addr,
307 bool authenticator)
308{
309 const u8 *key_rxmic;
310 const u8 *key_txmic;
311
312 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
313 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
314
315 if (addr == NULL) {
316 /*
317 * Group key installation - only two key cache entries are used
318 * regardless of splitmic capability since group key is only
319 * used either for TX or RX.
320 */
321 if (authenticator) {
322 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
323 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
324 } else {
325 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
326 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
327 }
328 return ath_hw_set_keycache_entry(common, keyix, hk, addr);
329 }
330 if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
331 /* TX and RX keys share the same key cache entry. */
332 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
333 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
334 return ath_hw_set_keycache_entry(common, keyix, hk, addr);
335 }
336
337 /* Separate key cache entries for TX and RX */
338
339 /* TX key goes at first index, RX key at +32. */
340 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
341 if (!ath_hw_set_keycache_entry(common, keyix, hk, NULL)) {
342 /* TX MIC entry failed. No need to proceed further */
343 ath_print(common, ATH_DBG_FATAL,
344 "Setting TX MIC Key Failed\n");
345 return 0;
346 }
347
348 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
349 /* XXX delete tx key on failure? */
350 return ath_hw_set_keycache_entry(common, keyix + 32, hk, addr);
351}
352
353static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
354{
355 int i;
356
357 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
358 if (test_bit(i, common->keymap) ||
359 test_bit(i + 64, common->keymap))
360 continue; /* At least one part of TKIP key allocated */
361 if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) &&
362 (test_bit(i + 32, common->keymap) ||
363 test_bit(i + 64 + 32, common->keymap)))
364 continue; /* At least one part of TKIP key allocated */
365
366 /* Found a free slot for a TKIP key */
367 return i;
368 }
369 return -1;
370}
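
The indices tested above map out how one TKIP key consumes the cache on split-MIC hardware (a sketch inferred from the checks here and from ath_setkey_tkip() above):

    /* slot i        TX key      (written with mac == NULL)
     * slot i + 64   TX Michael MIC
     * slot i + 32   RX key      (written with the peer's address)
     * slot i + 96   RX Michael MIC
     *
     * combined-MIC hardware only occupies i and i + 64.
     */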
371
372static int ath_reserve_key_cache_slot(struct ath_common *common,
373 u32 cipher)
374{
375 int i;
376
377 if (cipher == WLAN_CIPHER_SUITE_TKIP)
378 return ath_reserve_key_cache_slot_tkip(common);
379
380 /* First, try to find slots that would not be available for TKIP. */
381 if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
382 for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
383 if (!test_bit(i, common->keymap) &&
384 (test_bit(i + 32, common->keymap) ||
385 test_bit(i + 64, common->keymap) ||
386 test_bit(i + 64 + 32, common->keymap)))
387 return i;
388 if (!test_bit(i + 32, common->keymap) &&
389 (test_bit(i, common->keymap) ||
390 test_bit(i + 64, common->keymap) ||
391 test_bit(i + 64 + 32, common->keymap)))
392 return i + 32;
393 if (!test_bit(i + 64, common->keymap) &&
394 (test_bit(i, common->keymap) ||
395 test_bit(i + 32, common->keymap) ||
396 test_bit(i + 64 + 32, common->keymap)))
397 return i + 64;
398 if (!test_bit(i + 64 + 32, common->keymap) &&
399 (test_bit(i, common->keymap) ||
400 test_bit(i + 32, common->keymap) ||
401 test_bit(i + 64, common->keymap)))
402 return i + 64 + 32;
403 }
404 } else {
405 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
406 if (!test_bit(i, common->keymap) &&
407 test_bit(i + 64, common->keymap))
408 return i;
409 if (test_bit(i, common->keymap) &&
410 !test_bit(i + 64, common->keymap))
411 return i + 64;
412 }
413 }
414
415 /* No partially used TKIP slots, pick any available slot */
416 for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
417 /* Do not allow slots that could be needed for TKIP group keys
418 * to be used. This limitation could be removed if we know that
419 * TKIP will not be used. */
420 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
421 continue;
422 if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
423 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
424 continue;
425 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
426 continue;
427 }
428
429 if (!test_bit(i, common->keymap))
430 return i; /* Found a free slot for a key */
431 }
432
433 /* No free slot found */
434 return -1;
435}
436
437/*
438 * Configure encryption in the HW.
439 */
440int ath_key_config(struct ath_common *common,
441 struct ieee80211_vif *vif,
442 struct ieee80211_sta *sta,
443 struct ieee80211_key_conf *key)
444{
445 struct ath_keyval hk;
446 const u8 *mac = NULL;
447 u8 gmac[ETH_ALEN];
448 int ret = 0;
449 int idx;
450
451 memset(&hk, 0, sizeof(hk));
452
453 switch (key->cipher) {
454 case WLAN_CIPHER_SUITE_WEP40:
455 case WLAN_CIPHER_SUITE_WEP104:
456 hk.kv_type = ATH_CIPHER_WEP;
457 break;
458 case WLAN_CIPHER_SUITE_TKIP:
459 hk.kv_type = ATH_CIPHER_TKIP;
460 break;
461 case WLAN_CIPHER_SUITE_CCMP:
462 hk.kv_type = ATH_CIPHER_AES_CCM;
463 break;
464 default:
465 return -EOPNOTSUPP;
466 }
467
468 hk.kv_len = key->keylen;
469 memcpy(hk.kv_val, key->key, key->keylen);
470
471 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
472 switch (vif->type) {
473 case NL80211_IFTYPE_AP:
474 memcpy(gmac, vif->addr, ETH_ALEN);
475 gmac[0] |= 0x01;
476 mac = gmac;
477 idx = ath_reserve_key_cache_slot(common, key->cipher);
478 break;
479 case NL80211_IFTYPE_ADHOC:
480 if (!sta) {
481 idx = key->keyidx;
482 break;
483 }
484 memcpy(gmac, sta->addr, ETH_ALEN);
485 gmac[0] |= 0x01;
486 mac = gmac;
487 idx = ath_reserve_key_cache_slot(common, key->cipher);
488 break;
489 default:
490 idx = key->keyidx;
491 break;
492 }
493 } else if (key->keyidx) {
494 if (WARN_ON(!sta))
495 return -EOPNOTSUPP;
496 mac = sta->addr;
497
498 if (vif->type != NL80211_IFTYPE_AP) {
499 /* Only keyidx 0 should be used with unicast key, but
500 * allow this for client mode for now. */
501 idx = key->keyidx;
502 } else
503 return -EIO;
504 } else {
505 if (WARN_ON(!sta))
506 return -EOPNOTSUPP;
507 mac = sta->addr;
508
509 idx = ath_reserve_key_cache_slot(common, key->cipher);
510 }
511
512 if (idx < 0)
513 return -ENOSPC; /* no free key cache entries */
514
515 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
516 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
517 vif->type == NL80211_IFTYPE_AP);
518 else
519 ret = ath_hw_set_keycache_entry(common, idx, &hk, mac);
520
521 if (!ret)
522 return -EIO;
523
524 set_bit(idx, common->keymap);
525 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
526 set_bit(idx + 64, common->keymap);
527 set_bit(idx, common->tkip_keymap);
528 set_bit(idx + 64, common->tkip_keymap);
529 if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
530 set_bit(idx + 32, common->keymap);
531 set_bit(idx + 64 + 32, common->keymap);
532 set_bit(idx + 32, common->tkip_keymap);
533 set_bit(idx + 64 + 32, common->tkip_keymap);
534 }
535 }
536
537 return idx;
538}
539EXPORT_SYMBOL(ath_key_config);
540
541/*
542 * Delete Key.
543 */
544void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
545{
546 ath_hw_keyreset(common, key->hw_key_idx);
547 if (key->hw_key_idx < IEEE80211_WEP_NKID)
548 return;
549
550 clear_bit(key->hw_key_idx, common->keymap);
551 if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
552 return;
553
554 clear_bit(key->hw_key_idx + 64, common->keymap);
555
556 clear_bit(key->hw_key_idx, common->tkip_keymap);
557 clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
558
559 if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
560 ath_hw_keyreset(common, key->hw_key_idx + 32);
561 clear_bit(key->hw_key_idx + 32, common->keymap);
562 clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
563
564 clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
565 clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
566 }
567}
568EXPORT_SYMBOL(ath_key_delete);
diff --git a/drivers/net/wireless/ath/reg.h b/drivers/net/wireless/ath/reg.h
index dfe1fbec24f5..e798ef476581 100644
--- a/drivers/net/wireless/ath/reg.h
+++ b/drivers/net/wireless/ath/reg.h
@@ -24,4 +24,27 @@
24#define AR_BSSMSKL 0x80e0 24#define AR_BSSMSKL 0x80e0
25#define AR_BSSMSKU 0x80e4 25#define AR_BSSMSKU 0x80e4
26 26
27#define AR_KEYTABLE_0 0x8800
28#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32))
29#define AR_KEY_CACHE_SIZE 128
30#define AR_RSVD_KEYTABLE_ENTRIES 4
31#define AR_KEY_TYPE 0x00000007
32#define AR_KEYTABLE_TYPE_40 0x00000000
33#define AR_KEYTABLE_TYPE_104 0x00000001
34#define AR_KEYTABLE_TYPE_128 0x00000003
35#define AR_KEYTABLE_TYPE_TKIP 0x00000004
36#define AR_KEYTABLE_TYPE_AES 0x00000005
37#define AR_KEYTABLE_TYPE_CCM 0x00000006
38#define AR_KEYTABLE_TYPE_CLR 0x00000007
39#define AR_KEYTABLE_ANT 0x00000008
40#define AR_KEYTABLE_VALID 0x00008000
41#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0)
42#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4)
43#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8)
44#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12)
45#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16)
46#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20)
47#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
48#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
49
27#endif /* ATH_REGISTERS_H */ 50#endif /* ATH_REGISTERS_H */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 20631ae2ddd7..a1186525c70d 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2280,6 +2280,7 @@ out:
2280 2280
2281static int b43_upload_microcode(struct b43_wldev *dev) 2281static int b43_upload_microcode(struct b43_wldev *dev)
2282{ 2282{
2283 struct wiphy *wiphy = dev->wl->hw->wiphy;
2283 const size_t hdr_len = sizeof(struct b43_fw_header); 2284 const size_t hdr_len = sizeof(struct b43_fw_header);
2284 const __be32 *data; 2285 const __be32 *data;
2285 unsigned int i, len; 2286 unsigned int i, len;
@@ -2405,6 +2406,10 @@ static int b43_upload_microcode(struct b43_wldev *dev)
2405 } 2406 }
2406 } 2407 }
2407 2408
2409 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "%u.%u",
2410 dev->fw.rev, dev->fw.patch);
2411 wiphy->hw_version = dev->dev->id.coreid;
2412
2408 if (b43_is_old_txhdr_format(dev)) { 2413 if (b43_is_old_txhdr_format(dev)) {
2409 /* We're over the deadline, but we keep support for old fw 2414 /* We're over the deadline, but we keep support for old fw
2410 * until it turns out to be in major conflict with something new. */ 2415 * until it turns out to be in major conflict with something new. */
@@ -3754,17 +3759,17 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3754 } 3759 }
3755 3760
3756 err = -EINVAL; 3761 err = -EINVAL;
3757 switch (key->alg) { 3762 switch (key->cipher) {
3758 case ALG_WEP: 3763 case WLAN_CIPHER_SUITE_WEP40:
3759 if (key->keylen == WLAN_KEY_LEN_WEP40) 3764 algorithm = B43_SEC_ALGO_WEP40;
3760 algorithm = B43_SEC_ALGO_WEP40; 3765 break;
3761 else 3766 case WLAN_CIPHER_SUITE_WEP104:
3762 algorithm = B43_SEC_ALGO_WEP104; 3767 algorithm = B43_SEC_ALGO_WEP104;
3763 break; 3768 break;
3764 case ALG_TKIP: 3769 case WLAN_CIPHER_SUITE_TKIP:
3765 algorithm = B43_SEC_ALGO_TKIP; 3770 algorithm = B43_SEC_ALGO_TKIP;
3766 break; 3771 break;
3767 case ALG_CCMP: 3772 case WLAN_CIPHER_SUITE_CCMP:
3768 algorithm = B43_SEC_ALGO_AES; 3773 algorithm = B43_SEC_ALGO_AES;
3769 break; 3774 break;
3770 default: 3775 default:
@@ -4250,6 +4255,10 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
4250 B43_WARN_ON(dev && b43_status(dev) > B43_STAT_INITIALIZED); 4255 B43_WARN_ON(dev && b43_status(dev) > B43_STAT_INITIALIZED);
4251 if (!dev || b43_status(dev) != B43_STAT_INITIALIZED) 4256 if (!dev || b43_status(dev) != B43_STAT_INITIALIZED)
4252 return; 4257 return;
4258
4259 /* Unregister HW RNG driver */
4260 b43_rng_exit(dev->wl);
4261
4253 b43_set_status(dev, B43_STAT_UNINIT); 4262 b43_set_status(dev, B43_STAT_UNINIT);
4254 4263
4255 /* Stop the microcode PSM. */ 4264 /* Stop the microcode PSM. */
@@ -4379,6 +4388,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4379 4388
4380 b43_set_status(dev, B43_STAT_INITIALIZED); 4389 b43_set_status(dev, B43_STAT_INITIALIZED);
4381 4390
4391 /* Register HW RNG driver */
4392 b43_rng_init(dev->wl);
4393
4382out: 4394out:
4383 return err; 4395 return err;
4384 4396
@@ -4984,7 +4996,6 @@ static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id)
4984 if (err) 4996 if (err)
4985 goto err_one_core_detach; 4997 goto err_one_core_detach;
4986 b43_leds_register(wl->current_dev); 4998 b43_leds_register(wl->current_dev);
4987 b43_rng_init(wl);
4988 } 4999 }
4989 5000
4990 out: 5001 out:
@@ -5020,7 +5031,6 @@ static void b43_remove(struct ssb_device *dev)
5020 b43_one_core_detach(dev); 5031 b43_one_core_detach(dev);
5021 5032
5022 if (list_empty(&wl->devlist)) { 5033 if (list_empty(&wl->devlist)) {
5023 b43_rng_exit(wl);
5024 b43_leds_unregister(wl); 5034 b43_leds_unregister(wl);
5025 /* Last core on the chip unregistered. 5035 /* Last core on the chip unregistered.
5026 * We can destroy common struct b43_wl. 5036 * We can destroy common struct b43_wl.
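
The b43_op_set_key hunk above is part of a tree-wide mac80211 change: drivers now switch on key->cipher, the IEEE 802.11 cipher suite selector, instead of the old key->alg enum. That also removes the keylen test, since WEP40 and WEP104 are distinct suites. A self-contained sketch of the same mapping (suite values as defined in ieee80211.h, algorithm names simplified):

	#include <stdio.h>

	#define WLAN_CIPHER_SUITE_WEP40  0x000FAC01
	#define WLAN_CIPHER_SUITE_TKIP   0x000FAC02
	#define WLAN_CIPHER_SUITE_CCMP   0x000FAC04
	#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05

	enum sec_algo { ALGO_WEP40, ALGO_WEP104, ALGO_TKIP, ALGO_AES };

	static int cipher_to_algo(unsigned int cipher, enum sec_algo *algo)
	{
		switch (cipher) {
		case WLAN_CIPHER_SUITE_WEP40:  *algo = ALGO_WEP40;  break;
		case WLAN_CIPHER_SUITE_WEP104: *algo = ALGO_WEP104; break;
		case WLAN_CIPHER_SUITE_TKIP:   *algo = ALGO_TKIP;   break;
		case WLAN_CIPHER_SUITE_CCMP:   *algo = ALGO_AES;    break;
		default:
			return -1;	/* unknown suite: software crypto */
		}
		return 0;
	}

	int main(void)
	{
		enum sec_algo algo;

		if (!cipher_to_algo(WLAN_CIPHER_SUITE_TKIP, &algo))
			printf("TKIP maps to algo %d\n", algo);
		return 0;
	}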
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 5a725703770c..2466c0a52e5d 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -893,7 +893,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
893} 893}
894 894
895/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ 895/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
896static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev) 896static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
897{ 897{
898 struct b43_phy_n *nphy = dev->phy.n; 898 struct b43_phy_n *nphy = dev->phy.n;
899 u8 i, j; 899 u8 i, j;
@@ -1094,11 +1094,12 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
1094 b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7); 1094 b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
1095 b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7); 1095 b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
1096 1096
1097 b43_nphy_gain_crtl_workarounds(dev); 1097 b43_nphy_gain_ctrl_workarounds(dev);
1098 1098
1099 if (dev->phy.rev < 2) { 1099 if (dev->phy.rev < 2) {
1100 if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2) 1100 if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
1101 ; /*TODO: b43_mhf(dev, 2, 0x0010, 0x0010, 3);*/ 1101 b43_hf_write(dev, b43_hf_read(dev) |
1102 B43_HF_MLADVW);
1102 } else if (dev->phy.rev == 2) { 1103 } else if (dev->phy.rev == 2) {
1103 b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0); 1104 b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
1104 b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0); 1105 b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
@@ -1182,7 +1183,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
1182 len = bw << 1; 1183 len = bw << 1;
1183 } 1184 }
1184 1185
1185 samples = kzalloc(len * sizeof(struct b43_c32), GFP_KERNEL); 1186 samples = kcalloc(len, sizeof(struct b43_c32), GFP_KERNEL);
1186 if (!samples) { 1187 if (!samples) {
1187 b43err(dev->wl, "allocation for samples generation failed\n"); 1188 b43err(dev->wl, "allocation for samples generation failed\n");
1188 return 0; 1189 return 0;
@@ -3073,6 +3074,57 @@ static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
3073 return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug); 3074 return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
3074} 3075}
3075 3076
3077/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */
3078static void b43_nphy_mac_phy_clock_set(struct b43_wldev *dev, bool on)
3079{
3080 u32 tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
3081 if (on)
3082 tmslow |= SSB_TMSLOW_PHYCLK;
3083 else
3084 tmslow &= ~SSB_TMSLOW_PHYCLK;
3085 ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
3086}
3087
3088/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */
3089static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
3090{
3091 struct b43_phy *phy = &dev->phy;
3092 struct b43_phy_n *nphy = phy->n;
3093 u16 buf[16];
3094
3095 nphy->phyrxchain = mask;
3096
3097 if (0 /* FIXME clk */)
3098 return;
3099
3100 b43_mac_suspend(dev);
3101
3102 if (nphy->hang_avoid)
3103 b43_nphy_stay_in_carrier_search(dev, true);
3104
3105 b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
3106 (mask & 0x3) << B43_NPHY_RFSEQCA_RXEN_SHIFT);
3107
3108 if ((mask & 0x3) != 0x3) {
3109 b43_phy_write(dev, B43_NPHY_HPANT_SWTHRES, 1);
3110 if (dev->phy.rev >= 3) {
3111 /* TODO */
3112 }
3113 } else {
3114 b43_phy_write(dev, B43_NPHY_HPANT_SWTHRES, 0x1E);
3115 if (dev->phy.rev >= 3) {
3116 /* TODO */
3117 }
3118 }
3119
3120 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
3121
3122 if (nphy->hang_avoid)
3123 b43_nphy_stay_in_carrier_search(dev, false);
3124
3125 b43_mac_enable(dev);
3126}
3127
3076/* 3128/*
3077 * Init N-PHY 3129 * Init N-PHY
3078 * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N 3130 * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
@@ -3173,7 +3225,7 @@ int b43_phy_initn(struct b43_wldev *dev)
3173 b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA); 3225 b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA);
3174 b43_nphy_bmac_clock_fgc(dev, 0); 3226 b43_nphy_bmac_clock_fgc(dev, 0);
3175 3227
3176 /* TODO N PHY MAC PHY Clock Set with argument 1 */ 3228 b43_nphy_mac_phy_clock_set(dev, true);
3177 3229
3178 b43_nphy_pa_override(dev, false); 3230 b43_nphy_pa_override(dev, false);
3179 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX); 3231 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
@@ -3199,7 +3251,7 @@ int b43_phy_initn(struct b43_wldev *dev)
3199 } 3251 }
3200 3252
3201 if (nphy->phyrxchain != 3) 3253 if (nphy->phyrxchain != 3)
3202 ;/* TODO N PHY RX Core Set State with phyrxchain as argument */ 3254 b43_nphy_set_rx_core_state(dev, nphy->phyrxchain);
3203 if (nphy->mphase_cal_phase_id > 0) 3255 if (nphy->mphase_cal_phase_id > 0)
3204 ;/* TODO PHY Periodic Calibration Multi-Phase Restart */ 3256 ;/* TODO PHY Periodic Calibration Multi-Phase Restart */
3205 3257
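
The kzalloc(len * size) to kcalloc(len, size) conversion above, repeated in the ipw2x00 hunks that follow, is not cosmetic: kcalloc() verifies that the multiplication cannot overflow before allocating, returning NULL if it would. A user-space equivalent of that guard, under the assumption that calloc() zeroes like kzalloc does:

	#include <stdint.h>
	#include <stdlib.h>

	/* allocate a zeroed array of n elements, refusing sizes that
	 * would overflow size_t -- the check kcalloc() performs */
	static void *zalloc_array(size_t n, size_t size)
	{
		if (size && n > SIZE_MAX / size)
			return NULL;
		return calloc(n, size);
	}

	int main(void)
	{
		void *ok  = zalloc_array(16, 32);
		void *bad = zalloc_array(SIZE_MAX, 2);	/* rejected */

		free(ok);
		return bad == NULL ? 0 : 1;
	}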
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1713f5f7a58b..67f18ecdb3bf 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1623,6 +1623,7 @@ error:
1623 1623
1624static int b43legacy_upload_microcode(struct b43legacy_wldev *dev) 1624static int b43legacy_upload_microcode(struct b43legacy_wldev *dev)
1625{ 1625{
1626 struct wiphy *wiphy = dev->wl->hw->wiphy;
1626 const size_t hdr_len = sizeof(struct b43legacy_fw_header); 1627 const size_t hdr_len = sizeof(struct b43legacy_fw_header);
1627 const __be32 *data; 1628 const __be32 *data;
1628 unsigned int i; 1629 unsigned int i;
@@ -1732,6 +1733,10 @@ static int b43legacy_upload_microcode(struct b43legacy_wldev *dev)
1732 dev->fw.rev = fwrev; 1733 dev->fw.rev = fwrev;
1733 dev->fw.patch = fwpatch; 1734 dev->fw.patch = fwpatch;
1734 1735
1736 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "%u.%u",
1737 dev->fw.rev, dev->fw.patch);
1738 wiphy->hw_version = dev->dev->id.coreid;
1739
1735 return 0; 1740 return 0;
1736 1741
1737error: 1742error:
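
Like the b43 hunk earlier, the b43legacy change above fills in wiphy->fw_version and wiphy->hw_version once the microcode is uploaded, which is what lets cfg80211 report the running firmware through tools such as ethtool -i. A trivial sketch of the formatting step (struct and field names here are placeholders):

	#include <stddef.h>
	#include <stdio.h>

	struct fw_info { unsigned int rev, patch; };

	static void fill_fw_version(char *buf, size_t len,
				    const struct fw_info *fw)
	{
		/* "rev.patch", truncated safely to the buffer size */
		snprintf(buf, len, "%u.%u", fw->rev, fw->patch);
	}

	int main(void)
	{
		char ver[32];
		struct fw_info fw = { .rev = 410, .patch = 2160 };

		fill_fw_version(ver, sizeof(ver), &fw);
		printf("firmware-version: %s\n", ver);
		return 0;
	}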
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index a85e43a8d758..6038633ef361 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1696,7 +1696,7 @@ static int prism2_request_scan(struct net_device *dev)
1696 hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE, 1696 hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE,
1697 HFA384X_ROAMING_FIRMWARE); 1697 HFA384X_ROAMING_FIRMWARE);
1698 1698
1699 return 0; 1699 return ret;
1700} 1700}
1701 1701
1702#else /* !PRISM2_NO_STATION_MODES */ 1702#else /* !PRISM2_NO_STATION_MODES */
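
The one-line hostap fix above stops prism2_request_scan() from masking a failed scan request: the roaming mode is restored either way, but the function now reports the scan's actual status rather than a hard-coded 0. The pattern, with hypothetical stand-ins for the firmware calls:

	#include <stdio.h>

	static int issue_scan(void)       { return -5; /* pretend failure */ }
	static void restore_roaming(void) { /* undo temporary mode change */ }

	static int request_scan(void)
	{
		int ret = issue_scan();

		restore_roaming();	/* cleanup always runs... */
		return ret;		/* ...but callers see the status */
	}

	int main(void)
	{
		printf("request_scan() = %d\n", request_scan());
		return 0;
	}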
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 996e9d7d7586..61915f371416 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1921,9 +1921,9 @@ static int ipw2100_net_init(struct net_device *dev)
1921 1921
1922 bg_band->band = IEEE80211_BAND_2GHZ; 1922 bg_band->band = IEEE80211_BAND_2GHZ;
1923 bg_band->n_channels = geo->bg_channels; 1923 bg_band->n_channels = geo->bg_channels;
1924 bg_band->channels = 1924 bg_band->channels = kcalloc(geo->bg_channels,
1925 kzalloc(geo->bg_channels * 1925 sizeof(struct ieee80211_channel),
1926 sizeof(struct ieee80211_channel), GFP_KERNEL); 1926 GFP_KERNEL);
1927 if (!bg_band->channels) { 1927 if (!bg_band->channels) {
1928 ipw2100_down(priv); 1928 ipw2100_down(priv);
1929 return -ENOMEM; 1929 return -ENOMEM;
@@ -3056,9 +3056,9 @@ static void ipw2100_tx_send_commands(struct ipw2100_priv *priv)
3056 3056
3057 packet = list_entry(element, struct ipw2100_tx_packet, list); 3057 packet = list_entry(element, struct ipw2100_tx_packet, list);
3058 3058
3059 IPW_DEBUG_TX("using TBD at virt=%p, phys=%p\n", 3059 IPW_DEBUG_TX("using TBD at virt=%p, phys=%04X\n",
3060 &txq->drv[txq->next], 3060 &txq->drv[txq->next],
3061 (void *)(txq->nic + txq->next * 3061 (u32) (txq->nic + txq->next *
3062 sizeof(struct ipw2100_bd))); 3062 sizeof(struct ipw2100_bd)));
3063 3063
3064 packet->index = txq->next; 3064 packet->index = txq->next;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index cb2552a6777c..0f2508384c75 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11467,9 +11467,9 @@ static int ipw_net_init(struct net_device *dev)
11467 11467
11468 bg_band->band = IEEE80211_BAND_2GHZ; 11468 bg_band->band = IEEE80211_BAND_2GHZ;
11469 bg_band->n_channels = geo->bg_channels; 11469 bg_band->n_channels = geo->bg_channels;
11470 bg_band->channels = 11470 bg_band->channels = kcalloc(geo->bg_channels,
11471 kzalloc(geo->bg_channels * 11471 sizeof(struct ieee80211_channel),
11472 sizeof(struct ieee80211_channel), GFP_KERNEL); 11472 GFP_KERNEL);
11473 /* translate geo->bg to bg_band.channels */ 11473 /* translate geo->bg to bg_band.channels */
11474 for (i = 0; i < geo->bg_channels; i++) { 11474 for (i = 0; i < geo->bg_channels; i++) {
11475 bg_band->channels[i].band = IEEE80211_BAND_2GHZ; 11475 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
@@ -11502,9 +11502,9 @@ static int ipw_net_init(struct net_device *dev)
11502 11502
11503 a_band->band = IEEE80211_BAND_5GHZ; 11503 a_band->band = IEEE80211_BAND_5GHZ;
11504 a_band->n_channels = geo->a_channels; 11504 a_band->n_channels = geo->a_channels;
11505 a_band->channels = 11505 a_band->channels = kcalloc(geo->a_channels,
11506 kzalloc(geo->a_channels * 11506 sizeof(struct ieee80211_channel),
11507 sizeof(struct ieee80211_channel), GFP_KERNEL); 11507 GFP_KERNEL);
11508 /* translate geo->bg to a_band.channels */ 11508 /* translate geo->bg to a_band.channels */
11509 for (i = 0; i < geo->a_channels; i++) { 11509 for (i = 0; i < geo->a_channels; i++) {
11510 a_band->channels[i].band = IEEE80211_BAND_2GHZ; 11510 a_band->channels[i].band = IEEE80211_BAND_2GHZ;
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index a51e4da1bdfc..b82364258dc5 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -3,6 +3,9 @@ config IWLWIFI
3 depends on PCI && MAC80211 3 depends on PCI && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 5
6menu "Debugging Options"
7 depends on IWLWIFI
8
6config IWLWIFI_DEBUG 9config IWLWIFI_DEBUG
7 bool "Enable full debugging output in iwlagn and iwl3945 drivers" 10 bool "Enable full debugging output in iwlagn and iwl3945 drivers"
8 depends on IWLWIFI 11 depends on IWLWIFI
@@ -36,6 +39,12 @@ config IWLWIFI_DEBUGFS
36 is a low-impact option that allows getting insight into the 39 is a low-impact option that allows getting insight into the
37 driver's state at runtime. 40 driver's state at runtime.
38 41
42config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
43 bool "Experimental uCode support"
44 depends on IWLWIFI && IWLWIFI_DEBUG
45 ---help---
46 Enable use of experimental ucode for testing and debugging.
47
39config IWLWIFI_DEVICE_TRACING 48config IWLWIFI_DEVICE_TRACING
40 bool "iwlwifi device access tracing" 49 bool "iwlwifi device access tracing"
41 depends on IWLWIFI 50 depends on IWLWIFI
@@ -53,6 +62,7 @@ config IWLWIFI_DEVICE_TRACING
53 62
54 If unsure, say Y so we can help you better when problems 63 If unsure, say Y so we can help you better when problems
55 occur. 64 occur.
65endmenu
56 66
57config IWLAGN 67config IWLAGN
58 tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)" 68 tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 728bb858ba97..493163925a45 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_IWLAGN) += iwlagn.o
12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o 12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o 13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
14iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o 14iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
15iwlagn-objs += iwl-agn-tt.o
15iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o 16iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
16 17
17iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 18iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 0b779a41a142..56ef4ed0db47 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -130,7 +130,7 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
130 sizeof(struct iwlagn_scd_bc_tbl); 130 sizeof(struct iwlagn_scd_bc_tbl);
131 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 131 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
132 priv->hw_params.max_stations = IWLAGN_STATION_COUNT; 132 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
133 priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID; 133 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
134 134
135 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 135 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
136 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 136 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@@ -217,18 +217,24 @@ static struct iwl_lib_ops iwl1000_lib = {
217 .set_ct_kill = iwl1000_set_ct_threshold, 217 .set_ct_kill = iwl1000_set_ct_threshold,
218 }, 218 },
219 .manage_ibss_station = iwlagn_manage_ibss_station, 219 .manage_ibss_station = iwlagn_manage_ibss_station,
220 .update_bcast_station = iwl_update_bcast_station, 220 .update_bcast_stations = iwl_update_bcast_stations,
221 .debugfs_ops = { 221 .debugfs_ops = {
222 .rx_stats_read = iwl_ucode_rx_stats_read, 222 .rx_stats_read = iwl_ucode_rx_stats_read,
223 .tx_stats_read = iwl_ucode_tx_stats_read, 223 .tx_stats_read = iwl_ucode_tx_stats_read,
224 .general_stats_read = iwl_ucode_general_stats_read, 224 .general_stats_read = iwl_ucode_general_stats_read,
225 .bt_stats_read = iwl_ucode_bt_stats_read, 225 .bt_stats_read = iwl_ucode_bt_stats_read,
226 .reply_tx_error = iwl_reply_tx_error_read,
226 }, 227 },
227 .recover_from_tx_stall = iwl_bg_monitor_recover, 228 .recover_from_tx_stall = iwl_bg_monitor_recover,
228 .check_plcp_health = iwl_good_plcp_health, 229 .check_plcp_health = iwl_good_plcp_health,
229 .check_ack_health = iwl_good_ack_health, 230 .check_ack_health = iwl_good_ack_health,
230 .txfifo_flush = iwlagn_txfifo_flush, 231 .txfifo_flush = iwlagn_txfifo_flush,
231 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 232 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
233 .tt_ops = {
234 .lower_power_detection = iwl_tt_is_low_power_state,
235 .tt_power_mode = iwl_tt_current_power_mode,
236 .ct_kill_check = iwl_check_for_ct_kill,
237 }
232}; 238};
233 239
234static const struct iwl_ops iwl1000_ops = { 240static const struct iwl_ops iwl1000_ops = {
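
The tt_ops block added to iwl_lib_ops above (and again in the 5000 hunks later) wires per-device thermal-throttling hooks into the common code, so core paths can query power state without knowing which hardware flavor they are driving. The shape of that indirection, with signatures simplified from the driver's:

	#include <stdbool.h>
	#include <stdio.h>

	struct tt_ops {
		bool (*lower_power_detection)(void);
		int  (*tt_power_mode)(void);
		bool (*ct_kill_check)(void);
	};

	static bool demo_low_power(void)  { return false; }
	static int  demo_power_mode(void) { return 0; }
	static bool demo_ct_kill(void)    { return false; }

	static const struct tt_ops demo_tt_ops = {
		.lower_power_detection = demo_low_power,
		.tt_power_mode         = demo_power_mode,
		.ct_kill_check         = demo_ct_kill,
	};

	int main(void)
	{
		/* core code guards each call, so devices that leave the
		 * hooks unset simply skip thermal management */
		if (demo_tt_ops.ct_kill_check && demo_tt_ops.ct_kill_check())
			printf("CT kill threshold reached\n");
		else
			printf("thermal state OK\n");
		return 0;
	}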
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 7c731a793632..65b5834da28c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -62,7 +62,7 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-3945-hw.h) only for hardware-related definitions. 64 * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
65 * Please use iwl-3945-commands.h for uCode API definitions. 65 * Please use iwl-commands.h for uCode API definitions.
66 * Please use iwl-3945.h for driver implementation definitions. 66 * Please use iwl-3945.h for driver implementation definitions.
67 */ 67 */
68 68
@@ -226,6 +226,7 @@ struct iwl3945_eeprom {
226 226
227/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ 227/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
228#define IWL39_NUM_QUEUES 5 228#define IWL39_NUM_QUEUES 5
229#define IWL39_CMD_QUEUE_NUM 4
229 230
230#define IWL_DEFAULT_TX_RETRY 15 231#define IWL_DEFAULT_TX_RETRY 15
231 232
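
The new IWL39_CMD_QUEUE_NUM replaces the shared IWL_CMD_QUEUE_NUM in the 3945 paths below: with 5 queues, indices 0-3 carry data and index 4 is the command queue, which gets a different slot count. A sketch of the selection logic (the slot counts are illustrative assumptions, not taken from the driver):

	#include <stdio.h>

	#define IWL39_NUM_QUEUES    5
	#define IWL39_CMD_QUEUE_NUM 4

	#define TFD_CMD_SLOTS    32	/* assumed values for the demo */
	#define TFD_TX_CMD_SLOTS 256

	static int slots_for_queue(int txq_id)
	{
		return txq_id == IWL39_CMD_QUEUE_NUM ? TFD_CMD_SLOTS
						     : TFD_TX_CMD_SLOTS;
	}

	int main(void)
	{
		int q;

		for (q = 0; q < IWL39_NUM_QUEUES; q++)
			printf("txq %d: %d slots\n", q, slots_for_queue(q));
		return 0;
	}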
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 8e84a08ff951..d707f5bb1a8b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -343,7 +343,7 @@ void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 s
343 int i; 343 int i;
344 344
345 IWL_DEBUG_INFO(priv, "enter\n"); 345 IWL_DEBUG_INFO(priv, "enter\n");
346 if (sta_id == priv->hw_params.bcast_sta_id) 346 if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
347 goto out; 347 goto out;
348 348
349 psta = (struct iwl3945_sta_priv *) sta->drv_priv; 349 psta = (struct iwl3945_sta_priv *) sta->drv_priv;
@@ -932,7 +932,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
932 932
933 rcu_read_lock(); 933 rcu_read_lock();
934 934
935 sta = ieee80211_find_sta(priv->vif, 935 sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
936 priv->stations[sta_id].sta.sta.addr); 936 priv->stations[sta_id].sta.sta.addr);
937 if (!sta) { 937 if (!sta) {
938 IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n"); 938 IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
@@ -949,7 +949,8 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
949 switch (priv->band) { 949 switch (priv->band) {
950 case IEEE80211_BAND_2GHZ: 950 case IEEE80211_BAND_2GHZ:
951 /* TODO: this always does G, not a regression */ 951 /* TODO: this always does G, not a regression */
952 if (priv->active_rxon.flags & RXON_FLG_TGG_PROTECT_MSK) { 952 if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
953 RXON_FLG_TGG_PROTECT_MSK) {
953 rs_sta->tgg = 1; 954 rs_sta->tgg = 1;
954 rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot; 955 rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
955 } else 956 } else
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 8ccfcd08218d..5d09686c3389 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -245,7 +245,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
245 break; 245 break;
246 case IEEE80211_BAND_2GHZ: 246 case IEEE80211_BAND_2GHZ:
247 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && 247 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
248 iwl_is_associated(priv)) { 248 iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
249 if (rate == IWL_RATE_11M_INDEX) 249 if (rate == IWL_RATE_11M_INDEX)
250 next_rate = IWL_RATE_5M_INDEX; 250 next_rate = IWL_RATE_5M_INDEX;
251 } 251 }
@@ -273,7 +273,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
273 struct iwl_queue *q = &txq->q; 273 struct iwl_queue *q = &txq->q;
274 struct iwl_tx_info *tx_info; 274 struct iwl_tx_info *tx_info;
275 275
276 BUG_ON(txq_id == IWL_CMD_QUEUE_NUM); 276 BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
277 277
278 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index; 278 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
279 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 279 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
@@ -285,7 +285,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
285 } 285 }
286 286
287 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && 287 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
288 (txq_id != IWL_CMD_QUEUE_NUM) && 288 (txq_id != IWL39_CMD_QUEUE_NUM) &&
289 priv->mac80211_registered) 289 priv->mac80211_registered)
290 iwl_wake_queue(priv, txq_id); 290 iwl_wake_queue(priv, txq_id);
291} 291}
@@ -760,7 +760,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
760 data_retry_limit = IWL_DEFAULT_TX_RETRY; 760 data_retry_limit = IWL_DEFAULT_TX_RETRY;
761 tx_cmd->data_retry_limit = data_retry_limit; 761 tx_cmd->data_retry_limit = data_retry_limit;
762 762
763 if (tx_id >= IWL_CMD_QUEUE_NUM) 763 if (tx_id >= IWL39_CMD_QUEUE_NUM)
764 rts_retry_limit = 3; 764 rts_retry_limit = 3;
765 else 765 else
766 rts_retry_limit = 7; 766 rts_retry_limit = 7;
@@ -909,7 +909,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
909 909
910 /* Tx queue(s) */ 910 /* Tx queue(s) */
911 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 911 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
912 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 912 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
913 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 913 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
914 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 914 rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
915 txq_id); 915 txq_id);
@@ -1072,7 +1072,7 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1072 if (priv->txq) 1072 if (priv->txq)
1073 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; 1073 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1074 txq_id++) 1074 txq_id++)
1075 if (txq_id == IWL_CMD_QUEUE_NUM) 1075 if (txq_id == IWL39_CMD_QUEUE_NUM)
1076 iwl_cmd_queue_free(priv); 1076 iwl_cmd_queue_free(priv);
1077 else 1077 else
1078 iwl_tx_queue_free(priv, txq_id); 1078 iwl_tx_queue_free(priv, txq_id);
@@ -1439,17 +1439,18 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1439 int rate_idx, i; 1439 int rate_idx, i;
1440 const struct iwl_channel_info *ch_info = NULL; 1440 const struct iwl_channel_info *ch_info = NULL;
1441 struct iwl3945_txpowertable_cmd txpower = { 1441 struct iwl3945_txpowertable_cmd txpower = {
1442 .channel = priv->active_rxon.channel, 1442 .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
1443 }; 1443 };
1444 u16 chan;
1445
1446 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
1444 1447
1445 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1; 1448 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1446 ch_info = iwl_get_channel_info(priv, 1449 ch_info = iwl_get_channel_info(priv, priv->band, chan);
1447 priv->band,
1448 le16_to_cpu(priv->active_rxon.channel));
1449 if (!ch_info) { 1450 if (!ch_info) {
1450 IWL_ERR(priv, 1451 IWL_ERR(priv,
1451 "Failed to get channel info for channel %d [%d]\n", 1452 "Failed to get channel info for channel %d [%d]\n",
1452 le16_to_cpu(priv->active_rxon.channel), priv->band); 1453 chan, priv->band);
1453 return -EINVAL; 1454 return -EINVAL;
1454 } 1455 }
1455 1456
@@ -1710,7 +1711,8 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1710 return 0; 1711 return 0;
1711} 1712}
1712 1713
1713static int iwl3945_send_rxon_assoc(struct iwl_priv *priv) 1714static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1715 struct iwl_rxon_context *ctx)
1714{ 1716{
1715 int rc = 0; 1717 int rc = 0;
1716 struct iwl_rx_packet *pkt; 1718 struct iwl_rx_packet *pkt;
@@ -1721,8 +1723,8 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1721 .flags = CMD_WANT_SKB, 1723 .flags = CMD_WANT_SKB,
1722 .data = &rxon_assoc, 1724 .data = &rxon_assoc,
1723 }; 1725 };
1724 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; 1726 const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
1725 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; 1727 const struct iwl_rxon_cmd *rxon2 = &ctx->active;
1726 1728
1727 if ((rxon1->flags == rxon2->flags) && 1729 if ((rxon1->flags == rxon2->flags) &&
1728 (rxon1->filter_flags == rxon2->filter_flags) && 1730 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1732,10 +1734,10 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1732 return 0; 1734 return 0;
1733 } 1735 }
1734 1736
1735 rxon_assoc.flags = priv->staging_rxon.flags; 1737 rxon_assoc.flags = ctx->staging.flags;
1736 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; 1738 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1737 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; 1739 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1738 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; 1740 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1739 rxon_assoc.reserved = 0; 1741 rxon_assoc.reserved = 0;
1740 1742
1741 rc = iwl_send_cmd_sync(priv, &cmd); 1743 rc = iwl_send_cmd_sync(priv, &cmd);
@@ -1761,14 +1763,14 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1761 * function correctly transitions out of the RXON_ASSOC_MSK state if 1763 * function correctly transitions out of the RXON_ASSOC_MSK state if
1762 * a HW tune is required based on the RXON structure changes. 1764 * a HW tune is required based on the RXON structure changes.
1763 */ 1765 */
1764static int iwl3945_commit_rxon(struct iwl_priv *priv) 1766static int iwl3945_commit_rxon(struct iwl_priv *priv,
1767 struct iwl_rxon_context *ctx)
1765{ 1768{
1766 /* cast away the const for active_rxon in this function */ 1769 /* cast away the const for active_rxon in this function */
1767 struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon; 1770 struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
1768 struct iwl3945_rxon_cmd *staging_rxon = (void *)&priv->staging_rxon; 1771 struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
1769 int rc = 0; 1772 int rc = 0;
1770 bool new_assoc = 1773 bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
1771 !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);
1772 1774
1773 if (!iwl_is_alive(priv)) 1775 if (!iwl_is_alive(priv))
1774 return -1; 1776 return -1;
@@ -1781,7 +1783,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1781 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); 1783 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1782 staging_rxon->flags |= iwl3945_get_antenna_flags(priv); 1784 staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
1783 1785
1784 rc = iwl_check_rxon_cmd(priv); 1786 rc = iwl_check_rxon_cmd(priv, ctx);
1785 if (rc) { 1787 if (rc) {
1786 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); 1788 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1787 return -EINVAL; 1789 return -EINVAL;
@@ -1790,8 +1792,9 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1790 /* If we don't need to send a full RXON, we can use 1792 /* If we don't need to send a full RXON, we can use
1791 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter 1793 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
1792 * and other flags for the current radio configuration. */ 1794 * and other flags for the current radio configuration. */
1793 if (!iwl_full_rxon_required(priv)) { 1795 if (!iwl_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) {
1794 rc = iwl_send_rxon_assoc(priv); 1796 rc = iwl_send_rxon_assoc(priv,
1797 &priv->contexts[IWL_RXON_CTX_BSS]);
1795 if (rc) { 1798 if (rc) {
1796 IWL_ERR(priv, "Error setting RXON_ASSOC " 1799 IWL_ERR(priv, "Error setting RXON_ASSOC "
1797 "configuration (%d).\n", rc); 1800 "configuration (%d).\n", rc);
@@ -1807,7 +1810,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1807 * an RXON_ASSOC and the new config wants the associated mask enabled, 1810 * an RXON_ASSOC and the new config wants the associated mask enabled,
1808 * we must clear the associated from the active configuration 1811 * we must clear the associated from the active configuration
1809 * before we apply the new config */ 1812 * before we apply the new config */
1810 if (iwl_is_associated(priv) && new_assoc) { 1813 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
1811 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); 1814 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1812 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1815 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1813 1816
@@ -1819,7 +1822,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1819 active_rxon->reserved5 = 0; 1822 active_rxon->reserved5 = 0;
1820 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 1823 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1821 sizeof(struct iwl3945_rxon_cmd), 1824 sizeof(struct iwl3945_rxon_cmd),
1822 &priv->active_rxon); 1825 &priv->contexts[IWL_RXON_CTX_BSS].active);
1823 1826
1824 /* If the mask clearing failed then we set 1827 /* If the mask clearing failed then we set
1825 * active_rxon back to what it was previously */ 1828 * active_rxon back to what it was previously */
@@ -1829,8 +1832,9 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1829 "configuration (%d).\n", rc); 1832 "configuration (%d).\n", rc);
1830 return rc; 1833 return rc;
1831 } 1834 }
1832 iwl_clear_ucode_stations(priv); 1835 iwl_clear_ucode_stations(priv,
1833 iwl_restore_stations(priv); 1836 &priv->contexts[IWL_RXON_CTX_BSS]);
1837 iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
1834 } 1838 }
1835 1839
1836 IWL_DEBUG_INFO(priv, "Sending RXON\n" 1840 IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1848,7 +1852,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1848 staging_rxon->reserved4 = 0; 1852 staging_rxon->reserved4 = 0;
1849 staging_rxon->reserved5 = 0; 1853 staging_rxon->reserved5 = 0;
1850 1854
1851 iwl_set_rxon_hwcrypto(priv, !iwl3945_mod_params.sw_crypto); 1855 iwl_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
1852 1856
1853 /* Apply the new configuration */ 1857 /* Apply the new configuration */
1854 rc = iwl_send_cmd_pdu(priv, REPLY_RXON, 1858 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
@@ -1862,8 +1866,9 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1862 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); 1866 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1863 1867
1864 if (!new_assoc) { 1868 if (!new_assoc) {
1865 iwl_clear_ucode_stations(priv); 1869 iwl_clear_ucode_stations(priv,
1866 iwl_restore_stations(priv); 1870 &priv->contexts[IWL_RXON_CTX_BSS]);
1871 iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
1867 } 1872 }
1868 1873
1869 /* If we issue a new RXON command which required a tune then we must 1874 /* If we issue a new RXON command which required a tune then we must
@@ -2302,8 +2307,10 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2302 int ret; 2307 int ret;
2303 2308
2304 if (add) { 2309 if (add) {
2305 ret = iwl_add_bssid_station(priv, vif->bss_conf.bssid, false, 2310 ret = iwl_add_bssid_station(
2306 &vif_priv->ibss_bssid_sta_id); 2311 priv, &priv->contexts[IWL_RXON_CTX_BSS],
2312 vif->bss_conf.bssid, false,
2313 &vif_priv->ibss_bssid_sta_id);
2307 if (ret) 2314 if (ret)
2308 return ret; 2315 return ret;
2309 2316
@@ -2366,7 +2373,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2366 * 1M CCK rates */ 2373 * 1M CCK rates */
2367 2374
2368 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && 2375 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2369 iwl_is_associated(priv)) { 2376 iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
2370 2377
2371 index = IWL_FIRST_CCK_RATE; 2378 index = IWL_FIRST_CCK_RATE;
2372 for (i = IWL_RATE_6M_INDEX_TABLE; 2379 for (i = IWL_RATE_6M_INDEX_TABLE;
@@ -2421,7 +2428,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2421 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; 2428 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2422 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; 2429 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2423 priv->hw_params.max_stations = IWL3945_STATION_COUNT; 2430 priv->hw_params.max_stations = IWL3945_STATION_COUNT;
2424 priv->hw_params.bcast_sta_id = IWL3945_BROADCAST_ID; 2431 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
2432
2433 priv->sta_key_max_num = STA_KEY_MAX_NUM;
2425 2434
2426 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR; 2435 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2427 priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL; 2436 priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
@@ -2439,7 +2448,8 @@ unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
2439 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u; 2448 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
2440 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); 2449 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2441 2450
2442 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id; 2451 tx_beacon_cmd->tx.sta_id =
2452 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2443 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2453 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2444 2454
2445 frame_size = iwl3945_fill_beacon_frame(priv, 2455 frame_size = iwl3945_fill_beacon_frame(priv,
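
The long iwl-3945.c conversion above is the mechanical half of the multi-context work flagged MULTI-FIXME further down: per-connection RXON state that used to live directly on iwl_priv (active_rxon, staging_rxon, rxon_timing, vif, bcast_sta_id) moves into priv->contexts[], indexed by a context id, so additional contexts can be added later. A skeleton of that layout under the same assumption, with the field set heavily trimmed:

	#include <stdio.h>
	#include <string.h>

	enum iwl_rxon_context_id { IWL_RXON_CTX_BSS, NUM_IWL_RXON_CTX };

	struct rxon_cmd {
		unsigned short channel;
		unsigned int flags, filter_flags;
	};

	struct iwl_rxon_context {
		struct rxon_cmd active;		/* what the device runs */
		struct rxon_cmd staging;	/* what we commit next */
		unsigned char bcast_sta_id;
	};

	struct iwl_priv {
		struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
	};

	/* staging is copied over active only once the device accepts it */
	static void commit_rxon(struct iwl_rxon_context *ctx)
	{
		/* ... a REPLY_RXON built from ctx->staging goes here ... */
		memcpy(&ctx->active, &ctx->staging, sizeof(ctx->active));
	}

	int main(void)
	{
		struct iwl_priv priv = { 0 };
		struct iwl_rxon_context *ctx =
			&priv.contexts[IWL_RXON_CTX_BSS];

		ctx->staging.channel = 11;
		commit_rxon(ctx);
		printf("active channel: %u\n", ctx->active.channel);
		return 0;
	}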
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index bb2aeebf3652..98509c5e708d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -295,7 +295,7 @@ extern const struct iwl_channel_info *iwl3945_get_channel_info(
295extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate); 295extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
296 296
297/* scanning */ 297/* scanning */
298void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); 298int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
299 299
300/* Requires full declaration of iwl_priv before including */ 300/* Requires full declaration of iwl_priv before including */
301#include "iwl-io.h" 301#include "iwl-io.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index d92b72909233..943a9c7bfa7f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -347,7 +347,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
347 struct iwl_chain_noise_data *data = &(priv->chain_noise_data); 347 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
348 348
349 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && 349 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
350 iwl_is_associated(priv)) { 350 iwl_is_any_associated(priv)) {
351 struct iwl_calib_diff_gain_cmd cmd; 351 struct iwl_calib_diff_gain_cmd cmd;
352 352
353 /* clear data for chain noise calibration algorithm */ 353 /* clear data for chain noise calibration algorithm */
@@ -576,7 +576,7 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
576 /* Activate all Tx DMA/FIFO channels */ 576 /* Activate all Tx DMA/FIFO channels */
577 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6)); 577 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
578 578
579 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 579 iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
580 580
581 /* make sure all queue are not stopped */ 581 /* make sure all queue are not stopped */
582 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); 582 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
@@ -587,6 +587,7 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
587 priv->txq_ctx_active_msk = 0; 587 priv->txq_ctx_active_msk = 0;
588 /* Map each Tx/cmd queue to its corresponding fifo */ 588 /* Map each Tx/cmd queue to its corresponding fifo */
589 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7); 589 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
590
590 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { 591 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
591 int ac = default_queue_to_tx_fifo[i]; 592 int ac = default_queue_to_tx_fifo[i];
592 593
@@ -656,7 +657,7 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
656 sizeof(struct iwl4965_scd_bc_tbl); 657 sizeof(struct iwl4965_scd_bc_tbl);
657 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 658 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
658 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 659 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
659 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; 660 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
660 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE; 661 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
661 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE; 662 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
662 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE; 663 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
@@ -1374,6 +1375,7 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1374 u8 band = 0; 1375 u8 band = 0;
1375 bool is_ht40 = false; 1376 bool is_ht40 = false;
1376 u8 ctrl_chan_high = 0; 1377 u8 ctrl_chan_high = 0;
1378 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1377 1379
1378 if (test_bit(STATUS_SCANNING, &priv->status)) { 1380 if (test_bit(STATUS_SCANNING, &priv->status)) {
1379 /* If this gets hit a lot, switch it to a BUG() and catch 1381 /* If this gets hit a lot, switch it to a BUG() and catch
@@ -1385,17 +1387,16 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1385 1387
1386 band = priv->band == IEEE80211_BAND_2GHZ; 1388 band = priv->band == IEEE80211_BAND_2GHZ;
1387 1389
1388 is_ht40 = is_ht40_channel(priv->active_rxon.flags); 1390 is_ht40 = is_ht40_channel(ctx->active.flags);
1389 1391
1390 if (is_ht40 && 1392 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1391 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1392 ctrl_chan_high = 1; 1393 ctrl_chan_high = 1;
1393 1394
1394 cmd.band = band; 1395 cmd.band = band;
1395 cmd.channel = priv->active_rxon.channel; 1396 cmd.channel = ctx->active.channel;
1396 1397
1397 ret = iwl4965_fill_txpower_tbl(priv, band, 1398 ret = iwl4965_fill_txpower_tbl(priv, band,
1398 le16_to_cpu(priv->active_rxon.channel), 1399 le16_to_cpu(ctx->active.channel),
1399 is_ht40, ctrl_chan_high, &cmd.tx_power); 1400 is_ht40, ctrl_chan_high, &cmd.tx_power);
1400 if (ret) 1401 if (ret)
1401 goto out; 1402 goto out;
@@ -1406,12 +1407,13 @@ out:
1406 return ret; 1407 return ret;
1407} 1408}
1408 1409
1409static int iwl4965_send_rxon_assoc(struct iwl_priv *priv) 1410static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1411 struct iwl_rxon_context *ctx)
1410{ 1412{
1411 int ret = 0; 1413 int ret = 0;
1412 struct iwl4965_rxon_assoc_cmd rxon_assoc; 1414 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1413 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; 1415 const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
1414 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; 1416 const struct iwl_rxon_cmd *rxon2 = &ctx->active;
1415 1417
1416 if ((rxon1->flags == rxon2->flags) && 1418 if ((rxon1->flags == rxon2->flags) &&
1417 (rxon1->filter_flags == rxon2->filter_flags) && 1419 (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -1426,16 +1428,16 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1426 return 0; 1428 return 0;
1427 } 1429 }
1428 1430
1429 rxon_assoc.flags = priv->staging_rxon.flags; 1431 rxon_assoc.flags = ctx->staging.flags;
1430 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; 1432 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1431 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; 1433 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1432 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; 1434 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1433 rxon_assoc.reserved = 0; 1435 rxon_assoc.reserved = 0;
1434 rxon_assoc.ofdm_ht_single_stream_basic_rates = 1436 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1435 priv->staging_rxon.ofdm_ht_single_stream_basic_rates; 1437 ctx->staging.ofdm_ht_single_stream_basic_rates;
1436 rxon_assoc.ofdm_ht_dual_stream_basic_rates = 1438 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1437 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates; 1439 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1438 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain; 1440 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
1439 1441
1440 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, 1442 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1441 sizeof(rxon_assoc), &rxon_assoc, NULL); 1443 sizeof(rxon_assoc), &rxon_assoc, NULL);
@@ -1448,6 +1450,7 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1448static int iwl4965_hw_channel_switch(struct iwl_priv *priv, 1450static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1449 struct ieee80211_channel_switch *ch_switch) 1451 struct ieee80211_channel_switch *ch_switch)
1450{ 1452{
1453 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1451 int rc; 1454 int rc;
1452 u8 band = 0; 1455 u8 band = 0;
1453 bool is_ht40 = false; 1456 bool is_ht40 = false;
@@ -1458,22 +1461,22 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1458 u16 ch; 1461 u16 ch;
1459 u32 tsf_low; 1462 u32 tsf_low;
1460 u8 switch_count; 1463 u8 switch_count;
1461 u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval); 1464 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
1462 struct ieee80211_vif *vif = priv->vif; 1465 struct ieee80211_vif *vif = ctx->vif;
1463 band = priv->band == IEEE80211_BAND_2GHZ; 1466 band = priv->band == IEEE80211_BAND_2GHZ;
1464 1467
1465 is_ht40 = is_ht40_channel(priv->staging_rxon.flags); 1468 is_ht40 = is_ht40_channel(ctx->staging.flags);
1466 1469
1467 if (is_ht40 && 1470 if (is_ht40 &&
1468 (priv->staging_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) 1471 (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1469 ctrl_chan_high = 1; 1472 ctrl_chan_high = 1;
1470 1473
1471 cmd.band = band; 1474 cmd.band = band;
1472 cmd.expect_beacon = 0; 1475 cmd.expect_beacon = 0;
1473 ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq); 1476 ch = ch_switch->channel->hw_value;
1474 cmd.channel = cpu_to_le16(ch); 1477 cmd.channel = cpu_to_le16(ch);
1475 cmd.rxon_flags = priv->staging_rxon.flags; 1478 cmd.rxon_flags = ctx->staging.flags;
1476 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; 1479 cmd.rxon_filter_flags = ctx->staging.filter_flags;
1477 switch_count = ch_switch->count; 1480 switch_count = ch_switch->count;
1478 tsf_low = ch_switch->timestamp & 0x0ffffffff; 1481 tsf_low = ch_switch->timestamp & 0x0ffffffff;
1479 /* 1482 /*
@@ -1508,7 +1511,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1508 cmd.expect_beacon = is_channel_radar(ch_info); 1511 cmd.expect_beacon = is_channel_radar(ch_info);
1509 else { 1512 else {
1510 IWL_ERR(priv, "invalid channel switch from %u to %u\n", 1513 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1511 priv->active_rxon.channel, ch); 1514 ctx->active.channel, ch);
1512 return -EFAULT; 1515 return -EFAULT;
1513 } 1516 }
1514 1517
@@ -2007,7 +2010,7 @@ static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
2007 start = IWL_STA_ID; 2010 start = IWL_STA_ID;
2008 2011
2009 if (is_broadcast_ether_addr(addr)) 2012 if (is_broadcast_ether_addr(addr))
2010 return priv->hw_params.bcast_sta_id; 2013 return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2011 2014
2012 spin_lock_irqsave(&priv->sta_lock, flags); 2015 spin_lock_irqsave(&priv->sta_lock, flags);
2013 for (i = start; i < priv->hw_params.max_stations; i++) 2016 for (i = start; i < priv->hw_params.max_stations; i++)
@@ -2280,12 +2283,13 @@ static struct iwl_lib_ops iwl4965_lib = {
2280 .set_ct_kill = iwl4965_set_ct_threshold, 2283 .set_ct_kill = iwl4965_set_ct_threshold,
2281 }, 2284 },
2282 .manage_ibss_station = iwlagn_manage_ibss_station, 2285 .manage_ibss_station = iwlagn_manage_ibss_station,
2283 .update_bcast_station = iwl_update_bcast_station, 2286 .update_bcast_stations = iwl_update_bcast_stations,
2284 .debugfs_ops = { 2287 .debugfs_ops = {
2285 .rx_stats_read = iwl_ucode_rx_stats_read, 2288 .rx_stats_read = iwl_ucode_rx_stats_read,
2286 .tx_stats_read = iwl_ucode_tx_stats_read, 2289 .tx_stats_read = iwl_ucode_tx_stats_read,
2287 .general_stats_read = iwl_ucode_general_stats_read, 2290 .general_stats_read = iwl_ucode_general_stats_read,
2288 .bt_stats_read = iwl_ucode_bt_stats_read, 2291 .bt_stats_read = iwl_ucode_bt_stats_read,
2292 .reply_tx_error = iwl_reply_tx_error_read,
2289 }, 2293 },
2290 .recover_from_tx_stall = iwl_bg_monitor_recover, 2294 .recover_from_tx_stall = iwl_bg_monitor_recover,
2291 .check_plcp_health = iwl_good_plcp_health, 2295 .check_plcp_health = iwl_good_plcp_health,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 146e6431ae95..3975e45e7500 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -62,7 +62,7 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-5000-hw.h) only for hardware-related definitions. 64 * Please use this file (iwl-5000-hw.h) only for hardware-related definitions.
65 * Use iwl-5000-commands.h for uCode API definitions. 65 * Use iwl-commands.h for uCode API definitions.
66 */ 66 */
67 67
68#ifndef __iwl_5000_hw_h__ 68#ifndef __iwl_5000_hw_h__
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 48bdcd8d2e94..21b4b23368e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -180,7 +180,7 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
180 sizeof(struct iwlagn_scd_bc_tbl); 180 sizeof(struct iwlagn_scd_bc_tbl);
181 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 181 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
182 priv->hw_params.max_stations = IWLAGN_STATION_COUNT; 182 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
183 priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID; 183 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
184 184
185 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 185 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
186 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 186 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@@ -227,7 +227,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
227 sizeof(struct iwlagn_scd_bc_tbl); 227 sizeof(struct iwlagn_scd_bc_tbl);
228 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 228 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
229 priv->hw_params.max_stations = IWLAGN_STATION_COUNT; 229 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
230 priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID; 230 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
231 231
232 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 232 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
233 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 233 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@@ -275,14 +275,19 @@ static void iwl5150_temperature(struct iwl_priv *priv)
275static int iwl5000_hw_channel_switch(struct iwl_priv *priv, 275static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
276 struct ieee80211_channel_switch *ch_switch) 276 struct ieee80211_channel_switch *ch_switch)
277{ 277{
278 /*
279 * MULTI-FIXME
280 * See iwl_mac_channel_switch.
281 */
282 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
278 struct iwl5000_channel_switch_cmd cmd; 283 struct iwl5000_channel_switch_cmd cmd;
279 const struct iwl_channel_info *ch_info; 284 const struct iwl_channel_info *ch_info;
280 u32 switch_time_in_usec, ucode_switch_time; 285 u32 switch_time_in_usec, ucode_switch_time;
281 u16 ch; 286 u16 ch;
282 u32 tsf_low; 287 u32 tsf_low;
283 u8 switch_count; 288 u8 switch_count;
284 u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval); 289 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
285 struct ieee80211_vif *vif = priv->vif; 290 struct ieee80211_vif *vif = ctx->vif;
286 struct iwl_host_cmd hcmd = { 291 struct iwl_host_cmd hcmd = {
287 .id = REPLY_CHANNEL_SWITCH, 292 .id = REPLY_CHANNEL_SWITCH,
288 .len = sizeof(cmd), 293 .len = sizeof(cmd),
@@ -291,12 +296,12 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
291 }; 296 };
292 297
293 cmd.band = priv->band == IEEE80211_BAND_2GHZ; 298 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
294 ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq); 299 ch = ch_switch->channel->hw_value;
295 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n", 300 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
296 priv->active_rxon.channel, ch); 301 ctx->active.channel, ch);
297 cmd.channel = cpu_to_le16(ch); 302 cmd.channel = cpu_to_le16(ch);
298 cmd.rxon_flags = priv->staging_rxon.flags; 303 cmd.rxon_flags = ctx->staging.flags;
299 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; 304 cmd.rxon_filter_flags = ctx->staging.filter_flags;
300 switch_count = ch_switch->count; 305 switch_count = ch_switch->count;
301 tsf_low = ch_switch->timestamp & 0x0ffffffff; 306 tsf_low = ch_switch->timestamp & 0x0ffffffff;
302 /* 307 /*
@@ -331,7 +336,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
331 cmd.expect_beacon = is_channel_radar(ch_info); 336 cmd.expect_beacon = is_channel_radar(ch_info);
332 else { 337 else {
333 IWL_ERR(priv, "invalid channel switch from %u to %u\n", 338 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
334 priv->active_rxon.channel, ch); 339 ctx->active.channel, ch);
335 return -EFAULT; 340 return -EFAULT;
336 } 341 }
337 priv->switch_rxon.channel = cmd.channel; 342 priv->switch_rxon.channel = cmd.channel;
@@ -393,18 +398,24 @@ static struct iwl_lib_ops iwl5000_lib = {
393 .set_ct_kill = iwl5000_set_ct_threshold, 398 .set_ct_kill = iwl5000_set_ct_threshold,
394 }, 399 },
395 .manage_ibss_station = iwlagn_manage_ibss_station, 400 .manage_ibss_station = iwlagn_manage_ibss_station,
396 .update_bcast_station = iwl_update_bcast_station, 401 .update_bcast_stations = iwl_update_bcast_stations,
397 .debugfs_ops = { 402 .debugfs_ops = {
398 .rx_stats_read = iwl_ucode_rx_stats_read, 403 .rx_stats_read = iwl_ucode_rx_stats_read,
399 .tx_stats_read = iwl_ucode_tx_stats_read, 404 .tx_stats_read = iwl_ucode_tx_stats_read,
400 .general_stats_read = iwl_ucode_general_stats_read, 405 .general_stats_read = iwl_ucode_general_stats_read,
401 .bt_stats_read = iwl_ucode_bt_stats_read, 406 .bt_stats_read = iwl_ucode_bt_stats_read,
407 .reply_tx_error = iwl_reply_tx_error_read,
402 }, 408 },
403 .recover_from_tx_stall = iwl_bg_monitor_recover, 409 .recover_from_tx_stall = iwl_bg_monitor_recover,
404 .check_plcp_health = iwl_good_plcp_health, 410 .check_plcp_health = iwl_good_plcp_health,
405 .check_ack_health = iwl_good_ack_health, 411 .check_ack_health = iwl_good_ack_health,
406 .txfifo_flush = iwlagn_txfifo_flush, 412 .txfifo_flush = iwlagn_txfifo_flush,
407 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 413 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
414 .tt_ops = {
415 .lower_power_detection = iwl_tt_is_low_power_state,
416 .tt_power_mode = iwl_tt_current_power_mode,
417 .ct_kill_check = iwl_check_for_ct_kill,
418 }
408}; 419};
409 420
410static struct iwl_lib_ops iwl5150_lib = { 421static struct iwl_lib_ops iwl5150_lib = {
@@ -459,17 +470,24 @@ static struct iwl_lib_ops iwl5150_lib = {
459 .set_ct_kill = iwl5150_set_ct_threshold, 470 .set_ct_kill = iwl5150_set_ct_threshold,
460 }, 471 },
461 .manage_ibss_station = iwlagn_manage_ibss_station, 472 .manage_ibss_station = iwlagn_manage_ibss_station,
462 .update_bcast_station = iwl_update_bcast_station, 473 .update_bcast_stations = iwl_update_bcast_stations,
463 .debugfs_ops = { 474 .debugfs_ops = {
464 .rx_stats_read = iwl_ucode_rx_stats_read, 475 .rx_stats_read = iwl_ucode_rx_stats_read,
465 .tx_stats_read = iwl_ucode_tx_stats_read, 476 .tx_stats_read = iwl_ucode_tx_stats_read,
466 .general_stats_read = iwl_ucode_general_stats_read, 477 .general_stats_read = iwl_ucode_general_stats_read,
478 .bt_stats_read = iwl_ucode_bt_stats_read,
479 .reply_tx_error = iwl_reply_tx_error_read,
467 }, 480 },
468 .recover_from_tx_stall = iwl_bg_monitor_recover, 481 .recover_from_tx_stall = iwl_bg_monitor_recover,
469 .check_plcp_health = iwl_good_plcp_health, 482 .check_plcp_health = iwl_good_plcp_health,
470 .check_ack_health = iwl_good_ack_health, 483 .check_ack_health = iwl_good_ack_health,
471 .txfifo_flush = iwlagn_txfifo_flush, 484 .txfifo_flush = iwlagn_txfifo_flush,
472 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 485 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
486 .tt_ops = {
487 .lower_power_detection = iwl_tt_is_low_power_state,
488 .tt_power_mode = iwl_tt_current_power_mode,
489 .ct_kill_check = iwl_check_for_ct_kill,
490 }
473}; 491};
474 492
475static const struct iwl_ops iwl5000_ops = { 493static const struct iwl_ops iwl5000_ops = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
index ddba39999997..47891e16a758 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
@@ -62,7 +62,7 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-6000-hw.h) only for hardware-related definitions. 64 * Please use this file (iwl-6000-hw.h) only for hardware-related definitions.
65 * Use iwl-5000-commands.h for uCode API definitions. 65 * Use iwl-commands.h for uCode API definitions.
66 */ 66 */
67 67
68#ifndef __iwl_6000_hw_h__ 68#ifndef __iwl_6000_hw_h__
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index cee06b968de8..9f43f2770c96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -52,7 +52,7 @@
52/* Highest firmware API version supported */ 52/* Highest firmware API version supported */
53#define IWL6000_UCODE_API_MAX 4 53#define IWL6000_UCODE_API_MAX 4
54#define IWL6050_UCODE_API_MAX 4 54#define IWL6050_UCODE_API_MAX 4
55#define IWL6000G2_UCODE_API_MAX 4 55#define IWL6000G2_UCODE_API_MAX 5
56 56
57/* Lowest firmware API version supported */ 57/* Lowest firmware API version supported */
58#define IWL6000_UCODE_API_MIN 4 58#define IWL6000_UCODE_API_MIN 4
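Raising IWL6000G2_UCODE_API_MAX to 5 lets the driver accept a v5 firmware image while the supported minimum stays at 4. A small runnable sketch of the usual downward probing between MAX and MIN (request_fw() is a hypothetical stand-in for the firmware-loading helper):

#include <stdio.h>

#define IWL6000G2_UCODE_API_MAX 5
#define IWL6000G2_UCODE_API_MIN 4

/* Pretend only the v5 image is installed on this system. */
static int request_fw(int api) { return api == 5 ? 0 : -1; }

int main(void)
{
	int api;

	/* Try the newest API first, fall back toward the minimum. */
	for (api = IWL6000G2_UCODE_API_MAX; api >= IWL6000G2_UCODE_API_MIN; api--)
		if (request_fw(api) == 0)
			break;
	printf("loaded ucode API v%d\n", api);
	return 0;
}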
@@ -161,7 +161,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
161 sizeof(struct iwlagn_scd_bc_tbl); 161 sizeof(struct iwlagn_scd_bc_tbl);
162 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 162 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
163 priv->hw_params.max_stations = IWLAGN_STATION_COUNT; 163 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
164 priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID; 164 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
165 165
166 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE; 166 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
167 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE; 167 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
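Moving bcast_sta_id out of priv->hw_params and into the BSS entry of priv->contexts[] is part of the multi-RXON-context rework: each context owns its broadcast station. A stand-in sketch of the new lookup path (the PAN context id is assumed from the multi-context work; only the BSS id appears in this hunk):

enum iwl_rxon_context_id { IWL_RXON_CTX_BSS, IWL_RXON_CTX_PAN, NUM_IWL_RXON_CTX };

struct iwl_rxon_context_sketch { unsigned char bcast_sta_id; };
struct iwl_priv_sketch {
	struct iwl_rxon_context_sketch contexts[NUM_IWL_RXON_CTX];
};

/* After this patch, broadcast-station lookups go through the context: */
static unsigned char bcast_id(struct iwl_priv_sketch *priv)
{
	return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
}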
@@ -198,14 +198,19 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
198static int iwl6000_hw_channel_switch(struct iwl_priv *priv, 198static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
199 struct ieee80211_channel_switch *ch_switch) 199 struct ieee80211_channel_switch *ch_switch)
200{ 200{
201 /*
202 * MULTI-FIXME
203 * See iwl_mac_channel_switch.
204 */
205 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
201 struct iwl6000_channel_switch_cmd cmd; 206 struct iwl6000_channel_switch_cmd cmd;
202 const struct iwl_channel_info *ch_info; 207 const struct iwl_channel_info *ch_info;
203 u32 switch_time_in_usec, ucode_switch_time; 208 u32 switch_time_in_usec, ucode_switch_time;
204 u16 ch; 209 u16 ch;
205 u32 tsf_low; 210 u32 tsf_low;
206 u8 switch_count; 211 u8 switch_count;
207 u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval); 212 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
208 struct ieee80211_vif *vif = priv->vif; 213 struct ieee80211_vif *vif = ctx->vif;
209 struct iwl_host_cmd hcmd = { 214 struct iwl_host_cmd hcmd = {
210 .id = REPLY_CHANNEL_SWITCH, 215 .id = REPLY_CHANNEL_SWITCH,
211 .len = sizeof(cmd), 216 .len = sizeof(cmd),
@@ -214,12 +219,12 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
214 }; 219 };
215 220
216 cmd.band = priv->band == IEEE80211_BAND_2GHZ; 221 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
217 ch = ieee80211_frequency_to_channel(ch_switch->channel->center_freq); 222 ch = ch_switch->channel->hw_value;
218 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", 223 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
219 priv->active_rxon.channel, ch); 224 ctx->active.channel, ch);
220 cmd.channel = cpu_to_le16(ch); 225 cmd.channel = cpu_to_le16(ch);
221 cmd.rxon_flags = priv->staging_rxon.flags; 226 cmd.rxon_flags = ctx->staging.flags;
222 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags; 227 cmd.rxon_filter_flags = ctx->staging.filter_flags;
223 switch_count = ch_switch->count; 228 switch_count = ch_switch->count;
224 tsf_low = ch_switch->timestamp & 0x0ffffffff; 229 tsf_low = ch_switch->timestamp & 0x0ffffffff;
225 /* 230 /*
@@ -254,7 +259,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
254 cmd.expect_beacon = is_channel_radar(ch_info); 259 cmd.expect_beacon = is_channel_radar(ch_info);
255 else { 260 else {
256 IWL_ERR(priv, "invalid channel switch from %u to %u\n", 261 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
257 priv->active_rxon.channel, ch); 262 ctx->active.channel, ch);
258 return -EFAULT; 263 return -EFAULT;
259 } 264 }
260 priv->switch_rxon.channel = cmd.channel; 265 priv->switch_rxon.channel = cmd.channel;
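Besides routing everything through ctx (staging/active RXON, timing, and vif instead of the priv-wide copies), the hunk drops the frequency-to-channel conversion: mac80211 already carries the channel number in hw_value, so the old ieee80211_frequency_to_channel() call was a round trip. A sketch with a stand-in channel type:

struct ieee80211_channel_sketch {
	int center_freq;         /* MHz, e.g. 2412 */
	unsigned short hw_value; /* channel number, e.g. 1 */
};

static unsigned short switch_target(const struct ieee80211_channel_sketch *c)
{
	return c->hw_value;      /* no frequency-to-channel conversion needed */
}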
@@ -318,18 +323,100 @@ static struct iwl_lib_ops iwl6000_lib = {
318 .set_calib_version = iwl6000_set_calib_version, 323 .set_calib_version = iwl6000_set_calib_version,
319 }, 324 },
320 .manage_ibss_station = iwlagn_manage_ibss_station, 325 .manage_ibss_station = iwlagn_manage_ibss_station,
321 .update_bcast_station = iwl_update_bcast_station, 326 .update_bcast_stations = iwl_update_bcast_stations,
322 .debugfs_ops = { 327 .debugfs_ops = {
323 .rx_stats_read = iwl_ucode_rx_stats_read, 328 .rx_stats_read = iwl_ucode_rx_stats_read,
324 .tx_stats_read = iwl_ucode_tx_stats_read, 329 .tx_stats_read = iwl_ucode_tx_stats_read,
325 .general_stats_read = iwl_ucode_general_stats_read, 330 .general_stats_read = iwl_ucode_general_stats_read,
326 .bt_stats_read = iwl_ucode_bt_stats_read, 331 .bt_stats_read = iwl_ucode_bt_stats_read,
332 .reply_tx_error = iwl_reply_tx_error_read,
327 }, 333 },
328 .recover_from_tx_stall = iwl_bg_monitor_recover, 334 .recover_from_tx_stall = iwl_bg_monitor_recover,
329 .check_plcp_health = iwl_good_plcp_health, 335 .check_plcp_health = iwl_good_plcp_health,
330 .check_ack_health = iwl_good_ack_health, 336 .check_ack_health = iwl_good_ack_health,
331 .txfifo_flush = iwlagn_txfifo_flush, 337 .txfifo_flush = iwlagn_txfifo_flush,
332 .dev_txfifo_flush = iwlagn_dev_txfifo_flush, 338 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
339 .tt_ops = {
340 .lower_power_detection = iwl_tt_is_low_power_state,
341 .tt_power_mode = iwl_tt_current_power_mode,
342 .ct_kill_check = iwl_check_for_ct_kill,
343 }
344};
345
346static struct iwl_lib_ops iwl6000g2b_lib = {
347 .set_hw_params = iwl6000_hw_set_hw_params,
348 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
349 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
350 .txq_set_sched = iwlagn_txq_set_sched,
351 .txq_agg_enable = iwlagn_txq_agg_enable,
352 .txq_agg_disable = iwlagn_txq_agg_disable,
353 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
354 .txq_free_tfd = iwl_hw_txq_free_tfd,
355 .txq_init = iwl_hw_tx_queue_init,
356 .rx_handler_setup = iwlagn_bt_rx_handler_setup,
357 .setup_deferred_work = iwlagn_bt_setup_deferred_work,
358 .cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
359 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
360 .load_ucode = iwlagn_load_ucode,
361 .dump_nic_event_log = iwl_dump_nic_event_log,
362 .dump_nic_error_log = iwl_dump_nic_error_log,
363 .dump_csr = iwl_dump_csr,
364 .dump_fh = iwl_dump_fh,
365 .init_alive_start = iwlagn_init_alive_start,
366 .alive_notify = iwlagn_alive_notify,
367 .send_tx_power = iwlagn_send_tx_power,
368 .update_chain_flags = iwl_update_chain_flags,
369 .set_channel_switch = iwl6000_hw_channel_switch,
370 .apm_ops = {
371 .init = iwl_apm_init,
372 .stop = iwl_apm_stop,
373 .config = iwl6000_nic_config,
374 .set_pwr_src = iwl_set_pwr_src,
375 },
376 .eeprom_ops = {
377 .regulatory_bands = {
378 EEPROM_REG_BAND_1_CHANNELS,
379 EEPROM_REG_BAND_2_CHANNELS,
380 EEPROM_REG_BAND_3_CHANNELS,
381 EEPROM_REG_BAND_4_CHANNELS,
382 EEPROM_REG_BAND_5_CHANNELS,
383 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
384 EEPROM_REG_BAND_52_HT40_CHANNELS
385 },
386 .verify_signature = iwlcore_eeprom_verify_signature,
387 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
388 .release_semaphore = iwlcore_eeprom_release_semaphore,
389 .calib_version = iwlagn_eeprom_calib_version,
390 .query_addr = iwlagn_eeprom_query_addr,
391 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
392 },
393 .post_associate = iwl_post_associate,
394 .isr = iwl_isr_ict,
395 .config_ap = iwl_config_ap,
396 .temp_ops = {
397 .temperature = iwlagn_temperature,
398 .set_ct_kill = iwl6000_set_ct_threshold,
399 .set_calib_version = iwl6000_set_calib_version,
400 },
401 .manage_ibss_station = iwlagn_manage_ibss_station,
402 .update_bcast_stations = iwl_update_bcast_stations,
403 .debugfs_ops = {
404 .rx_stats_read = iwl_ucode_rx_stats_read,
405 .tx_stats_read = iwl_ucode_tx_stats_read,
406 .general_stats_read = iwl_ucode_general_stats_read,
407 .bt_stats_read = iwl_ucode_bt_stats_read,
408 .reply_tx_error = iwl_reply_tx_error_read,
409 },
410 .recover_from_tx_stall = iwl_bg_monitor_recover,
411 .check_plcp_health = iwl_good_plcp_health,
412 .check_ack_health = iwl_good_ack_health,
413 .txfifo_flush = iwlagn_txfifo_flush,
414 .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
415 .tt_ops = {
416 .lower_power_detection = iwl_tt_is_low_power_state,
417 .tt_power_mode = iwl_tt_current_power_mode,
418 .ct_kill_check = iwl_check_for_ct_kill,
419 }
333}; 420};
334 421
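iwl6000g2b_lib duplicates iwl6000_lib except for the three BT-aware hooks: rx_handler_setup, setup_deferred_work, and cancel_deferred_work. A sketch of the assumed shape of such a BT variant, calling the plain setup first and layering coex handling on top (function names here are stand-ins, not the driver's exact symbols):

struct iwl_priv2;

static void plain_rx_handler_setup(struct iwl_priv2 *priv) { (void)priv; }
static void register_bt_coex_handler(struct iwl_priv2 *priv) { (void)priv; }

static void bt_rx_handler_setup_sketch(struct iwl_priv2 *priv)
{
	plain_rx_handler_setup(priv);	/* base reply handlers */
	register_bt_coex_handler(priv);	/* plus BT coex profile notifications */
}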
335static const struct iwl_ops iwl6000_ops = { 422static const struct iwl_ops iwl6000_ops = {
@@ -339,21 +426,9 @@ static const struct iwl_ops iwl6000_ops = {
339 .led = &iwlagn_led_ops, 426 .led = &iwlagn_led_ops,
340}; 427};
341 428
342static void do_not_send_bt_config(struct iwl_priv *priv)
343{
344}
345
346static struct iwl_hcmd_ops iwl6000g2b_hcmd = {
347 .rxon_assoc = iwlagn_send_rxon_assoc,
348 .commit_rxon = iwl_commit_rxon,
349 .set_rxon_chain = iwl_set_rxon_chain,
350 .set_tx_ant = iwlagn_send_tx_ant_config,
351 .send_bt_config = do_not_send_bt_config,
352};
353
354static const struct iwl_ops iwl6000g2b_ops = { 429static const struct iwl_ops iwl6000g2b_ops = {
355 .lib = &iwl6000_lib, 430 .lib = &iwl6000g2b_lib,
356 .hcmd = &iwl6000g2b_hcmd, 431 .hcmd = &iwlagn_bt_hcmd,
357 .utils = &iwlagn_hcmd_utils, 432 .utils = &iwlagn_hcmd_utils,
358 .led = &iwlagn_led_ops, 433 .led = &iwlagn_led_ops,
359}; 434};
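With the BT-aware lib in place, the per-device iwl6000g2b_hcmd table and its do_not_send_bt_config() stub become redundant: the shared iwlagn_bt_hcmd is assumed to route BT configuration to the advanced-coexistence command rather than silently dropping the legacy one, which is all the empty stub achieved. The intent, sketched with hypothetical helper names:

struct iwl_priv3;

static void send_advance_bt_config(struct iwl_priv3 *priv) { (void)priv; }
static void send_legacy_bt_config(struct iwl_priv3 *priv) { (void)priv; }

static void bt_config_sketch(struct iwl_priv3 *priv, int advanced_bt_coexist)
{
	if (advanced_bt_coexist)
		send_advance_bt_config(priv);	/* extended coex command */
	else
		send_legacy_bt_config(priv);	/* legacy REPLY_BT_CONFIG */
}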
@@ -494,7 +569,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
494 .supports_idle = true, 569 .supports_idle = true,
495 .adv_thermal_throttle = true, 570 .adv_thermal_throttle = true,
496 .support_ct_kill_exit = true, 571 .support_ct_kill_exit = true,
497 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 572 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
498 .chain_noise_scale = 1000, 573 .chain_noise_scale = 1000,
499 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, 574 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
500 .max_event_log_size = 512, 575 .max_event_log_size = 512,
@@ -502,6 +577,11 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
502 .chain_noise_calib_by_driver = true, 577 .chain_noise_calib_by_driver = true,
503 .need_dc_calib = true, 578 .need_dc_calib = true,
504 .bt_statistics = true, 579 .bt_statistics = true,
580 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
581 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
582 .advanced_bt_coexist = true,
583 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
584 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
505}; 585};
506 586
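The same five BT-coexist fields are added to every 6000g2b config below. scan_tx_antennas[] is a per-band override: with Bluetooth sharing the 2.4 GHz radio, probe requests are pinned to antenna A instead of being toggled across all valid TX antennas. A sketch of how such a mask would be applied (names are stand-ins):

#define ANT_A 0x1
enum band_sketch { BAND_2GHZ, BAND_5GHZ, NUM_BANDS };

struct cfg_sketch { unsigned char scan_tx_antennas[NUM_BANDS]; };

/* If a per-band override is set, scans are restricted to it;
 * otherwise any valid TX antenna may be used. */
static unsigned char scan_ant(const struct cfg_sketch *cfg, enum band_sketch b,
			      unsigned char valid_tx_ant)
{
	if (cfg->scan_tx_antennas[b])
		return cfg->scan_tx_antennas[b] & valid_tx_ant;
	return valid_tx_ant;
}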
507struct iwl_cfg iwl6000g2b_2abg_cfg = { 587struct iwl_cfg iwl6000g2b_2abg_cfg = {
@@ -530,7 +610,7 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
530 .supports_idle = true, 610 .supports_idle = true,
531 .adv_thermal_throttle = true, 611 .adv_thermal_throttle = true,
532 .support_ct_kill_exit = true, 612 .support_ct_kill_exit = true,
533 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 613 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
534 .chain_noise_scale = 1000, 614 .chain_noise_scale = 1000,
535 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, 615 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
536 .max_event_log_size = 512, 616 .max_event_log_size = 512,
@@ -538,6 +618,11 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
538 .chain_noise_calib_by_driver = true, 618 .chain_noise_calib_by_driver = true,
539 .need_dc_calib = true, 619 .need_dc_calib = true,
540 .bt_statistics = true, 620 .bt_statistics = true,
621 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
622 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
623 .advanced_bt_coexist = true,
624 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
625 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
541}; 626};
542 627
543struct iwl_cfg iwl6000g2b_2bgn_cfg = { 628struct iwl_cfg iwl6000g2b_2bgn_cfg = {
@@ -568,7 +653,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
568 .supports_idle = true, 653 .supports_idle = true,
569 .adv_thermal_throttle = true, 654 .adv_thermal_throttle = true,
570 .support_ct_kill_exit = true, 655 .support_ct_kill_exit = true,
571 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 656 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
572 .chain_noise_scale = 1000, 657 .chain_noise_scale = 1000,
573 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, 658 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
574 .max_event_log_size = 512, 659 .max_event_log_size = 512,
@@ -576,6 +661,11 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
576 .chain_noise_calib_by_driver = true, 661 .chain_noise_calib_by_driver = true,
577 .need_dc_calib = true, 662 .need_dc_calib = true,
578 .bt_statistics = true, 663 .bt_statistics = true,
664 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
665 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
666 .advanced_bt_coexist = true,
667 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
668 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
579}; 669};
580 670
581struct iwl_cfg iwl6000g2b_2bg_cfg = { 671struct iwl_cfg iwl6000g2b_2bg_cfg = {
@@ -604,7 +694,7 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
604 .supports_idle = true, 694 .supports_idle = true,
605 .adv_thermal_throttle = true, 695 .adv_thermal_throttle = true,
606 .support_ct_kill_exit = true, 696 .support_ct_kill_exit = true,
607 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 697 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
608 .chain_noise_scale = 1000, 698 .chain_noise_scale = 1000,
609 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, 699 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
610 .max_event_log_size = 512, 700 .max_event_log_size = 512,
@@ -612,6 +702,11 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
612 .chain_noise_calib_by_driver = true, 702 .chain_noise_calib_by_driver = true,
613 .need_dc_calib = true, 703 .need_dc_calib = true,
614 .bt_statistics = true, 704 .bt_statistics = true,
705 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
706 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
707 .advanced_bt_coexist = true,
708 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
709 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
615}; 710};
616 711
617struct iwl_cfg iwl6000g2b_bgn_cfg = { 712struct iwl_cfg iwl6000g2b_bgn_cfg = {
@@ -642,7 +737,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
642 .supports_idle = true, 737 .supports_idle = true,
643 .adv_thermal_throttle = true, 738 .adv_thermal_throttle = true,
644 .support_ct_kill_exit = true, 739 .support_ct_kill_exit = true,
645 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 740 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
646 .chain_noise_scale = 1000, 741 .chain_noise_scale = 1000,
647 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, 742 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
648 .max_event_log_size = 512, 743 .max_event_log_size = 512,
@@ -650,6 +745,11 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
650 .chain_noise_calib_by_driver = true, 745 .chain_noise_calib_by_driver = true,
651 .need_dc_calib = true, 746 .need_dc_calib = true,
652 .bt_statistics = true, 747 .bt_statistics = true,
748 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
749 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
750 .advanced_bt_coexist = true,
751 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
752 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
653}; 753};
654 754
655struct iwl_cfg iwl6000g2b_bg_cfg = { 755struct iwl_cfg iwl6000g2b_bg_cfg = {
@@ -678,7 +778,7 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
678 .supports_idle = true, 778 .supports_idle = true,
679 .adv_thermal_throttle = true, 779 .adv_thermal_throttle = true,
680 .support_ct_kill_exit = true, 780 .support_ct_kill_exit = true,
681 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 781 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
682 .chain_noise_scale = 1000, 782 .chain_noise_scale = 1000,
683 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, 783 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
684 .max_event_log_size = 512, 784 .max_event_log_size = 512,
@@ -686,6 +786,11 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
686 .chain_noise_calib_by_driver = true, 786 .chain_noise_calib_by_driver = true,
687 .need_dc_calib = true, 787 .need_dc_calib = true,
688 .bt_statistics = true, 788 .bt_statistics = true,
789 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
790 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
791 .advanced_bt_coexist = true,
792 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
793 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
689}; 794};
690 795
691/* 796/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index c4c5691032a6..84ad62958535 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -625,7 +625,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
625 625
626 data = &(priv->sensitivity_data); 626 data = &(priv->sensitivity_data);
627 627
628 if (!iwl_is_associated(priv)) { 628 if (!iwl_is_any_associated(priv)) {
629 IWL_DEBUG_CALIB(priv, "<< - not associated\n"); 629 IWL_DEBUG_CALIB(priv, "<< - not associated\n");
630 return; 630 return;
631 } 631 }
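iwl_is_associated() checked the single priv-wide RXON state; with several RXON contexts, sensitivity calibration must run if any of them is associated, hence the rename. Assumed shape of the new helper:

#define NUM_CTX_SKETCH 2
struct ctx_assoc_sketch { int associated; };

static int is_any_associated_sketch(const struct ctx_assoc_sketch ctx[NUM_CTX_SKETCH])
{
	int i;

	for (i = 0; i < NUM_CTX_SKETCH; i++)
		if (ctx[i].associated)
			return 1;
	return 0;
}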
@@ -763,6 +763,12 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
763 unsigned long flags; 763 unsigned long flags;
764 struct statistics_rx_non_phy *rx_info; 764 struct statistics_rx_non_phy *rx_info;
765 u8 first_chain; 765 u8 first_chain;
766 /*
767 * MULTI-FIXME:
768 * When we support multiple interfaces on different channels,
769 * this must be modified/fixed.
770 */
771 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
766 772
767 if (priv->disable_chain_noise_cal) 773 if (priv->disable_chain_noise_cal)
768 return; 774 return;
@@ -793,8 +799,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
793 return; 799 return;
794 } 800 }
795 801
796 rxon_band24 = !!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK); 802 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
797 rxon_chnum = le16_to_cpu(priv->staging_rxon.channel); 803 rxon_chnum = le16_to_cpu(ctx->staging.channel);
798 if (priv->cfg->bt_statistics) { 804 if (priv->cfg->bt_statistics) {
799 stat_band24 = !!(((struct iwl_bt_notif_statistics *) 805 stat_band24 = !!(((struct iwl_bt_notif_statistics *)
800 stat_resp)->flag & 806 stat_resp)->flag &
@@ -914,7 +920,11 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
914 * To be safe, simply mask out any chains that we know 920 * To be safe, simply mask out any chains that we know
915 * are not on the device. 921 * are not on the device.
916 */ 922 */
917 active_chains &= priv->hw_params.valid_rx_ant; 923 if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
924 /* operated as 1x1 in full concurrency mode */
925 active_chains &= first_antenna(priv->hw_params.valid_rx_ant);
926 } else
927 active_chains &= priv->hw_params.valid_rx_ant;
918 928
919 num_tx_chains = 0; 929 num_tx_chains = 0;
920 for (i = 0; i < NUM_RX_CHAINS; i++) { 930 for (i = 0; i < NUM_RX_CHAINS; i++) {
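In BT full-concurrency mode the radio operates 1x1, so chain-noise results are clamped to the first valid RX chain rather than the whole valid_rx_ant mask. first_antenna() keeps only the lowest set antenna bit; a user-space rendering of the same mask:

/* e.g. valid_rx_ant = ANT_A|ANT_B|ANT_C (0x7) -> ANT_A (0x1) */
static unsigned char first_antenna_sketch(unsigned char ant)
{
	return ant & -ant;	/* isolate the lowest set bit */
}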
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index d706b8afbe5a..5391b4627397 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -25,9 +25,15 @@
25* Intel Linux Wireless <ilw@linux.intel.com> 25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/ 27*****************************************************************************/
28 28#include "iwl-agn.h"
29#include "iwl-agn-debugfs.h" 29#include "iwl-agn-debugfs.h"
30 30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_hex = " %-30s 0x%02X\n";
33static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
34static const char *fmt_header =
35 "%-32s current cumulative delta max\n";
36
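The four shared format strings replace dozens of repeated literals in the readers below (and retire the old misspelled "acumulative" column header). Every row of the statistics tables now uses the same accumulation pattern, shown here as a runnable user-space sketch with snprintf standing in for the kernel's scnprintf:

#include <stdio.h>

static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";

int main(void)
{
	char buf[128];
	int pos = 0, bufsz = sizeof(buf);

	/* current / cumulative / delta / max, as in the debugfs readers */
	pos += snprintf(buf + pos, bufsz - pos, fmt_table,
			"ina_cnt:", 10u, 40u, 2u, 12u);
	fputs(buf, stdout);
	return 0;
}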
31static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) 37static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
32{ 38{
33 int p = 0; 39 int p = 0;
@@ -121,436 +127,380 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
121 } 127 }
122 128
123 pos += iwl_statistics_flag(priv, buf, bufsz); 129 pos += iwl_statistics_flag(priv, buf, bufsz);
124 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
125 "acumulative delta max\n",
126 "Statistics_Rx - OFDM:");
127 pos += scnprintf(buf + pos, bufsz - pos, 130 pos += scnprintf(buf + pos, bufsz - pos,
128 " %-30s %10u %10u %10u %10u\n", 131 fmt_header, "Statistics_Rx - OFDM:");
129 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt), 132 pos += scnprintf(buf + pos, bufsz - pos,
133 fmt_table, "ina_cnt:",
134 le32_to_cpu(ofdm->ina_cnt),
130 accum_ofdm->ina_cnt, 135 accum_ofdm->ina_cnt,
131 delta_ofdm->ina_cnt, max_ofdm->ina_cnt); 136 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
132 pos += scnprintf(buf + pos, bufsz - pos, 137 pos += scnprintf(buf + pos, bufsz - pos,
133 " %-30s %10u %10u %10u %10u\n", 138 fmt_table, "fina_cnt:",
134 "fina_cnt:",
135 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, 139 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
136 delta_ofdm->fina_cnt, max_ofdm->fina_cnt); 140 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
137 pos += scnprintf(buf + pos, bufsz - pos, 141 pos += scnprintf(buf + pos, bufsz - pos,
138 " %-30s %10u %10u %10u %10u\n", 142 fmt_table, "plcp_err:",
139 "plcp_err:",
140 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, 143 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
141 delta_ofdm->plcp_err, max_ofdm->plcp_err); 144 delta_ofdm->plcp_err, max_ofdm->plcp_err);
142 pos += scnprintf(buf + pos, bufsz - pos, 145 pos += scnprintf(buf + pos, bufsz - pos,
143 " %-30s %10u %10u %10u %10u\n", "crc32_err:", 146 fmt_table, "crc32_err:",
144 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, 147 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
145 delta_ofdm->crc32_err, max_ofdm->crc32_err); 148 delta_ofdm->crc32_err, max_ofdm->crc32_err);
146 pos += scnprintf(buf + pos, bufsz - pos, 149 pos += scnprintf(buf + pos, bufsz - pos,
147 " %-30s %10u %10u %10u %10u\n", "overrun_err:", 150 fmt_table, "overrun_err:",
148 le32_to_cpu(ofdm->overrun_err), 151 le32_to_cpu(ofdm->overrun_err),
149 accum_ofdm->overrun_err, delta_ofdm->overrun_err, 152 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
150 max_ofdm->overrun_err); 153 max_ofdm->overrun_err);
151 pos += scnprintf(buf + pos, bufsz - pos, 154 pos += scnprintf(buf + pos, bufsz - pos,
152 " %-30s %10u %10u %10u %10u\n", 155 fmt_table, "early_overrun_err:",
153 "early_overrun_err:",
154 le32_to_cpu(ofdm->early_overrun_err), 156 le32_to_cpu(ofdm->early_overrun_err),
155 accum_ofdm->early_overrun_err, 157 accum_ofdm->early_overrun_err,
156 delta_ofdm->early_overrun_err, 158 delta_ofdm->early_overrun_err,
157 max_ofdm->early_overrun_err); 159 max_ofdm->early_overrun_err);
158 pos += scnprintf(buf + pos, bufsz - pos, 160 pos += scnprintf(buf + pos, bufsz - pos,
159 " %-30s %10u %10u %10u %10u\n", 161 fmt_table, "crc32_good:",
160 "crc32_good:", le32_to_cpu(ofdm->crc32_good), 162 le32_to_cpu(ofdm->crc32_good),
161 accum_ofdm->crc32_good, delta_ofdm->crc32_good, 163 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
162 max_ofdm->crc32_good); 164 max_ofdm->crc32_good);
163 pos += scnprintf(buf + pos, bufsz - pos, 165 pos += scnprintf(buf + pos, bufsz - pos,
164 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", 166 fmt_table, "false_alarm_cnt:",
165 le32_to_cpu(ofdm->false_alarm_cnt), 167 le32_to_cpu(ofdm->false_alarm_cnt),
166 accum_ofdm->false_alarm_cnt, 168 accum_ofdm->false_alarm_cnt,
167 delta_ofdm->false_alarm_cnt, 169 delta_ofdm->false_alarm_cnt,
168 max_ofdm->false_alarm_cnt); 170 max_ofdm->false_alarm_cnt);
169 pos += scnprintf(buf + pos, bufsz - pos, 171 pos += scnprintf(buf + pos, bufsz - pos,
170 " %-30s %10u %10u %10u %10u\n", 172 fmt_table, "fina_sync_err_cnt:",
171 "fina_sync_err_cnt:",
172 le32_to_cpu(ofdm->fina_sync_err_cnt), 173 le32_to_cpu(ofdm->fina_sync_err_cnt),
173 accum_ofdm->fina_sync_err_cnt, 174 accum_ofdm->fina_sync_err_cnt,
174 delta_ofdm->fina_sync_err_cnt, 175 delta_ofdm->fina_sync_err_cnt,
175 max_ofdm->fina_sync_err_cnt); 176 max_ofdm->fina_sync_err_cnt);
176 pos += scnprintf(buf + pos, bufsz - pos, 177 pos += scnprintf(buf + pos, bufsz - pos,
177 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:", 178 fmt_table, "sfd_timeout:",
178 le32_to_cpu(ofdm->sfd_timeout), 179 le32_to_cpu(ofdm->sfd_timeout),
179 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, 180 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
180 max_ofdm->sfd_timeout); 181 max_ofdm->sfd_timeout);
181 pos += scnprintf(buf + pos, bufsz - pos, 182 pos += scnprintf(buf + pos, bufsz - pos,
182 " %-30s %10u %10u %10u %10u\n", "fina_timeout:", 183 fmt_table, "fina_timeout:",
183 le32_to_cpu(ofdm->fina_timeout), 184 le32_to_cpu(ofdm->fina_timeout),
184 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, 185 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
185 max_ofdm->fina_timeout); 186 max_ofdm->fina_timeout);
186 pos += scnprintf(buf + pos, bufsz - pos, 187 pos += scnprintf(buf + pos, bufsz - pos,
187 " %-30s %10u %10u %10u %10u\n", 188 fmt_table, "unresponded_rts:",
188 "unresponded_rts:",
189 le32_to_cpu(ofdm->unresponded_rts), 189 le32_to_cpu(ofdm->unresponded_rts),
190 accum_ofdm->unresponded_rts, 190 accum_ofdm->unresponded_rts,
191 delta_ofdm->unresponded_rts, 191 delta_ofdm->unresponded_rts,
192 max_ofdm->unresponded_rts); 192 max_ofdm->unresponded_rts);
193 pos += scnprintf(buf + pos, bufsz - pos, 193 pos += scnprintf(buf + pos, bufsz - pos,
194 " %-30s %10u %10u %10u %10u\n", 194 fmt_table, "rxe_frame_lmt_ovrun:",
195 "rxe_frame_lmt_ovrun:",
196 le32_to_cpu(ofdm->rxe_frame_limit_overrun), 195 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
197 accum_ofdm->rxe_frame_limit_overrun, 196 accum_ofdm->rxe_frame_limit_overrun,
198 delta_ofdm->rxe_frame_limit_overrun, 197 delta_ofdm->rxe_frame_limit_overrun,
199 max_ofdm->rxe_frame_limit_overrun); 198 max_ofdm->rxe_frame_limit_overrun);
200 pos += scnprintf(buf + pos, bufsz - pos, 199 pos += scnprintf(buf + pos, bufsz - pos,
201 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", 200 fmt_table, "sent_ack_cnt:",
202 le32_to_cpu(ofdm->sent_ack_cnt), 201 le32_to_cpu(ofdm->sent_ack_cnt),
203 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, 202 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
204 max_ofdm->sent_ack_cnt); 203 max_ofdm->sent_ack_cnt);
205 pos += scnprintf(buf + pos, bufsz - pos, 204 pos += scnprintf(buf + pos, bufsz - pos,
206 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", 205 fmt_table, "sent_cts_cnt:",
207 le32_to_cpu(ofdm->sent_cts_cnt), 206 le32_to_cpu(ofdm->sent_cts_cnt),
208 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, 207 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
209 max_ofdm->sent_cts_cnt); 208 max_ofdm->sent_cts_cnt);
210 pos += scnprintf(buf + pos, bufsz - pos, 209 pos += scnprintf(buf + pos, bufsz - pos,
211 " %-30s %10u %10u %10u %10u\n", 210 fmt_table, "sent_ba_rsp_cnt:",
212 "sent_ba_rsp_cnt:",
213 le32_to_cpu(ofdm->sent_ba_rsp_cnt), 211 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
214 accum_ofdm->sent_ba_rsp_cnt, 212 accum_ofdm->sent_ba_rsp_cnt,
215 delta_ofdm->sent_ba_rsp_cnt, 213 delta_ofdm->sent_ba_rsp_cnt,
216 max_ofdm->sent_ba_rsp_cnt); 214 max_ofdm->sent_ba_rsp_cnt);
217 pos += scnprintf(buf + pos, bufsz - pos, 215 pos += scnprintf(buf + pos, bufsz - pos,
218 " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:", 216 fmt_table, "dsp_self_kill:",
219 le32_to_cpu(ofdm->dsp_self_kill), 217 le32_to_cpu(ofdm->dsp_self_kill),
220 accum_ofdm->dsp_self_kill, 218 accum_ofdm->dsp_self_kill,
221 delta_ofdm->dsp_self_kill, 219 delta_ofdm->dsp_self_kill,
222 max_ofdm->dsp_self_kill); 220 max_ofdm->dsp_self_kill);
223 pos += scnprintf(buf + pos, bufsz - pos, 221 pos += scnprintf(buf + pos, bufsz - pos,
224 " %-30s %10u %10u %10u %10u\n", 222 fmt_table, "mh_format_err:",
225 "mh_format_err:",
226 le32_to_cpu(ofdm->mh_format_err), 223 le32_to_cpu(ofdm->mh_format_err),
227 accum_ofdm->mh_format_err, 224 accum_ofdm->mh_format_err,
228 delta_ofdm->mh_format_err, 225 delta_ofdm->mh_format_err,
229 max_ofdm->mh_format_err); 226 max_ofdm->mh_format_err);
230 pos += scnprintf(buf + pos, bufsz - pos, 227 pos += scnprintf(buf + pos, bufsz - pos,
231 " %-30s %10u %10u %10u %10u\n", 228 fmt_table, "re_acq_main_rssi_sum:",
232 "re_acq_main_rssi_sum:",
233 le32_to_cpu(ofdm->re_acq_main_rssi_sum), 229 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
234 accum_ofdm->re_acq_main_rssi_sum, 230 accum_ofdm->re_acq_main_rssi_sum,
235 delta_ofdm->re_acq_main_rssi_sum, 231 delta_ofdm->re_acq_main_rssi_sum,
236 max_ofdm->re_acq_main_rssi_sum); 232 max_ofdm->re_acq_main_rssi_sum);
237 233
238 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
239 "acumulative delta max\n",
240 "Statistics_Rx - CCK:");
241 pos += scnprintf(buf + pos, bufsz - pos, 234 pos += scnprintf(buf + pos, bufsz - pos,
242 " %-30s %10u %10u %10u %10u\n", 235 fmt_header, "Statistics_Rx - CCK:");
243 "ina_cnt:", 236 pos += scnprintf(buf + pos, bufsz - pos,
237 fmt_table, "ina_cnt:",
244 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, 238 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
245 delta_cck->ina_cnt, max_cck->ina_cnt); 239 delta_cck->ina_cnt, max_cck->ina_cnt);
246 pos += scnprintf(buf + pos, bufsz - pos, 240 pos += scnprintf(buf + pos, bufsz - pos,
247 " %-30s %10u %10u %10u %10u\n", 241 fmt_table, "fina_cnt:",
248 "fina_cnt:",
249 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, 242 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
250 delta_cck->fina_cnt, max_cck->fina_cnt); 243 delta_cck->fina_cnt, max_cck->fina_cnt);
251 pos += scnprintf(buf + pos, bufsz - pos, 244 pos += scnprintf(buf + pos, bufsz - pos,
252 " %-30s %10u %10u %10u %10u\n", 245 fmt_table, "plcp_err:",
253 "plcp_err:",
254 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, 246 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
255 delta_cck->plcp_err, max_cck->plcp_err); 247 delta_cck->plcp_err, max_cck->plcp_err);
256 pos += scnprintf(buf + pos, bufsz - pos, 248 pos += scnprintf(buf + pos, bufsz - pos,
257 " %-30s %10u %10u %10u %10u\n", 249 fmt_table, "crc32_err:",
258 "crc32_err:",
259 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, 250 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
260 delta_cck->crc32_err, max_cck->crc32_err); 251 delta_cck->crc32_err, max_cck->crc32_err);
261 pos += scnprintf(buf + pos, bufsz - pos, 252 pos += scnprintf(buf + pos, bufsz - pos,
262 " %-30s %10u %10u %10u %10u\n", 253 fmt_table, "overrun_err:",
263 "overrun_err:",
264 le32_to_cpu(cck->overrun_err), 254 le32_to_cpu(cck->overrun_err),
265 accum_cck->overrun_err, delta_cck->overrun_err, 255 accum_cck->overrun_err, delta_cck->overrun_err,
266 max_cck->overrun_err); 256 max_cck->overrun_err);
267 pos += scnprintf(buf + pos, bufsz - pos, 257 pos += scnprintf(buf + pos, bufsz - pos,
268 " %-30s %10u %10u %10u %10u\n", 258 fmt_table, "early_overrun_err:",
269 "early_overrun_err:",
270 le32_to_cpu(cck->early_overrun_err), 259 le32_to_cpu(cck->early_overrun_err),
271 accum_cck->early_overrun_err, 260 accum_cck->early_overrun_err,
272 delta_cck->early_overrun_err, 261 delta_cck->early_overrun_err,
273 max_cck->early_overrun_err); 262 max_cck->early_overrun_err);
274 pos += scnprintf(buf + pos, bufsz - pos, 263 pos += scnprintf(buf + pos, bufsz - pos,
275 " %-30s %10u %10u %10u %10u\n", 264 fmt_table, "crc32_good:",
276 "crc32_good:",
277 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, 265 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
278 delta_cck->crc32_good, max_cck->crc32_good); 266 delta_cck->crc32_good, max_cck->crc32_good);
279 pos += scnprintf(buf + pos, bufsz - pos, 267 pos += scnprintf(buf + pos, bufsz - pos,
280 " %-30s %10u %10u %10u %10u\n", 268 fmt_table, "false_alarm_cnt:",
281 "false_alarm_cnt:",
282 le32_to_cpu(cck->false_alarm_cnt), 269 le32_to_cpu(cck->false_alarm_cnt),
283 accum_cck->false_alarm_cnt, 270 accum_cck->false_alarm_cnt,
284 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); 271 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
285 pos += scnprintf(buf + pos, bufsz - pos, 272 pos += scnprintf(buf + pos, bufsz - pos,
286 " %-30s %10u %10u %10u %10u\n", 273 fmt_table, "fina_sync_err_cnt:",
287 "fina_sync_err_cnt:",
288 le32_to_cpu(cck->fina_sync_err_cnt), 274 le32_to_cpu(cck->fina_sync_err_cnt),
289 accum_cck->fina_sync_err_cnt, 275 accum_cck->fina_sync_err_cnt,
290 delta_cck->fina_sync_err_cnt, 276 delta_cck->fina_sync_err_cnt,
291 max_cck->fina_sync_err_cnt); 277 max_cck->fina_sync_err_cnt);
292 pos += scnprintf(buf + pos, bufsz - pos, 278 pos += scnprintf(buf + pos, bufsz - pos,
293 " %-30s %10u %10u %10u %10u\n", 279 fmt_table, "sfd_timeout:",
294 "sfd_timeout:",
295 le32_to_cpu(cck->sfd_timeout), 280 le32_to_cpu(cck->sfd_timeout),
296 accum_cck->sfd_timeout, delta_cck->sfd_timeout, 281 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
297 max_cck->sfd_timeout); 282 max_cck->sfd_timeout);
298 pos += scnprintf(buf + pos, bufsz - pos, 283 pos += scnprintf(buf + pos, bufsz - pos,
299 " %-30s %10u %10u %10u %10u\n", "fina_timeout:", 284 fmt_table, "fina_timeout:",
300 le32_to_cpu(cck->fina_timeout), 285 le32_to_cpu(cck->fina_timeout),
301 accum_cck->fina_timeout, delta_cck->fina_timeout, 286 accum_cck->fina_timeout, delta_cck->fina_timeout,
302 max_cck->fina_timeout); 287 max_cck->fina_timeout);
303 pos += scnprintf(buf + pos, bufsz - pos, 288 pos += scnprintf(buf + pos, bufsz - pos,
304 " %-30s %10u %10u %10u %10u\n", 289 fmt_table, "unresponded_rts:",
305 "unresponded_rts:",
306 le32_to_cpu(cck->unresponded_rts), 290 le32_to_cpu(cck->unresponded_rts),
307 accum_cck->unresponded_rts, delta_cck->unresponded_rts, 291 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
308 max_cck->unresponded_rts); 292 max_cck->unresponded_rts);
309 pos += scnprintf(buf + pos, bufsz - pos, 293 pos += scnprintf(buf + pos, bufsz - pos,
310 " %-30s %10u %10u %10u %10u\n", 294 fmt_table, "rxe_frame_lmt_ovrun:",
311 "rxe_frame_lmt_ovrun:",
312 le32_to_cpu(cck->rxe_frame_limit_overrun), 295 le32_to_cpu(cck->rxe_frame_limit_overrun),
313 accum_cck->rxe_frame_limit_overrun, 296 accum_cck->rxe_frame_limit_overrun,
314 delta_cck->rxe_frame_limit_overrun, 297 delta_cck->rxe_frame_limit_overrun,
315 max_cck->rxe_frame_limit_overrun); 298 max_cck->rxe_frame_limit_overrun);
316 pos += scnprintf(buf + pos, bufsz - pos, 299 pos += scnprintf(buf + pos, bufsz - pos,
317 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", 300 fmt_table, "sent_ack_cnt:",
318 le32_to_cpu(cck->sent_ack_cnt), 301 le32_to_cpu(cck->sent_ack_cnt),
319 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, 302 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
320 max_cck->sent_ack_cnt); 303 max_cck->sent_ack_cnt);
321 pos += scnprintf(buf + pos, bufsz - pos, 304 pos += scnprintf(buf + pos, bufsz - pos,
322 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", 305 fmt_table, "sent_cts_cnt:",
323 le32_to_cpu(cck->sent_cts_cnt), 306 le32_to_cpu(cck->sent_cts_cnt),
324 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, 307 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
325 max_cck->sent_cts_cnt); 308 max_cck->sent_cts_cnt);
326 pos += scnprintf(buf + pos, bufsz - pos, 309 pos += scnprintf(buf + pos, bufsz - pos,
327 " %-30s %10u %10u %10u %10u\n", "sent_ba_rsp_cnt:", 310 fmt_table, "sent_ba_rsp_cnt:",
328 le32_to_cpu(cck->sent_ba_rsp_cnt), 311 le32_to_cpu(cck->sent_ba_rsp_cnt),
329 accum_cck->sent_ba_rsp_cnt, 312 accum_cck->sent_ba_rsp_cnt,
330 delta_cck->sent_ba_rsp_cnt, 313 delta_cck->sent_ba_rsp_cnt,
331 max_cck->sent_ba_rsp_cnt); 314 max_cck->sent_ba_rsp_cnt);
332 pos += scnprintf(buf + pos, bufsz - pos, 315 pos += scnprintf(buf + pos, bufsz - pos,
333 " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:", 316 fmt_table, "dsp_self_kill:",
334 le32_to_cpu(cck->dsp_self_kill), 317 le32_to_cpu(cck->dsp_self_kill),
335 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill, 318 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
336 max_cck->dsp_self_kill); 319 max_cck->dsp_self_kill);
337 pos += scnprintf(buf + pos, bufsz - pos, 320 pos += scnprintf(buf + pos, bufsz - pos,
338 " %-30s %10u %10u %10u %10u\n", "mh_format_err:", 321 fmt_table, "mh_format_err:",
339 le32_to_cpu(cck->mh_format_err), 322 le32_to_cpu(cck->mh_format_err),
340 accum_cck->mh_format_err, delta_cck->mh_format_err, 323 accum_cck->mh_format_err, delta_cck->mh_format_err,
341 max_cck->mh_format_err); 324 max_cck->mh_format_err);
342 pos += scnprintf(buf + pos, bufsz - pos, 325 pos += scnprintf(buf + pos, bufsz - pos,
343 " %-30s %10u %10u %10u %10u\n", 326 fmt_table, "re_acq_main_rssi_sum:",
344 "re_acq_main_rssi_sum:",
345 le32_to_cpu(cck->re_acq_main_rssi_sum), 327 le32_to_cpu(cck->re_acq_main_rssi_sum),
346 accum_cck->re_acq_main_rssi_sum, 328 accum_cck->re_acq_main_rssi_sum,
347 delta_cck->re_acq_main_rssi_sum, 329 delta_cck->re_acq_main_rssi_sum,
348 max_cck->re_acq_main_rssi_sum); 330 max_cck->re_acq_main_rssi_sum);
349 331
350 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
351 "acumulative delta max\n",
352 "Statistics_Rx - GENERAL:");
353 pos += scnprintf(buf + pos, bufsz - pos, 332 pos += scnprintf(buf + pos, bufsz - pos,
354 " %-30s %10u %10u %10u %10u\n", "bogus_cts:", 333 fmt_header, "Statistics_Rx - GENERAL:");
334 pos += scnprintf(buf + pos, bufsz - pos,
335 fmt_table, "bogus_cts:",
355 le32_to_cpu(general->bogus_cts), 336 le32_to_cpu(general->bogus_cts),
356 accum_general->bogus_cts, delta_general->bogus_cts, 337 accum_general->bogus_cts, delta_general->bogus_cts,
357 max_general->bogus_cts); 338 max_general->bogus_cts);
358 pos += scnprintf(buf + pos, bufsz - pos, 339 pos += scnprintf(buf + pos, bufsz - pos,
359 " %-30s %10u %10u %10u %10u\n", "bogus_ack:", 340 fmt_table, "bogus_ack:",
360 le32_to_cpu(general->bogus_ack), 341 le32_to_cpu(general->bogus_ack),
361 accum_general->bogus_ack, delta_general->bogus_ack, 342 accum_general->bogus_ack, delta_general->bogus_ack,
362 max_general->bogus_ack); 343 max_general->bogus_ack);
363 pos += scnprintf(buf + pos, bufsz - pos, 344 pos += scnprintf(buf + pos, bufsz - pos,
364 " %-30s %10u %10u %10u %10u\n", 345 fmt_table, "non_bssid_frames:",
365 "non_bssid_frames:",
366 le32_to_cpu(general->non_bssid_frames), 346 le32_to_cpu(general->non_bssid_frames),
367 accum_general->non_bssid_frames, 347 accum_general->non_bssid_frames,
368 delta_general->non_bssid_frames, 348 delta_general->non_bssid_frames,
369 max_general->non_bssid_frames); 349 max_general->non_bssid_frames);
370 pos += scnprintf(buf + pos, bufsz - pos, 350 pos += scnprintf(buf + pos, bufsz - pos,
371 " %-30s %10u %10u %10u %10u\n", 351 fmt_table, "filtered_frames:",
372 "filtered_frames:",
373 le32_to_cpu(general->filtered_frames), 352 le32_to_cpu(general->filtered_frames),
374 accum_general->filtered_frames, 353 accum_general->filtered_frames,
375 delta_general->filtered_frames, 354 delta_general->filtered_frames,
376 max_general->filtered_frames); 355 max_general->filtered_frames);
377 pos += scnprintf(buf + pos, bufsz - pos, 356 pos += scnprintf(buf + pos, bufsz - pos,
378 " %-30s %10u %10u %10u %10u\n", 357 fmt_table, "non_channel_beacons:",
379 "non_channel_beacons:",
380 le32_to_cpu(general->non_channel_beacons), 358 le32_to_cpu(general->non_channel_beacons),
381 accum_general->non_channel_beacons, 359 accum_general->non_channel_beacons,
382 delta_general->non_channel_beacons, 360 delta_general->non_channel_beacons,
383 max_general->non_channel_beacons); 361 max_general->non_channel_beacons);
384 pos += scnprintf(buf + pos, bufsz - pos, 362 pos += scnprintf(buf + pos, bufsz - pos,
385 " %-30s %10u %10u %10u %10u\n", 363 fmt_table, "channel_beacons:",
386 "channel_beacons:",
387 le32_to_cpu(general->channel_beacons), 364 le32_to_cpu(general->channel_beacons),
388 accum_general->channel_beacons, 365 accum_general->channel_beacons,
389 delta_general->channel_beacons, 366 delta_general->channel_beacons,
390 max_general->channel_beacons); 367 max_general->channel_beacons);
391 pos += scnprintf(buf + pos, bufsz - pos, 368 pos += scnprintf(buf + pos, bufsz - pos,
392 " %-30s %10u %10u %10u %10u\n", 369 fmt_table, "num_missed_bcon:",
393 "num_missed_bcon:",
394 le32_to_cpu(general->num_missed_bcon), 370 le32_to_cpu(general->num_missed_bcon),
395 accum_general->num_missed_bcon, 371 accum_general->num_missed_bcon,
396 delta_general->num_missed_bcon, 372 delta_general->num_missed_bcon,
397 max_general->num_missed_bcon); 373 max_general->num_missed_bcon);
398 pos += scnprintf(buf + pos, bufsz - pos, 374 pos += scnprintf(buf + pos, bufsz - pos,
399 " %-30s %10u %10u %10u %10u\n", 375 fmt_table, "adc_rx_saturation_time:",
400 "adc_rx_saturation_time:",
401 le32_to_cpu(general->adc_rx_saturation_time), 376 le32_to_cpu(general->adc_rx_saturation_time),
402 accum_general->adc_rx_saturation_time, 377 accum_general->adc_rx_saturation_time,
403 delta_general->adc_rx_saturation_time, 378 delta_general->adc_rx_saturation_time,
404 max_general->adc_rx_saturation_time); 379 max_general->adc_rx_saturation_time);
405 pos += scnprintf(buf + pos, bufsz - pos, 380 pos += scnprintf(buf + pos, bufsz - pos,
406 " %-30s %10u %10u %10u %10u\n", 381 fmt_table, "ina_detect_search_tm:",
407 "ina_detect_search_tm:",
408 le32_to_cpu(general->ina_detection_search_time), 382 le32_to_cpu(general->ina_detection_search_time),
409 accum_general->ina_detection_search_time, 383 accum_general->ina_detection_search_time,
410 delta_general->ina_detection_search_time, 384 delta_general->ina_detection_search_time,
411 max_general->ina_detection_search_time); 385 max_general->ina_detection_search_time);
412 pos += scnprintf(buf + pos, bufsz - pos, 386 pos += scnprintf(buf + pos, bufsz - pos,
413 " %-30s %10u %10u %10u %10u\n", 387 fmt_table, "beacon_silence_rssi_a:",
414 "beacon_silence_rssi_a:",
415 le32_to_cpu(general->beacon_silence_rssi_a), 388 le32_to_cpu(general->beacon_silence_rssi_a),
416 accum_general->beacon_silence_rssi_a, 389 accum_general->beacon_silence_rssi_a,
417 delta_general->beacon_silence_rssi_a, 390 delta_general->beacon_silence_rssi_a,
418 max_general->beacon_silence_rssi_a); 391 max_general->beacon_silence_rssi_a);
419 pos += scnprintf(buf + pos, bufsz - pos, 392 pos += scnprintf(buf + pos, bufsz - pos,
420 " %-30s %10u %10u %10u %10u\n", 393 fmt_table, "beacon_silence_rssi_b:",
421 "beacon_silence_rssi_b:",
422 le32_to_cpu(general->beacon_silence_rssi_b), 394 le32_to_cpu(general->beacon_silence_rssi_b),
423 accum_general->beacon_silence_rssi_b, 395 accum_general->beacon_silence_rssi_b,
424 delta_general->beacon_silence_rssi_b, 396 delta_general->beacon_silence_rssi_b,
425 max_general->beacon_silence_rssi_b); 397 max_general->beacon_silence_rssi_b);
426 pos += scnprintf(buf + pos, bufsz - pos, 398 pos += scnprintf(buf + pos, bufsz - pos,
427 " %-30s %10u %10u %10u %10u\n", 399 fmt_table, "beacon_silence_rssi_c:",
428 "beacon_silence_rssi_c:",
429 le32_to_cpu(general->beacon_silence_rssi_c), 400 le32_to_cpu(general->beacon_silence_rssi_c),
430 accum_general->beacon_silence_rssi_c, 401 accum_general->beacon_silence_rssi_c,
431 delta_general->beacon_silence_rssi_c, 402 delta_general->beacon_silence_rssi_c,
432 max_general->beacon_silence_rssi_c); 403 max_general->beacon_silence_rssi_c);
433 pos += scnprintf(buf + pos, bufsz - pos, 404 pos += scnprintf(buf + pos, bufsz - pos,
434 " %-30s %10u %10u %10u %10u\n", 405 fmt_table, "interference_data_flag:",
435 "interference_data_flag:",
436 le32_to_cpu(general->interference_data_flag), 406 le32_to_cpu(general->interference_data_flag),
437 accum_general->interference_data_flag, 407 accum_general->interference_data_flag,
438 delta_general->interference_data_flag, 408 delta_general->interference_data_flag,
439 max_general->interference_data_flag); 409 max_general->interference_data_flag);
440 pos += scnprintf(buf + pos, bufsz - pos, 410 pos += scnprintf(buf + pos, bufsz - pos,
441 " %-30s %10u %10u %10u %10u\n", 411 fmt_table, "channel_load:",
442 "channel_load:",
443 le32_to_cpu(general->channel_load), 412 le32_to_cpu(general->channel_load),
444 accum_general->channel_load, 413 accum_general->channel_load,
445 delta_general->channel_load, 414 delta_general->channel_load,
446 max_general->channel_load); 415 max_general->channel_load);
447 pos += scnprintf(buf + pos, bufsz - pos, 416 pos += scnprintf(buf + pos, bufsz - pos,
448 " %-30s %10u %10u %10u %10u\n", 417 fmt_table, "dsp_false_alarms:",
449 "dsp_false_alarms:",
450 le32_to_cpu(general->dsp_false_alarms), 418 le32_to_cpu(general->dsp_false_alarms),
451 accum_general->dsp_false_alarms, 419 accum_general->dsp_false_alarms,
452 delta_general->dsp_false_alarms, 420 delta_general->dsp_false_alarms,
453 max_general->dsp_false_alarms); 421 max_general->dsp_false_alarms);
454 pos += scnprintf(buf + pos, bufsz - pos, 422 pos += scnprintf(buf + pos, bufsz - pos,
455 " %-30s %10u %10u %10u %10u\n", 423 fmt_table, "beacon_rssi_a:",
456 "beacon_rssi_a:",
457 le32_to_cpu(general->beacon_rssi_a), 424 le32_to_cpu(general->beacon_rssi_a),
458 accum_general->beacon_rssi_a, 425 accum_general->beacon_rssi_a,
459 delta_general->beacon_rssi_a, 426 delta_general->beacon_rssi_a,
460 max_general->beacon_rssi_a); 427 max_general->beacon_rssi_a);
461 pos += scnprintf(buf + pos, bufsz - pos, 428 pos += scnprintf(buf + pos, bufsz - pos,
462 " %-30s %10u %10u %10u %10u\n", 429 fmt_table, "beacon_rssi_b:",
463 "beacon_rssi_b:",
464 le32_to_cpu(general->beacon_rssi_b), 430 le32_to_cpu(general->beacon_rssi_b),
465 accum_general->beacon_rssi_b, 431 accum_general->beacon_rssi_b,
466 delta_general->beacon_rssi_b, 432 delta_general->beacon_rssi_b,
467 max_general->beacon_rssi_b); 433 max_general->beacon_rssi_b);
468 pos += scnprintf(buf + pos, bufsz - pos, 434 pos += scnprintf(buf + pos, bufsz - pos,
469 " %-30s %10u %10u %10u %10u\n", 435 fmt_table, "beacon_rssi_c:",
470 "beacon_rssi_c:",
471 le32_to_cpu(general->beacon_rssi_c), 436 le32_to_cpu(general->beacon_rssi_c),
472 accum_general->beacon_rssi_c, 437 accum_general->beacon_rssi_c,
473 delta_general->beacon_rssi_c, 438 delta_general->beacon_rssi_c,
474 max_general->beacon_rssi_c); 439 max_general->beacon_rssi_c);
475 pos += scnprintf(buf + pos, bufsz - pos, 440 pos += scnprintf(buf + pos, bufsz - pos,
476 " %-30s %10u %10u %10u %10u\n", 441 fmt_table, "beacon_energy_a:",
477 "beacon_energy_a:",
478 le32_to_cpu(general->beacon_energy_a), 442 le32_to_cpu(general->beacon_energy_a),
479 accum_general->beacon_energy_a, 443 accum_general->beacon_energy_a,
480 delta_general->beacon_energy_a, 444 delta_general->beacon_energy_a,
481 max_general->beacon_energy_a); 445 max_general->beacon_energy_a);
482 pos += scnprintf(buf + pos, bufsz - pos, 446 pos += scnprintf(buf + pos, bufsz - pos,
483 " %-30s %10u %10u %10u %10u\n", 447 fmt_table, "beacon_energy_b:",
484 "beacon_energy_b:",
485 le32_to_cpu(general->beacon_energy_b), 448 le32_to_cpu(general->beacon_energy_b),
486 accum_general->beacon_energy_b, 449 accum_general->beacon_energy_b,
487 delta_general->beacon_energy_b, 450 delta_general->beacon_energy_b,
488 max_general->beacon_energy_b); 451 max_general->beacon_energy_b);
489 pos += scnprintf(buf + pos, bufsz - pos, 452 pos += scnprintf(buf + pos, bufsz - pos,
490 " %-30s %10u %10u %10u %10u\n", 453 fmt_table, "beacon_energy_c:",
491 "beacon_energy_c:",
492 le32_to_cpu(general->beacon_energy_c), 454 le32_to_cpu(general->beacon_energy_c),
493 accum_general->beacon_energy_c, 455 accum_general->beacon_energy_c,
494 delta_general->beacon_energy_c, 456 delta_general->beacon_energy_c,
495 max_general->beacon_energy_c); 457 max_general->beacon_energy_c);
496 458
497 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
498 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
499 "acumulative delta max\n",
500 "Statistics_Rx - OFDM_HT:");
501 pos += scnprintf(buf + pos, bufsz - pos, 459 pos += scnprintf(buf + pos, bufsz - pos,
502 " %-30s %10u %10u %10u %10u\n", 460 fmt_header, "Statistics_Rx - OFDM_HT:");
503 "plcp_err:", 461 pos += scnprintf(buf + pos, bufsz - pos,
462 fmt_table, "plcp_err:",
504 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err, 463 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
505 delta_ht->plcp_err, max_ht->plcp_err); 464 delta_ht->plcp_err, max_ht->plcp_err);
506 pos += scnprintf(buf + pos, bufsz - pos, 465 pos += scnprintf(buf + pos, bufsz - pos,
507 " %-30s %10u %10u %10u %10u\n", 466 fmt_table, "overrun_err:",
508 "overrun_err:",
509 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, 467 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
510 delta_ht->overrun_err, max_ht->overrun_err); 468 delta_ht->overrun_err, max_ht->overrun_err);
511 pos += scnprintf(buf + pos, bufsz - pos, 469 pos += scnprintf(buf + pos, bufsz - pos,
512 " %-30s %10u %10u %10u %10u\n", 470 fmt_table, "early_overrun_err:",
513 "early_overrun_err:",
514 le32_to_cpu(ht->early_overrun_err), 471 le32_to_cpu(ht->early_overrun_err),
515 accum_ht->early_overrun_err, 472 accum_ht->early_overrun_err,
516 delta_ht->early_overrun_err, 473 delta_ht->early_overrun_err,
517 max_ht->early_overrun_err); 474 max_ht->early_overrun_err);
518 pos += scnprintf(buf + pos, bufsz - pos, 475 pos += scnprintf(buf + pos, bufsz - pos,
519 " %-30s %10u %10u %10u %10u\n", 476 fmt_table, "crc32_good:",
520 "crc32_good:",
521 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, 477 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
522 delta_ht->crc32_good, max_ht->crc32_good); 478 delta_ht->crc32_good, max_ht->crc32_good);
523 pos += scnprintf(buf + pos, bufsz - pos, 479 pos += scnprintf(buf + pos, bufsz - pos,
524 " %-30s %10u %10u %10u %10u\n", 480 fmt_table, "crc32_err:",
525 "crc32_err:",
526 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, 481 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
527 delta_ht->crc32_err, max_ht->crc32_err); 482 delta_ht->crc32_err, max_ht->crc32_err);
528 pos += scnprintf(buf + pos, bufsz - pos, 483 pos += scnprintf(buf + pos, bufsz - pos,
529 " %-30s %10u %10u %10u %10u\n", 484 fmt_table, "mh_format_err:",
530 "mh_format_err:",
531 le32_to_cpu(ht->mh_format_err), 485 le32_to_cpu(ht->mh_format_err),
532 accum_ht->mh_format_err, 486 accum_ht->mh_format_err,
533 delta_ht->mh_format_err, max_ht->mh_format_err); 487 delta_ht->mh_format_err, max_ht->mh_format_err);
534 pos += scnprintf(buf + pos, bufsz - pos, 488 pos += scnprintf(buf + pos, bufsz - pos,
535 " %-30s %10u %10u %10u %10u\n", 489 fmt_table, "agg_crc32_good:",
536 "agg_crc32_good:",
537 le32_to_cpu(ht->agg_crc32_good), 490 le32_to_cpu(ht->agg_crc32_good),
538 accum_ht->agg_crc32_good, 491 accum_ht->agg_crc32_good,
539 delta_ht->agg_crc32_good, max_ht->agg_crc32_good); 492 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
540 pos += scnprintf(buf + pos, bufsz - pos, 493 pos += scnprintf(buf + pos, bufsz - pos,
541 " %-30s %10u %10u %10u %10u\n", 494 fmt_table, "agg_mpdu_cnt:",
542 "agg_mpdu_cnt:",
543 le32_to_cpu(ht->agg_mpdu_cnt), 495 le32_to_cpu(ht->agg_mpdu_cnt),
544 accum_ht->agg_mpdu_cnt, 496 accum_ht->agg_mpdu_cnt,
545 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); 497 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
546 pos += scnprintf(buf + pos, bufsz - pos, 498 pos += scnprintf(buf + pos, bufsz - pos,
547 " %-30s %10u %10u %10u %10u\n", 499 fmt_table, "agg_cnt:",
548 "agg_cnt:",
549 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, 500 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
550 delta_ht->agg_cnt, max_ht->agg_cnt); 501 delta_ht->agg_cnt, max_ht->agg_cnt);
551 pos += scnprintf(buf + pos, bufsz - pos, 502 pos += scnprintf(buf + pos, bufsz - pos,
552 " %-30s %10u %10u %10u %10u\n", 503 fmt_table, "unsupport_mcs:",
553 "unsupport_mcs:",
554 le32_to_cpu(ht->unsupport_mcs), 504 le32_to_cpu(ht->unsupport_mcs),
555 accum_ht->unsupport_mcs, 505 accum_ht->unsupport_mcs,
556 delta_ht->unsupport_mcs, max_ht->unsupport_mcs); 506 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
@@ -597,166 +547,141 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
597 } 547 }
598 548
599 pos += iwl_statistics_flag(priv, buf, bufsz); 549 pos += iwl_statistics_flag(priv, buf, bufsz);
600 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
601 "acumulative delta max\n",
602 "Statistics_Tx:");
603 pos += scnprintf(buf + pos, bufsz - pos, 550 pos += scnprintf(buf + pos, bufsz - pos,
604 " %-30s %10u %10u %10u %10u\n", 551 fmt_header, "Statistics_Tx:");
605 "preamble:", 552 pos += scnprintf(buf + pos, bufsz - pos,
553 fmt_table, "preamble:",
606 le32_to_cpu(tx->preamble_cnt), 554 le32_to_cpu(tx->preamble_cnt),
607 accum_tx->preamble_cnt, 555 accum_tx->preamble_cnt,
608 delta_tx->preamble_cnt, max_tx->preamble_cnt); 556 delta_tx->preamble_cnt, max_tx->preamble_cnt);
609 pos += scnprintf(buf + pos, bufsz - pos, 557 pos += scnprintf(buf + pos, bufsz - pos,
610 " %-30s %10u %10u %10u %10u\n", 558 fmt_table, "rx_detected_cnt:",
611 "rx_detected_cnt:",
612 le32_to_cpu(tx->rx_detected_cnt), 559 le32_to_cpu(tx->rx_detected_cnt),
613 accum_tx->rx_detected_cnt, 560 accum_tx->rx_detected_cnt,
614 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); 561 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
615 pos += scnprintf(buf + pos, bufsz - pos, 562 pos += scnprintf(buf + pos, bufsz - pos,
616 " %-30s %10u %10u %10u %10u\n", 563 fmt_table, "bt_prio_defer_cnt:",
617 "bt_prio_defer_cnt:",
618 le32_to_cpu(tx->bt_prio_defer_cnt), 564 le32_to_cpu(tx->bt_prio_defer_cnt),
619 accum_tx->bt_prio_defer_cnt, 565 accum_tx->bt_prio_defer_cnt,
620 delta_tx->bt_prio_defer_cnt, 566 delta_tx->bt_prio_defer_cnt,
621 max_tx->bt_prio_defer_cnt); 567 max_tx->bt_prio_defer_cnt);
622 pos += scnprintf(buf + pos, bufsz - pos, 568 pos += scnprintf(buf + pos, bufsz - pos,
623 " %-30s %10u %10u %10u %10u\n", 569 fmt_table, "bt_prio_kill_cnt:",
624 "bt_prio_kill_cnt:",
625 le32_to_cpu(tx->bt_prio_kill_cnt), 570 le32_to_cpu(tx->bt_prio_kill_cnt),
626 accum_tx->bt_prio_kill_cnt, 571 accum_tx->bt_prio_kill_cnt,
627 delta_tx->bt_prio_kill_cnt, 572 delta_tx->bt_prio_kill_cnt,
628 max_tx->bt_prio_kill_cnt); 573 max_tx->bt_prio_kill_cnt);
629 pos += scnprintf(buf + pos, bufsz - pos, 574 pos += scnprintf(buf + pos, bufsz - pos,
630 " %-30s %10u %10u %10u %10u\n", 575 fmt_table, "few_bytes_cnt:",
631 "few_bytes_cnt:",
632 le32_to_cpu(tx->few_bytes_cnt), 576 le32_to_cpu(tx->few_bytes_cnt),
633 accum_tx->few_bytes_cnt, 577 accum_tx->few_bytes_cnt,
634 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); 578 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
635 pos += scnprintf(buf + pos, bufsz - pos, 579 pos += scnprintf(buf + pos, bufsz - pos,
636 " %-30s %10u %10u %10u %10u\n", 580 fmt_table, "cts_timeout:",
637 "cts_timeout:",
638 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, 581 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
639 delta_tx->cts_timeout, max_tx->cts_timeout); 582 delta_tx->cts_timeout, max_tx->cts_timeout);
640 pos += scnprintf(buf + pos, bufsz - pos, 583 pos += scnprintf(buf + pos, bufsz - pos,
641 " %-30s %10u %10u %10u %10u\n", 584 fmt_table, "ack_timeout:",
642 "ack_timeout:",
643 le32_to_cpu(tx->ack_timeout), 585 le32_to_cpu(tx->ack_timeout),
644 accum_tx->ack_timeout, 586 accum_tx->ack_timeout,
645 delta_tx->ack_timeout, max_tx->ack_timeout); 587 delta_tx->ack_timeout, max_tx->ack_timeout);
646 pos += scnprintf(buf + pos, bufsz - pos, 588 pos += scnprintf(buf + pos, bufsz - pos,
647 " %-30s %10u %10u %10u %10u\n", 589 fmt_table, "expected_ack_cnt:",
648 "expected_ack_cnt:",
649 le32_to_cpu(tx->expected_ack_cnt), 590 le32_to_cpu(tx->expected_ack_cnt),
650 accum_tx->expected_ack_cnt, 591 accum_tx->expected_ack_cnt,
651 delta_tx->expected_ack_cnt, 592 delta_tx->expected_ack_cnt,
652 max_tx->expected_ack_cnt); 593 max_tx->expected_ack_cnt);
653 pos += scnprintf(buf + pos, bufsz - pos, 594 pos += scnprintf(buf + pos, bufsz - pos,
654 " %-30s %10u %10u %10u %10u\n", 595 fmt_table, "actual_ack_cnt:",
655 "actual_ack_cnt:",
656 le32_to_cpu(tx->actual_ack_cnt), 596 le32_to_cpu(tx->actual_ack_cnt),
657 accum_tx->actual_ack_cnt, 597 accum_tx->actual_ack_cnt,
658 delta_tx->actual_ack_cnt, 598 delta_tx->actual_ack_cnt,
659 max_tx->actual_ack_cnt); 599 max_tx->actual_ack_cnt);
660 pos += scnprintf(buf + pos, bufsz - pos, 600 pos += scnprintf(buf + pos, bufsz - pos,
661 " %-30s %10u %10u %10u %10u\n", 601 fmt_table, "dump_msdu_cnt:",
662 "dump_msdu_cnt:",
663 le32_to_cpu(tx->dump_msdu_cnt), 602 le32_to_cpu(tx->dump_msdu_cnt),
664 accum_tx->dump_msdu_cnt, 603 accum_tx->dump_msdu_cnt,
665 delta_tx->dump_msdu_cnt, 604 delta_tx->dump_msdu_cnt,
666 max_tx->dump_msdu_cnt); 605 max_tx->dump_msdu_cnt);
667 pos += scnprintf(buf + pos, bufsz - pos, 606 pos += scnprintf(buf + pos, bufsz - pos,
668 " %-30s %10u %10u %10u %10u\n", 607 fmt_table, "abort_nxt_frame_mismatch:",
669 "abort_nxt_frame_mismatch:",
670 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), 608 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
671 accum_tx->burst_abort_next_frame_mismatch_cnt, 609 accum_tx->burst_abort_next_frame_mismatch_cnt,
672 delta_tx->burst_abort_next_frame_mismatch_cnt, 610 delta_tx->burst_abort_next_frame_mismatch_cnt,
673 max_tx->burst_abort_next_frame_mismatch_cnt); 611 max_tx->burst_abort_next_frame_mismatch_cnt);
674 pos += scnprintf(buf + pos, bufsz - pos, 612 pos += scnprintf(buf + pos, bufsz - pos,
675 " %-30s %10u %10u %10u %10u\n", 613 fmt_table, "abort_missing_nxt_frame:",
676 "abort_missing_nxt_frame:",
677 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), 614 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
678 accum_tx->burst_abort_missing_next_frame_cnt, 615 accum_tx->burst_abort_missing_next_frame_cnt,
679 delta_tx->burst_abort_missing_next_frame_cnt, 616 delta_tx->burst_abort_missing_next_frame_cnt,
680 max_tx->burst_abort_missing_next_frame_cnt); 617 max_tx->burst_abort_missing_next_frame_cnt);
681 pos += scnprintf(buf + pos, bufsz - pos, 618 pos += scnprintf(buf + pos, bufsz - pos,
682 " %-30s %10u %10u %10u %10u\n", 619 fmt_table, "cts_timeout_collision:",
683 "cts_timeout_collision:",
684 le32_to_cpu(tx->cts_timeout_collision), 620 le32_to_cpu(tx->cts_timeout_collision),
685 accum_tx->cts_timeout_collision, 621 accum_tx->cts_timeout_collision,
686 delta_tx->cts_timeout_collision, 622 delta_tx->cts_timeout_collision,
687 max_tx->cts_timeout_collision); 623 max_tx->cts_timeout_collision);
688 pos += scnprintf(buf + pos, bufsz - pos, 624 pos += scnprintf(buf + pos, bufsz - pos,
689 " %-30s %10u %10u %10u %10u\n", 625 fmt_table, "ack_ba_timeout_collision:",
690 "ack_ba_timeout_collision:",
691 le32_to_cpu(tx->ack_or_ba_timeout_collision), 626 le32_to_cpu(tx->ack_or_ba_timeout_collision),
692 accum_tx->ack_or_ba_timeout_collision, 627 accum_tx->ack_or_ba_timeout_collision,
693 delta_tx->ack_or_ba_timeout_collision, 628 delta_tx->ack_or_ba_timeout_collision,
694 max_tx->ack_or_ba_timeout_collision); 629 max_tx->ack_or_ba_timeout_collision);
695 pos += scnprintf(buf + pos, bufsz - pos, 630 pos += scnprintf(buf + pos, bufsz - pos,
696 " %-30s %10u %10u %10u %10u\n", 631 fmt_table, "agg ba_timeout:",
697 "agg ba_timeout:",
698 le32_to_cpu(tx->agg.ba_timeout), 632 le32_to_cpu(tx->agg.ba_timeout),
699 accum_tx->agg.ba_timeout, 633 accum_tx->agg.ba_timeout,
700 delta_tx->agg.ba_timeout, 634 delta_tx->agg.ba_timeout,
701 max_tx->agg.ba_timeout); 635 max_tx->agg.ba_timeout);
702 pos += scnprintf(buf + pos, bufsz - pos, 636 pos += scnprintf(buf + pos, bufsz - pos,
703 " %-30s %10u %10u %10u %10u\n", 637 fmt_table, "agg ba_resched_frames:",
704 "agg ba_resched_frames:",
705 le32_to_cpu(tx->agg.ba_reschedule_frames), 638 le32_to_cpu(tx->agg.ba_reschedule_frames),
706 accum_tx->agg.ba_reschedule_frames, 639 accum_tx->agg.ba_reschedule_frames,
707 delta_tx->agg.ba_reschedule_frames, 640 delta_tx->agg.ba_reschedule_frames,
708 max_tx->agg.ba_reschedule_frames); 641 max_tx->agg.ba_reschedule_frames);
709 pos += scnprintf(buf + pos, bufsz - pos, 642 pos += scnprintf(buf + pos, bufsz - pos,
710 " %-30s %10u %10u %10u %10u\n", 643 fmt_table, "agg scd_query_agg_frame:",
711 "agg scd_query_agg_frame:",
712 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), 644 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
713 accum_tx->agg.scd_query_agg_frame_cnt, 645 accum_tx->agg.scd_query_agg_frame_cnt,
714 delta_tx->agg.scd_query_agg_frame_cnt, 646 delta_tx->agg.scd_query_agg_frame_cnt,
715 max_tx->agg.scd_query_agg_frame_cnt); 647 max_tx->agg.scd_query_agg_frame_cnt);
716 pos += scnprintf(buf + pos, bufsz - pos, 648 pos += scnprintf(buf + pos, bufsz - pos,
717 " %-30s %10u %10u %10u %10u\n", 649 fmt_table, "agg scd_query_no_agg:",
718 "agg scd_query_no_agg:",
719 le32_to_cpu(tx->agg.scd_query_no_agg), 650 le32_to_cpu(tx->agg.scd_query_no_agg),
720 accum_tx->agg.scd_query_no_agg, 651 accum_tx->agg.scd_query_no_agg,
721 delta_tx->agg.scd_query_no_agg, 652 delta_tx->agg.scd_query_no_agg,
722 max_tx->agg.scd_query_no_agg); 653 max_tx->agg.scd_query_no_agg);
723 pos += scnprintf(buf + pos, bufsz - pos, 654 pos += scnprintf(buf + pos, bufsz - pos,
724 " %-30s %10u %10u %10u %10u\n", 655 fmt_table, "agg scd_query_agg:",
725 "agg scd_query_agg:",
726 le32_to_cpu(tx->agg.scd_query_agg), 656 le32_to_cpu(tx->agg.scd_query_agg),
727 accum_tx->agg.scd_query_agg, 657 accum_tx->agg.scd_query_agg,
728 delta_tx->agg.scd_query_agg, 658 delta_tx->agg.scd_query_agg,
729 max_tx->agg.scd_query_agg); 659 max_tx->agg.scd_query_agg);
730 pos += scnprintf(buf + pos, bufsz - pos, 660 pos += scnprintf(buf + pos, bufsz - pos,
731 " %-30s %10u %10u %10u %10u\n", 661 fmt_table, "agg scd_query_mismatch:",
732 "agg scd_query_mismatch:",
733 le32_to_cpu(tx->agg.scd_query_mismatch), 662 le32_to_cpu(tx->agg.scd_query_mismatch),
734 accum_tx->agg.scd_query_mismatch, 663 accum_tx->agg.scd_query_mismatch,
735 delta_tx->agg.scd_query_mismatch, 664 delta_tx->agg.scd_query_mismatch,
736 max_tx->agg.scd_query_mismatch); 665 max_tx->agg.scd_query_mismatch);
737 pos += scnprintf(buf + pos, bufsz - pos, 666 pos += scnprintf(buf + pos, bufsz - pos,
738 " %-30s %10u %10u %10u %10u\n", 667 fmt_table, "agg frame_not_ready:",
739 "agg frame_not_ready:",
740 le32_to_cpu(tx->agg.frame_not_ready), 668 le32_to_cpu(tx->agg.frame_not_ready),
741 accum_tx->agg.frame_not_ready, 669 accum_tx->agg.frame_not_ready,
742 delta_tx->agg.frame_not_ready, 670 delta_tx->agg.frame_not_ready,
743 max_tx->agg.frame_not_ready); 671 max_tx->agg.frame_not_ready);
744 pos += scnprintf(buf + pos, bufsz - pos, 672 pos += scnprintf(buf + pos, bufsz - pos,
745 " %-30s %10u %10u %10u %10u\n", 673 fmt_table, "agg underrun:",
746 "agg underrun:",
747 le32_to_cpu(tx->agg.underrun), 674 le32_to_cpu(tx->agg.underrun),
748 accum_tx->agg.underrun, 675 accum_tx->agg.underrun,
749 delta_tx->agg.underrun, max_tx->agg.underrun); 676 delta_tx->agg.underrun, max_tx->agg.underrun);
750 pos += scnprintf(buf + pos, bufsz - pos, 677 pos += scnprintf(buf + pos, bufsz - pos,
751 " %-30s %10u %10u %10u %10u\n", 678 fmt_table, "agg bt_prio_kill:",
752 "agg bt_prio_kill:",
753 le32_to_cpu(tx->agg.bt_prio_kill), 679 le32_to_cpu(tx->agg.bt_prio_kill),
754 accum_tx->agg.bt_prio_kill, 680 accum_tx->agg.bt_prio_kill,
755 delta_tx->agg.bt_prio_kill, 681 delta_tx->agg.bt_prio_kill,
756 max_tx->agg.bt_prio_kill); 682 max_tx->agg.bt_prio_kill);
757 pos += scnprintf(buf + pos, bufsz - pos, 683 pos += scnprintf(buf + pos, bufsz - pos,
758 " %-30s %10u %10u %10u %10u\n", 684 fmt_table, "agg rx_ba_rsp_cnt:",
759 "agg rx_ba_rsp_cnt:",
760 le32_to_cpu(tx->agg.rx_ba_rsp_cnt), 685 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
761 accum_tx->agg.rx_ba_rsp_cnt, 686 accum_tx->agg.rx_ba_rsp_cnt,
762 delta_tx->agg.rx_ba_rsp_cnt, 687 delta_tx->agg.rx_ba_rsp_cnt,
@@ -767,15 +692,15 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
767 "tx power: (1/2 dB step)\n"); 692 "tx power: (1/2 dB step)\n");
768 if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a) 693 if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
769 pos += scnprintf(buf + pos, bufsz - pos, 694 pos += scnprintf(buf + pos, bufsz - pos,
770 "\tantenna A: 0x%X\n", 695 fmt_hex, "antenna A:",
771 tx->tx_power.ant_a); 696 tx->tx_power.ant_a);
772 if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b) 697 if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
773 pos += scnprintf(buf + pos, bufsz - pos, 698 pos += scnprintf(buf + pos, bufsz - pos,
774 "\tantenna B: 0x%X\n", 699 fmt_hex, "antenna B:",
775 tx->tx_power.ant_b); 700 tx->tx_power.ant_b);
776 if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c) 701 if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
777 pos += scnprintf(buf + pos, bufsz - pos, 702 pos += scnprintf(buf + pos, bufsz - pos,
778 "\tantenna C: 0x%X\n", 703 fmt_hex, "antenna C:",
779 tx->tx_power.ant_c); 704 tx->tx_power.ant_c);
780 } 705 }
781 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 706 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
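Note: the hunks above replace hand-rolled format strings with shared fmt_header, fmt_table, fmt_value and fmt_hex constants. Their definitions are outside the visible hunks; judging from the literal strings they replace, they are presumably declared near the top of iwl-agn-debugfs.c along these lines (column widths, and the "acumulative" spelling carried over from the removed string, are an assumption):

	/* Presumed definitions, inferred from the format strings the
	 * patch removes; not shown in these hunks. */
	static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
	static const char *fmt_header =
		"%-32s    current acumulative       delta         max\n";
	static const char *fmt_value = " %-30s %10u\n";
	static const char *fmt_hex = " %-30s      0x%02X\n";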
@@ -838,84 +763,72 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
 	}
 
 	pos += iwl_statistics_flag(priv, buf, bufsz);
-	pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
-			 "acumulative delta max\n",
-			 "Statistics_General:");
-	pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
-			 "temperature:",
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_header, "Statistics_General:");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_value, "temperature:",
 			 le32_to_cpu(general->temperature));
-	pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
-			 "temperature_m:",
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_value, "temperature_m:",
 			 le32_to_cpu(general->temperature_m));
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "burst_check:",
+			 fmt_value, "ttl_timestamp:",
+			 le32_to_cpu(general->ttl_timestamp));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "burst_check:",
 			 le32_to_cpu(dbg->burst_check),
 			 accum_dbg->burst_check,
 			 delta_dbg->burst_check, max_dbg->burst_check);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "burst_count:",
+			 fmt_table, "burst_count:",
 			 le32_to_cpu(dbg->burst_count),
 			 accum_dbg->burst_count,
 			 delta_dbg->burst_count, max_dbg->burst_count);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "wait_for_silence_timeout_count:",
+			 fmt_table, "wait_for_silence_timeout_count:",
 			 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
 			 accum_dbg->wait_for_silence_timeout_cnt,
 			 delta_dbg->wait_for_silence_timeout_cnt,
 			 max_dbg->wait_for_silence_timeout_cnt);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "sleep_time:",
+			 fmt_table, "sleep_time:",
 			 le32_to_cpu(general->sleep_time),
 			 accum_general->sleep_time,
 			 delta_general->sleep_time, max_general->sleep_time);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "slots_out:",
+			 fmt_table, "slots_out:",
 			 le32_to_cpu(general->slots_out),
 			 accum_general->slots_out,
 			 delta_general->slots_out, max_general->slots_out);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "slots_idle:",
+			 fmt_table, "slots_idle:",
 			 le32_to_cpu(general->slots_idle),
 			 accum_general->slots_idle,
 			 delta_general->slots_idle, max_general->slots_idle);
-	pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
-			 le32_to_cpu(general->ttl_timestamp));
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "tx_on_a:",
+			 fmt_table, "tx_on_a:",
 			 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
 			 delta_div->tx_on_a, max_div->tx_on_a);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "tx_on_b:",
+			 fmt_table, "tx_on_b:",
 			 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
 			 delta_div->tx_on_b, max_div->tx_on_b);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "exec_time:",
+			 fmt_table, "exec_time:",
 			 le32_to_cpu(div->exec_time), accum_div->exec_time,
 			 delta_div->exec_time, max_div->exec_time);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "probe_time:",
+			 fmt_table, "probe_time:",
 			 le32_to_cpu(div->probe_time), accum_div->probe_time,
 			 delta_div->probe_time, max_div->probe_time);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "rx_enable_counter:",
+			 fmt_table, "rx_enable_counter:",
 			 le32_to_cpu(general->rx_enable_counter),
 			 accum_general->rx_enable_counter,
 			 delta_general->rx_enable_counter,
 			 max_general->rx_enable_counter);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 " %-30s %10u %10u %10u %10u\n",
-			 "num_of_sos_states:",
+			 fmt_table, "num_of_sos_states:",
 			 le32_to_cpu(general->num_of_sos_states),
 			 accum_general->num_of_sos_states,
 			 delta_general->num_of_sos_states,
@@ -1011,3 +924,147 @@ ssize_t iwl_ucode_bt_stats_read(struct file *file,
 	kfree(buf);
 	return ret;
 }
+
+ssize_t iwl_reply_tx_error_read(struct file *file,
+				char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = (sizeof(struct reply_tx_error_statistics) * 24) +
+		(sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
+	ssize_t ret;
+
+	if (!iwl_is_alive(priv))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IWL_ERR(priv, "Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY),
+			 priv->_agn.reply_tx_stats.pp_delay);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES),
+			 priv->_agn.reply_tx_stats.pp_few_bytes);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO),
+			 priv->_agn.reply_tx_stats.pp_bt_prio);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD),
+			 priv->_agn.reply_tx_stats.pp_quiet_period);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK),
+			 priv->_agn.reply_tx_stats.pp_calc_ttak);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_tx_fail_reason(
+				TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY),
+			 priv->_agn.reply_tx_stats.int_crossed_retry);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT),
+			 priv->_agn.reply_tx_stats.short_limit);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT),
+			 priv->_agn.reply_tx_stats.long_limit);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN),
+			 priv->_agn.reply_tx_stats.fifo_underrun);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW),
+			 priv->_agn.reply_tx_stats.drain_flow);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH),
+			 priv->_agn.reply_tx_stats.rfkill_flush);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE),
+			 priv->_agn.reply_tx_stats.life_expire);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS),
+			 priv->_agn.reply_tx_stats.dest_ps);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED),
+			 priv->_agn.reply_tx_stats.host_abort);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY),
+			 priv->_agn.reply_tx_stats.bt_retry);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID),
+			 priv->_agn.reply_tx_stats.sta_invalid);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED),
+			 priv->_agn.reply_tx_stats.frag_drop);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE),
+			 priv->_agn.reply_tx_stats.tid_disable);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED),
+			 priv->_agn.reply_tx_stats.fifo_flush);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_tx_fail_reason(
+				TX_STATUS_FAIL_INSUFFICIENT_CF_POLL),
+			 priv->_agn.reply_tx_stats.insuff_cf_poll);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX),
+			 priv->_agn.reply_tx_stats.fail_hw_drop);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_tx_fail_reason(
+				TX_STATUS_FAIL_NO_BEACON_ON_RADAR),
+			 priv->_agn.reply_tx_stats.sta_color_mismatch);
+	pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
+			 priv->_agn.reply_tx_stats.unknown);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "\nStatistics_Agg_TX_Error:\n");
+
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK),
+			 priv->_agn.reply_agg_tx_stats.underrun);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK),
+			 priv->_agn.reply_agg_tx_stats.bt_prio);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK),
+			 priv->_agn.reply_agg_tx_stats.few_bytes);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK),
+			 priv->_agn.reply_agg_tx_stats.abort);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(
+				AGG_TX_STATE_LAST_SENT_TTL_MSK),
+			 priv->_agn.reply_agg_tx_stats.last_sent_ttl);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(
+				AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK),
+			 priv->_agn.reply_agg_tx_stats.last_sent_try);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(
+				AGG_TX_STATE_LAST_SENT_BT_KILL_MSK),
+			 priv->_agn.reply_agg_tx_stats.last_sent_bt_kill);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK),
+			 priv->_agn.reply_agg_tx_stats.scd_query);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(
+				AGG_TX_STATE_TEST_BAD_CRC32_MSK),
+			 priv->_agn.reply_agg_tx_stats.bad_crc32);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK),
+			 priv->_agn.reply_agg_tx_stats.response);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK),
+			 priv->_agn.reply_agg_tx_stats.dump_tx);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK),
+			 priv->_agn.reply_agg_tx_stats.delay_tx);
+	pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
+			 priv->_agn.reply_agg_tx_stats.unknown);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
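Note: the TX_STATUS_FAIL_BT_RETRY line above is printed from the bt_retry counter; it matches the field that iwlagn_count_tx_err_status() increments for that status in the iwl-agn-lib.c hunk below. More generally, iwl_reply_tx_error_read() is the standard one-shot debugfs read: size a worst-case buffer, accumulate text with scnprintf() (which never overruns and returns the bytes written), then let simple_read_from_buffer() handle the user copy and file offset. A minimal stand-alone sketch of the same pattern; my_dev and my_counter are hypothetical names, not part of the driver:

	struct my_dev {
		u32 my_counter;
	};

	static ssize_t my_dev_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
	{
		struct my_dev *dev = file->private_data;
		int pos = 0, bufsz = 256;	/* worst-case report size */
		char *buf;
		ssize_t ret;

		buf = kzalloc(bufsz, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* scnprintf() stays within bufsz and returns bytes written */
		pos += scnprintf(buf + pos, bufsz - pos, "counter:\t%u\n",
				 dev->my_counter);

		ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
		kfree(buf);
		return ret;
	}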
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
index bbdce5913ac7..f2573b5486cd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
@@ -39,6 +39,8 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos);
 ssize_t iwl_ucode_bt_stats_read(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos);
+ssize_t iwl_reply_tx_error_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos);
 #else
 static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
 				       size_t count, loff_t *ppos)
@@ -60,4 +62,9 @@ static ssize_t iwl_ucode_bt_stats_read(struct file *file, char __user *user_buf,
 {
 	return 0;
 }
+static ssize_t iwl_reply_tx_error_read(struct file *file, char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	return 0;
+}
 #endif
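The header pairs each real prototype with a no-op fallback so call sites need no #ifdef of their own. The usual shape of the pattern, sketched with a hypothetical reader; note that "static inline" (rather than the plain "static" used above) is the more conventional choice for header-defined stubs, since it avoids defined-but-unused warnings in translation units that never call them:

	#ifdef CONFIG_IWLWIFI_DEBUGFS
	ssize_t my_stats_read(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos);
	#else
	static inline ssize_t my_stats_read(struct file *file,
					    char __user *user_buf,
					    size_t count, loff_t *ppos)
	{
		return 0;	/* debugfs compiled out: nothing to report */
	}
	#endif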
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 75b901b3eb1e..d86902b83630 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -37,12 +37,13 @@
 #include "iwl-io.h"
 #include "iwl-agn.h"
 
-int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
+int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
+			   struct iwl_rxon_context *ctx)
 {
 	int ret = 0;
 	struct iwl5000_rxon_assoc_cmd rxon_assoc;
-	const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
-	const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
+	const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
+	const struct iwl_rxon_cmd *rxon2 = &ctx->active;
 
 	if ((rxon1->flags == rxon2->flags) &&
 	    (rxon1->filter_flags == rxon2->filter_flags) &&
@@ -60,23 +61,23 @@ int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
 		return 0;
 	}
 
-	rxon_assoc.flags = priv->staging_rxon.flags;
-	rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
-	rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
-	rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
+	rxon_assoc.flags = ctx->staging.flags;
+	rxon_assoc.filter_flags = ctx->staging.filter_flags;
+	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
 	rxon_assoc.reserved1 = 0;
 	rxon_assoc.reserved2 = 0;
 	rxon_assoc.reserved3 = 0;
 	rxon_assoc.ofdm_ht_single_stream_basic_rates =
-		priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
+		ctx->staging.ofdm_ht_single_stream_basic_rates;
 	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
-		priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
-	rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
+		ctx->staging.ofdm_ht_dual_stream_basic_rates;
+	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
 	rxon_assoc.ofdm_ht_triple_stream_basic_rates =
-		priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
-	rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
+		ctx->staging.ofdm_ht_triple_stream_basic_rates;
+	rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
 
-	ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
+	ret = iwl_send_cmd_pdu_async(priv, ctx->rxon_assoc_cmd,
 				     sizeof(rxon_assoc), &rxon_assoc, NULL);
 	if (ret)
 		return ret;
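With the new ctx parameter, one function serves both RXON contexts (BSS and PAN), and the command ID comes from ctx->rxon_assoc_cmd rather than the fixed REPLY_RXON_ASSOC. An illustrative usage sketch only; for_each_context() is the driver's context iterator, but no real call site looping like this is shown in these hunks:

	/* Illustrative only: push RXON_ASSOC for every active context. */
	struct iwl_rxon_context *ctx;
	int ret;

	for_each_context(priv, ctx) {
		ret = iwlagn_send_rxon_assoc(priv, ctx);
		if (ret)
			IWL_ERR(priv, "RXON_ASSOC failed for context %d (%d)\n",
				ctx->ctxid, ret);
	}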
@@ -184,7 +185,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
 	int ret;
 
 	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
-	    iwl_is_associated(priv)) {
+	    iwl_is_any_associated(priv)) {
 		struct iwl_calib_chain_noise_reset_cmd cmd;
 
 		/* clear data for chain noise calibration algorithm */
@@ -235,13 +236,13 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
 	/* data from PHY/DSP regarding signal strength, etc.,
 	 *   contents are always there, not configurable by host
 	 */
-	struct iwl5000_non_cfg_phy *ncphy =
-		(struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+	struct iwlagn_non_cfg_phy *ncphy =
+		(struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
 	u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
 	u8 agc;
 
-	val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
-	agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
+	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]);
+	agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS;
 
 	/* Find max rssi among 3 possible receivers.
 	 * These values are measured by the digital signal processor (DSP).
@@ -249,11 +250,14 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
 	 * if the radio's automatic gain control (AGC) is working right.
 	 * AGC value (see below) will provide the "interesting" info.
 	 */
-	val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
-	rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
-	rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
-	val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
-	rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
+	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
+	rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
+		IWLAGN_OFDM_RSSI_A_BIT_POS;
+	rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
+		IWLAGN_OFDM_RSSI_B_BIT_POS;
+	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]);
+	rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >>
+		IWLAGN_OFDM_RSSI_C_BIT_POS;
 
 	max_rssi = max_t(u32, rssi_a, rssi_b);
 	max_rssi = max_t(u32, max_rssi, rssi_c);
@@ -266,12 +270,109 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
 	return max_rssi - agc - IWLAGN_RSSI_OFFSET;
 }
 
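The RSSI path above reduces three per-chain DSP readings to one signal value: take the strongest chain, subtract the AGC gain, then subtract a constant offset. Worked through with invented inputs (IWLAGN_RSSI_OFFSET is defined elsewhere in the driver; 44 is assumed here):

	/* Invented inputs; IWLAGN_RSSI_OFFSET assumed to be 44. */
	u32 rssi_a = 70, rssi_b = 88, rssi_c = 81;
	u8 agc = 20;
	u32 max_rssi = max_t(u32, max_t(u32, rssi_a, rssi_b), rssi_c); /* 88 */
	int sig = max_rssi - agc - 44;	/* 88 - 20 - 44 = 24 */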
+static int iwlagn_set_pan_params(struct iwl_priv *priv)
+{
+	struct iwl_wipan_params_cmd cmd;
+	struct iwl_rxon_context *ctx_bss, *ctx_pan;
+	int slot0 = 300, slot1 = 0;
+	int ret;
+
+	if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
+		return 0;
+
+	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+	lockdep_assert_held(&priv->mutex);
+
+	ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
+	ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
+
+	/*
+	 * If the PAN context is inactive, then we don't need
+	 * to update the PAN parameters, the last thing we'll
+	 * have done before it goes inactive is making the PAN
+	 * parameters be WLAN-only.
+	 */
+	if (!ctx_pan->is_active)
+		return 0;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	/* only 2 slots are currently allowed */
+	cmd.num_slots = 2;
+
+	cmd.slots[0].type = 0; /* BSS */
+	cmd.slots[1].type = 1; /* PAN */
+
+	if (ctx_bss->vif && ctx_pan->vif) {
+		int bcnint = ctx_pan->vif->bss_conf.beacon_int;
+
+		/* should be set, but seems unused?? */
+		cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
+
+		if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
+		    bcnint &&
+		    bcnint != ctx_bss->vif->bss_conf.beacon_int) {
+			IWL_ERR(priv,
+				"beacon intervals don't match (%d, %d)\n",
+				ctx_bss->vif->bss_conf.beacon_int,
+				ctx_pan->vif->bss_conf.beacon_int);
+		} else
+			bcnint = max_t(int, bcnint,
+				       ctx_bss->vif->bss_conf.beacon_int);
+		if (!bcnint)
+			bcnint = DEFAULT_BEACON_INTERVAL;
+		slot0 = bcnint / 2;
+		slot1 = bcnint - slot0;
+
+		if (test_bit(STATUS_SCAN_HW, &priv->status) ||
+		    (!ctx_bss->vif->bss_conf.idle &&
+		     !ctx_bss->vif->bss_conf.assoc)) {
+			slot0 = bcnint * 3 - 20;
+			slot1 = 20;
+		} else if (!ctx_pan->vif->bss_conf.idle &&
+			   !ctx_pan->vif->bss_conf.assoc) {
+			slot1 = bcnint * 3 - 20;
+			slot0 = 20;
+		}
+	} else if (ctx_pan->vif) {
+		slot0 = 0;
+		slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
+			ctx_pan->vif->bss_conf.beacon_int;
+		slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
+
+		if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+			slot0 = slot1 * 3 - 20;
+			slot1 = 20;
+		}
+	}
+
+	cmd.slots[0].width = cpu_to_le16(slot0);
+	cmd.slots[1].width = cpu_to_le16(slot1);
+
+	ret = iwl_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, sizeof(cmd), &cmd);
+	if (ret)
+		IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
+
+	return ret;
+}
+
 struct iwl_hcmd_ops iwlagn_hcmd = {
 	.rxon_assoc = iwlagn_send_rxon_assoc,
 	.commit_rxon = iwl_commit_rxon,
 	.set_rxon_chain = iwl_set_rxon_chain,
 	.set_tx_ant = iwlagn_send_tx_ant_config,
 	.send_bt_config = iwl_send_bt_config,
+	.set_pan_params = iwlagn_set_pan_params,
+};
+
+struct iwl_hcmd_ops iwlagn_bt_hcmd = {
+	.rxon_assoc = iwlagn_send_rxon_assoc,
+	.commit_rxon = iwl_commit_rxon,
+	.set_rxon_chain = iwl_set_rxon_chain,
+	.set_tx_ant = iwlagn_send_tx_ant_config,
+	.send_bt_config = iwlagn_send_advance_bt_config,
+	.set_pan_params = iwlagn_set_pan_params,
 };
 
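iwlagn_set_pan_params() time-slices the radio between the two contexts by splitting each beacon interval into a BSS slot and a PAN slot: an even split when both interfaces are up, but skewed to three beacon intervals minus a 20 TU guard toward whichever side is scanning or still associating. Worked through with invented numbers:

	/* Invented numbers: beacon_int = 100 TU, both vifs up and idle. */
	int bcnint = 100;
	int slot0 = bcnint / 2;		/* BSS slot:  50 TU */
	int slot1 = bcnint - slot0;	/* PAN slot:  50 TU */

	/* While the BSS side is scanning or not yet associated: */
	slot0 = bcnint * 3 - 20;	/* BSS slot: 280 TU */
	slot1 = 20;			/* PAN slot:  20 TU */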
 struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 9dd9e64c2b0b..299fd9d59604 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -46,6 +46,181 @@ static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
 		tx_resp->frame_count) & MAX_SN;
 }
 
+static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
+{
+	status &= TX_STATUS_MSK;
+
+	switch (status) {
+	case TX_STATUS_POSTPONE_DELAY:
+		priv->_agn.reply_tx_stats.pp_delay++;
+		break;
+	case TX_STATUS_POSTPONE_FEW_BYTES:
+		priv->_agn.reply_tx_stats.pp_few_bytes++;
+		break;
+	case TX_STATUS_POSTPONE_BT_PRIO:
+		priv->_agn.reply_tx_stats.pp_bt_prio++;
+		break;
+	case TX_STATUS_POSTPONE_QUIET_PERIOD:
+		priv->_agn.reply_tx_stats.pp_quiet_period++;
+		break;
+	case TX_STATUS_POSTPONE_CALC_TTAK:
+		priv->_agn.reply_tx_stats.pp_calc_ttak++;
+		break;
+	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
+		priv->_agn.reply_tx_stats.int_crossed_retry++;
+		break;
+	case TX_STATUS_FAIL_SHORT_LIMIT:
+		priv->_agn.reply_tx_stats.short_limit++;
+		break;
+	case TX_STATUS_FAIL_LONG_LIMIT:
+		priv->_agn.reply_tx_stats.long_limit++;
+		break;
+	case TX_STATUS_FAIL_FIFO_UNDERRUN:
+		priv->_agn.reply_tx_stats.fifo_underrun++;
+		break;
+	case TX_STATUS_FAIL_DRAIN_FLOW:
+		priv->_agn.reply_tx_stats.drain_flow++;
+		break;
+	case TX_STATUS_FAIL_RFKILL_FLUSH:
+		priv->_agn.reply_tx_stats.rfkill_flush++;
+		break;
+	case TX_STATUS_FAIL_LIFE_EXPIRE:
+		priv->_agn.reply_tx_stats.life_expire++;
+		break;
+	case TX_STATUS_FAIL_DEST_PS:
+		priv->_agn.reply_tx_stats.dest_ps++;
+		break;
+	case TX_STATUS_FAIL_HOST_ABORTED:
+		priv->_agn.reply_tx_stats.host_abort++;
+		break;
+	case TX_STATUS_FAIL_BT_RETRY:
+		priv->_agn.reply_tx_stats.bt_retry++;
+		break;
+	case TX_STATUS_FAIL_STA_INVALID:
+		priv->_agn.reply_tx_stats.sta_invalid++;
+		break;
+	case TX_STATUS_FAIL_FRAG_DROPPED:
+		priv->_agn.reply_tx_stats.frag_drop++;
+		break;
+	case TX_STATUS_FAIL_TID_DISABLE:
+		priv->_agn.reply_tx_stats.tid_disable++;
+		break;
+	case TX_STATUS_FAIL_FIFO_FLUSHED:
+		priv->_agn.reply_tx_stats.fifo_flush++;
+		break;
+	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
+		priv->_agn.reply_tx_stats.insuff_cf_poll++;
+		break;
+	case TX_STATUS_FAIL_PASSIVE_NO_RX:
+		priv->_agn.reply_tx_stats.fail_hw_drop++;
+		break;
+	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
+		priv->_agn.reply_tx_stats.sta_color_mismatch++;
+		break;
+	default:
+		priv->_agn.reply_tx_stats.unknown++;
+		break;
+	}
+}
+
+static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
+{
+	status &= AGG_TX_STATUS_MSK;
+
+	switch (status) {
+	case AGG_TX_STATE_UNDERRUN_MSK:
+		priv->_agn.reply_agg_tx_stats.underrun++;
+		break;
+	case AGG_TX_STATE_BT_PRIO_MSK:
+		priv->_agn.reply_agg_tx_stats.bt_prio++;
+		break;
+	case AGG_TX_STATE_FEW_BYTES_MSK:
+		priv->_agn.reply_agg_tx_stats.few_bytes++;
+		break;
+	case AGG_TX_STATE_ABORT_MSK:
+		priv->_agn.reply_agg_tx_stats.abort++;
+		break;
+	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
+		priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
+		break;
+	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
+		priv->_agn.reply_agg_tx_stats.last_sent_try++;
+		break;
+	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
+		priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
+		break;
+	case AGG_TX_STATE_SCD_QUERY_MSK:
+		priv->_agn.reply_agg_tx_stats.scd_query++;
+		break;
+	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
+		priv->_agn.reply_agg_tx_stats.bad_crc32++;
+		break;
+	case AGG_TX_STATE_RESPONSE_MSK:
+		priv->_agn.reply_agg_tx_stats.response++;
+		break;
+	case AGG_TX_STATE_DUMP_TX_MSK:
+		priv->_agn.reply_agg_tx_stats.dump_tx++;
+		break;
+	case AGG_TX_STATE_DELAY_TX_MSK:
+		priv->_agn.reply_agg_tx_stats.delay_tx++;
+		break;
+	default:
+		priv->_agn.reply_agg_tx_stats.unknown++;
+		break;
+	}
+}
+
+static void iwlagn_set_tx_status(struct iwl_priv *priv,
+				 struct ieee80211_tx_info *info,
+				 struct iwl5000_tx_resp *tx_resp,
+				 int txq_id, bool is_agg)
+{
+	u16 status = le16_to_cpu(tx_resp->status.status);
+
+	info->status.rates[0].count = tx_resp->failure_frame + 1;
+	if (is_agg)
+		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+	info->flags |= iwl_tx_status_to_mac80211(status);
+	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
+				    info);
+	if (!iwl_is_tx_success(status))
+		iwlagn_count_tx_err_status(priv, status);
+
+	IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
+			   "0x%x retries %d\n",
+			   txq_id,
+			   iwl_get_tx_fail_reason(status), status,
+			   le32_to_cpu(tx_resp->rate_n_flags),
+			   tx_resp->failure_frame);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
+
+const char *iwl_get_agg_tx_fail_reason(u16 status)
+{
+	status &= AGG_TX_STATUS_MSK;
+	switch (status) {
+	case AGG_TX_STATE_TRANSMITTED:
+		return "SUCCESS";
+	AGG_TX_STATE_FAIL(UNDERRUN_MSK);
+	AGG_TX_STATE_FAIL(BT_PRIO_MSK);
+	AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
+	AGG_TX_STATE_FAIL(ABORT_MSK);
+	AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
+	AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
+	AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
+	AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
+	AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
+	AGG_TX_STATE_FAIL(RESPONSE_MSK);
+	AGG_TX_STATE_FAIL(DUMP_TX_MSK);
+	AGG_TX_STATE_FAIL(DELAY_TX_MSK);
+	}
+
+	return "UNKNOWN";
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
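The AGG_TX_STATE_FAIL macro above uses the preprocessor's token-paste (##) and stringize (#) operators so each case label and its printable name stay in sync automatically. A self-contained illustration of the same trick; all demo names are invented:

	#include <stdio.h>

	#define DEMO_FAIL(x) case DEMO_STATE_ ## x: return #x

	enum { DEMO_STATE_OK, DEMO_STATE_UNDERRUN, DEMO_STATE_ABORT };

	static const char *demo_state_name(int state)
	{
		switch (state) {
		case DEMO_STATE_OK:
			return "SUCCESS";
		/* expands to: case DEMO_STATE_UNDERRUN: return "UNDERRUN"; */
		DEMO_FAIL(UNDERRUN);
		DEMO_FAIL(ABORT);
		}
		return "UNKNOWN";
	}

	int main(void)
	{
		printf("%s\n", demo_state_name(DEMO_STATE_ABORT)); /* ABORT */
		return 0;
	}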
 static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
 				     struct iwl_ht_agg *agg,
 				     struct iwl5000_tx_resp *tx_resp,
@@ -53,9 +228,7 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
 {
 	u16 status;
 	struct agg_tx_status *frame_status = &tx_resp->status;
-	struct ieee80211_tx_info *info = NULL;
 	struct ieee80211_hdr *hdr = NULL;
-	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
 	int i, sh, idx;
 	u16 seq;
 
@@ -64,31 +237,20 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
 
 	agg->frame_count = tx_resp->frame_count;
 	agg->start_idx = start_idx;
-	agg->rate_n_flags = rate_n_flags;
+	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
 	agg->bitmap = 0;
 
 	/* # frames attempted by Tx command */
 	if (agg->frame_count == 1) {
 		/* Only one frame was attempted; no block-ack will arrive */
-		status = le16_to_cpu(frame_status[0].status);
 		idx = start_idx;
 
-		/* FIXME: code repetition */
 		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
 				   agg->frame_count, agg->start_idx, idx);
-
-		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
-		info->status.rates[0].count = tx_resp->failure_frame + 1;
-		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-		info->flags |= iwl_tx_status_to_mac80211(status);
-		iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
-
-		/* FIXME: code repetition end */
-
-		IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
-				   status & 0xff, tx_resp->failure_frame);
-		IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
-
+		iwlagn_set_tx_status(priv,
+				     IEEE80211_SKB_CB(
+					priv->txq[txq_id].txb[idx].skb),
+				     tx_resp, txq_id, true);
 		agg->wait_for_ba = 0;
 	} else {
 		/* Two or more frames were attempted; expect block-ack */
@@ -109,12 +271,20 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
 			idx = SEQ_TO_INDEX(seq);
 			txq_id = SEQ_TO_QUEUE(seq);
 
+			if (status & AGG_TX_STATUS_MSK)
+				iwlagn_count_agg_tx_err_status(priv, status);
+
 			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
 				      AGG_TX_STATE_ABORT_MSK))
 				continue;
 
 			IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
 					   agg->frame_count, txq_id, idx);
+			IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
+					   "try-count (0x%08x)\n",
+					   iwl_get_agg_tx_fail_reason(status),
+					   status & AGG_TX_STATUS_MSK,
+					   status & AGG_TX_TRY_MSK);
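Each agg_tx_status entry packs a completion code and a try count into one 16-bit word, which the two masks above split apart. A sketch of the decode; 0x0fff and the shift of 12 are invented stand-ins for AGG_TX_STATUS_MSK and AGG_TX_TRY_MSK, whose real values live in iwl-commands.h and are not shown in these hunks:

	u16 word = le16_to_cpu(frame_status[i].status);
	u16 code = word & 0x0fff;		/* completion code */
	u16 tries = (word & 0xf000) >> 12;	/* attempts for this frame */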
 
 			hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
 			if (!hdr) {
@@ -247,7 +417,14 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
 		struct iwl_ht_agg *agg;
 
 		agg = &priv->stations[sta_id].tid[tid].agg;
-
+		/*
+		 * If the BT kill count is non-zero, we'll get this
+		 * notification again.
+		 */
+		if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
+		    priv->cfg->advanced_bt_coexist) {
+			IWL_WARN(priv, "receive reply tx with bt_kill\n");
+		}
 		iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
 
 		/* check if BAR is needed */
@@ -274,20 +451,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
 		}
 	} else {
 		BUG_ON(txq_id != txq->swq_id);
-
-		info->status.rates[0].count = tx_resp->failure_frame + 1;
-		info->flags |= iwl_tx_status_to_mac80211(status);
-		iwlagn_hwrate_to_tx_control(priv,
-					le32_to_cpu(tx_resp->rate_n_flags),
-					info);
-
-		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
-				   "0x%x retries %d\n",
-				   txq_id,
-				   iwl_get_tx_fail_reason(status), status,
-				   le32_to_cpu(tx_resp->rate_n_flags),
-				   tx_resp->failure_frame);
-
+		iwlagn_set_tx_status(priv, info, tx_resp, txq_id, false);
 		freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
 		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
@@ -1098,7 +1262,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
 			if (chan->band != band)
 				continue;
 
-			channel = ieee80211_frequency_to_channel(chan->center_freq);
+			channel = chan->hw_value;
 			scan_ch->channel = cpu_to_le16(channel);
 
 			ch_info = iwl_get_channel_info(priv, band, channel);
@@ -1147,7 +1311,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
 	return added;
 }
 
-void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 {
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_SCAN_CMD,
@@ -1155,7 +1319,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 		.flags = CMD_SIZE_HUGE,
 	};
 	struct iwl_scan_cmd *scan;
-	struct ieee80211_conf *conf = NULL;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 	u32 rate_flags = 0;
 	u16 cmd_len;
 	u16 rx_chain = 0;
@@ -1167,48 +1331,12 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	int chan_mod;
 	u8 active_chains;
 	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
+	int ret;
 
-	conf = ieee80211_get_hw_conf(priv->hw);
-
-	cancel_delayed_work(&priv->scan_check);
-
-	if (!iwl_is_ready(priv)) {
-		IWL_WARN(priv, "request scan called when driver not ready.\n");
-		goto done;
-	}
-
-	/* Make sure the scan wasn't canceled before this queued work
-	 * was given the chance to run... */
-	if (!test_bit(STATUS_SCANNING, &priv->status))
-		goto done;
-
-	/* This should never be called or scheduled if there is currently
-	 * a scan active in the hardware. */
-	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
-		IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
-			       "Ignoring second request.\n");
-		goto done;
-	}
-
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
-		IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
-		goto done;
-	}
-
-	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
-		IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
-		goto done;
-	}
-
-	if (iwl_is_rfkill(priv)) {
-		IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
-		goto done;
-	}
+	lockdep_assert_held(&priv->mutex);
 
-	if (!test_bit(STATUS_READY, &priv->status)) {
-		IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
-		goto done;
-	}
+	if (vif)
+		ctx = iwl_rxon_ctx_from_vif(vif);
 
 	if (!priv->scan_cmd) {
 		priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
@@ -1216,7 +1344,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 		if (!priv->scan_cmd) {
 			IWL_DEBUG_SCAN(priv,
 				       "fail to allocate memory for scan\n");
-			goto done;
+			return -ENOMEM;
 		}
 	}
 	scan = priv->scan_cmd;
@@ -1225,7 +1353,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
 	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
 
-	if (iwl_is_associated(priv)) {
+	if (iwl_is_any_associated(priv)) {
 		u16 interval = 0;
 		u32 extra;
 		u32 suspend_time = 100;
@@ -1276,13 +1404,15 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 		IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
 
 	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
-	scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
+	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
 	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
 
 	switch (priv->scan_band) {
 	case IEEE80211_BAND_2GHZ:
 		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
-		chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
+		chan_mod = le32_to_cpu(
+			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
+			RXON_FLG_CHANNEL_MODE_MSK)
 			>> RXON_FLG_CHANNEL_MODE_POS;
 		if (chan_mod == CHANNEL_MODE_PURE_40) {
 			rate = IWL_RATE_6M_PLCP;
@@ -1290,6 +1420,12 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 			rate = IWL_RATE_1M_PLCP;
 			rate_flags = RATE_MCS_CCK_MSK;
 		}
+		/*
+		 * Internal scans are passive, so we can indiscriminately set
+		 * the BT ignore flag on 2.4 GHz since it applies to TX only.
+		 */
+		if (priv->cfg->advanced_bt_coexist)
+			scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
 		scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
 		break;
 	case IEEE80211_BAND_5GHZ:
@@ -1315,8 +1451,8 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 			IWL_GOOD_CRC_TH_NEVER;
 		break;
 	default:
-		IWL_WARN(priv, "Invalid scan band count\n");
-		goto done;
+		IWL_WARN(priv, "Invalid scan band\n");
+		return -EIO;
 	}
 
 	band = priv->scan_band;
@@ -1327,6 +1463,12 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	if (priv->cfg->scan_tx_antennas[band])
 		scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
 
+	if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
+		/* operated as 1x1 in full concurrency mode */
+		scan_tx_antennas =
+			first_antenna(priv->cfg->scan_tx_antennas[band]);
+	}
+
 	priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
 						    scan_tx_antennas);
 	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
@@ -1345,6 +1487,11 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 
 		rx_ant = first_antenna(active_chains);
 	}
+	if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
+		/* operated as 1x1 in full concurrency mode */
+		rx_ant = first_antenna(rx_ant);
+	}
+
 	/* MIMO is not used here, but value is required */
 	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
 	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
@@ -1385,7 +1532,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	}
 	if (scan->channel_count == 0) {
 		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
-		goto done;
+		return -EIO;
 	}
 
 	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
@@ -1393,25 +1540,21 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 	cmd.data = scan;
 	scan->len = cpu_to_le16(cmd.len);
 
+	if (priv->cfg->ops->hcmd->set_pan_params) {
+		ret = priv->cfg->ops->hcmd->set_pan_params(priv);
+		if (ret)
+			return ret;
+	}
+
 	set_bit(STATUS_SCAN_HW, &priv->status);
-	if (iwl_send_cmd_sync(priv, &cmd))
-		goto done;
+	ret = iwl_send_cmd_sync(priv, &cmd);
+	if (ret) {
+		clear_bit(STATUS_SCAN_HW, &priv->status);
+		if (priv->cfg->ops->hcmd->set_pan_params)
+			priv->cfg->ops->hcmd->set_pan_params(priv);
+	}
 
-	queue_delayed_work(priv->workqueue, &priv->scan_check,
-			   IWL_SCAN_CHECK_WATCHDOG);
-
-	return;
-
- done:
-	/* Cannot perform scan. Make sure we clear scanning
-	 * bits from status so next scan request can be performed.
-	 * If we don't clear scanning status bit here all next scan
-	 * will fail
-	 */
-	clear_bit(STATUS_SCAN_HW, &priv->status);
-	clear_bit(STATUS_SCANNING, &priv->status);
-	/* inform mac80211 scan aborted */
-	queue_work(priv->workqueue, &priv->scan_completed);
+	return ret;
 }
 
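With the void-to-int change, iwlagn_request_scan() now reports failure instead of unwinding the scan state itself; that cleanup presumably moves to the common caller in iwl-scan.c, roughly along these lines (hedged sketch only, the real call site is not part of these hunks):

	ret = priv->cfg->ops->utils->request_scan(priv, vif);
	if (ret) {
		/* cleanup formerly done at the "done:" label */
		clear_bit(STATUS_SCANNING, &priv->status);
		queue_work(priv->workqueue, &priv->scan_completed);
	}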
 int iwlagn_manage_ibss_station(struct iwl_priv *priv,
@@ -1420,7 +1563,8 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
 	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
 
 	if (add)
-		return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true,
+		return iwl_add_bssid_station(priv, vif_priv->ctx,
+					     vif->bss_conf.bssid, true,
 					     &vif_priv->ibss_bssid_sta_id);
 	return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
 				  vif->bss_conf.bssid);
@@ -1453,7 +1597,7 @@ int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
 
 	/* waiting for all the tx frames complete might take a while */
 	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
-		if (cnt == IWL_CMD_QUEUE_NUM)
+		if (cnt == priv->cmd_queue)
 			continue;
 		txq = &priv->txq[cnt];
 		q = &txq->q;
@@ -1518,3 +1662,379 @@ done:
 	ieee80211_wake_queues(priv->hw);
 	mutex_unlock(&priv->mutex);
 }
1665
1666/*
1667 * BT coex
1668 */
1669/*
1670 * Macros to access the lookup table.
1671 *
1672 * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
1673* wifi_prio, wifi_txrx and wifi_sh_ant_req.
1674 *
1675 * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
1676 *
1677 * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
1678 * one after another in 32-bit registers, and "registers" 0 through 7 contain
1679 * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
1680 *
1681 * These macros encode that format.
1682 */
1683#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
1684 wifi_txrx, wifi_sh_ant_req) \
1685 (bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
1686 (wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))
1687
1688#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
1689 lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
1690#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1691 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1692 (!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
1693 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1694 wifi_sh_ant_req))))
1695#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1696 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1697 LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
1698 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1699 wifi_sh_ant_req))
1700#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
1701 wifi_req, wifi_prio, wifi_txrx, \
1702 wifi_sh_ant_req) \
1703 LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
1704 bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
1705 wifi_sh_ant_req))
1706
1707#define LUT_WLAN_KILL_OP(lut, op, val) \
1708 lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
1709#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1710 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1711 (!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1712 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
1713#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1714 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1715 LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1716 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1717#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1718 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1719 LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1720 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1721
1722#define LUT_ANT_SWITCH_OP(lut, op, val) \
1723 lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
1724#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1725 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1726 (!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1727 wifi_req, wifi_prio, wifi_txrx, \
1728 wifi_sh_ant_req))))
1729#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1730 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1731 LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1732 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1733#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
1734 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
1735 LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
1736 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
1737
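[ Aside: a minimal userspace sketch of the bit layout the macros above encode -- not part of the patch. Plain uint32_t stands in for __le32, and the helper name lut_value() is made up for illustration; only the shifts and masks follow the macros. ]

#include <stdint.h>
#include <stdio.h>

/* Pack the 7 coex inputs into a 0..127 vector, mirroring LUT_VALUE(). */
static unsigned lut_value(unsigned bt3_prio, unsigned bt3_txrx,
			  unsigned bt_rf_act, unsigned wifi_req,
			  unsigned wifi_prio, unsigned wifi_txrx,
			  unsigned wifi_sh_ant_req)
{
	return bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) |
	       (wifi_req << 3) | (wifi_prio << 4) | (wifi_txrx << 5) |
	       (wifi_sh_ant_req << 6);
}

int main(void)
{
	uint32_t lut[12] = { 0 };
	unsigned val = lut_value(1, 0, 1, 1, 0, 0, 0);

	/* WLAN_ACTIVE: one bit per input vector, packed into words 8..11 */
	lut[8 + (val >> 5)] |= 1u << (val & 0x1f);

	/* WLAN_KILL and ANT_SWITCH: interleaved bit pairs in words 0..7 */
	lut[val >> 4] |= 1u << ((val << 1) & 0x1e);		/* WLAN_KILL */
	lut[val >> 4] |= 1u << (((val << 1) & 0x1e) + 1);	/* ANT_SWITCH */

	printf("vector %u -> active word %u, kill/switch word %u\n",
	       val, 8 + (val >> 5), val >> 4);
	return 0;
}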
1738static const __le32 iwlagn_def_3w_lookup[12] = {
1739 cpu_to_le32(0xaaaaaaaa),
1740 cpu_to_le32(0xaaaaaaaa),
1741 cpu_to_le32(0xaeaaaaaa),
1742 cpu_to_le32(0xaaaaaaaa),
1743 cpu_to_le32(0xcc00ff28),
1744 cpu_to_le32(0x0000aaaa),
1745 cpu_to_le32(0xcc00aaaa),
1746 cpu_to_le32(0x0000aaaa),
1747 cpu_to_le32(0xc0004000),
1748 cpu_to_le32(0x00004000),
1749 cpu_to_le32(0xf0005000),
1750 cpu_to_le32(0xf0004000),
1751};
1752
1753static const __le32 iwlagn_concurrent_lookup[12] = {
1754 cpu_to_le32(0xaaaaaaaa),
1755 cpu_to_le32(0xaaaaaaaa),
1756 cpu_to_le32(0xaaaaaaaa),
1757 cpu_to_le32(0xaaaaaaaa),
1758 cpu_to_le32(0xaaaaaaaa),
1759 cpu_to_le32(0xaaaaaaaa),
1760 cpu_to_le32(0xaaaaaaaa),
1761 cpu_to_le32(0xaaaaaaaa),
1762 cpu_to_le32(0x00000000),
1763 cpu_to_le32(0x00000000),
1764 cpu_to_le32(0x00000000),
1765 cpu_to_le32(0x00000000),
1766};
1767
1768void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1769{
1770 struct iwlagn_bt_cmd bt_cmd = {
1771 .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
1772 .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
1773 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
1774 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
1775 };
1776
1777 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
1778 sizeof(bt_cmd.bt3_lookup_table));
1779
1780 bt_cmd.prio_boost = priv->cfg->bt_prio_boost;
1781 bt_cmd.kill_ack_mask = priv->kill_ack_mask;
1782 bt_cmd.kill_cts_mask = priv->kill_cts_mask;
1783 bt_cmd.valid = priv->bt_valid;
1784 bt_cmd.tx_prio_boost = 0;
1785 bt_cmd.rx_prio_boost = 0;
1786
1787 /*
1788 * Configure BT coex mode to "no coexistence" when the
1789 * user disabled BT coexistence, we have no interface
1790 * (might be in monitor mode), or the interface is in
1791 * IBSS mode (no proper uCode support for coex then).
1792 */
1793 if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
1794 bt_cmd.flags = 0;
1795 } else {
1796 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
1797 IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
1798 if (priv->bt_ch_announce)
1799 bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
1800 IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags);
1801 }
1802 if (priv->bt_full_concurrent)
1803 memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup,
1804 sizeof(iwlagn_concurrent_lookup));
1805 else
1806 memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup,
1807 sizeof(iwlagn_def_3w_lookup));
1808
1809 IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
1810 bt_cmd.flags ? "active" : "disabled",
1811 priv->bt_full_concurrent ?
1812 "full concurrency" : "3-wire");
1813
1814 if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd))
1815 IWL_ERR(priv, "failed to send BT Coex Config\n");
1816
1817 /*
1818 * When we are doing a restart, we also need to reconfigure BT
1819 * SCO on the device. If not doing a restart, bt_sco_active
1820 * will always be false, so there's no need for an extra
1821 * variable to check for it.
1822 */
1823 if (priv->bt_sco_active) {
1824 struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
1825
1826 /* the outer check already guarantees bt_sco_active */
1827 sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
1828 if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO,
1829 sizeof(sco_cmd), &sco_cmd))
1830 IWL_ERR(priv, "failed to send BT SCO command\n");
1831 }
1832}
1833
1834static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1835{
1836 struct iwl_priv *priv =
1837 container_of(work, struct iwl_priv, bt_traffic_change_work);
1838 struct iwl_rxon_context *ctx;
1839 int smps_request = -1;
1840
1841 IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
1842 priv->bt_traffic_load);
1843
1844 switch (priv->bt_traffic_load) {
1845 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1846 smps_request = IEEE80211_SMPS_AUTOMATIC;
1847 break;
1848 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1849 smps_request = IEEE80211_SMPS_DYNAMIC;
1850 break;
1851 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1852 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1853 smps_request = IEEE80211_SMPS_STATIC;
1854 break;
1855 default:
1856 IWL_ERR(priv, "Invalid BT traffic load: %d\n",
1857 priv->bt_traffic_load);
1858 break;
1859 }
1860
1861 mutex_lock(&priv->mutex);
1862
1863 if (priv->cfg->ops->lib->update_chain_flags)
1864 priv->cfg->ops->lib->update_chain_flags(priv);
1865
1866 if (smps_request != -1) {
1867 for_each_context(priv, ctx) {
1868 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
1869 ieee80211_request_smps(ctx->vif, smps_request);
1870 }
1871 }
1872
1873 mutex_unlock(&priv->mutex);
1874}
1875
1876static void iwlagn_print_uartmsg(struct iwl_priv *priv,
1877 struct iwl_bt_uart_msg *uart_msg)
1878{
1879 IWL_DEBUG_NOTIF(priv, "Message Type = 0x%X, SSN = 0x%X, "
1880 "Update Req = 0x%X",
1881 (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
1882 BT_UART_MSG_FRAME1MSGTYPE_POS,
1883 (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
1884 BT_UART_MSG_FRAME1SSN_POS,
1885 (BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
1886 BT_UART_MSG_FRAME1UPDATEREQ_POS);
1887
1888 IWL_DEBUG_NOTIF(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
1889 "Chl_SeqN = 0x%X, In band = 0x%X",
1890 (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
1891 BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
1892 (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
1893 BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
1894 (BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
1895 BT_UART_MSG_FRAME2CHLSEQN_POS,
1896 (BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
1897 BT_UART_MSG_FRAME2INBAND_POS);
1898
1899 IWL_DEBUG_NOTIF(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
1900 "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
1901 (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
1902 BT_UART_MSG_FRAME3SCOESCO_POS,
1903 (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
1904 BT_UART_MSG_FRAME3SNIFF_POS,
1905 (BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
1906 BT_UART_MSG_FRAME3A2DP_POS,
1907 (BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
1908 BT_UART_MSG_FRAME3ACL_POS,
1909 (BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
1910 BT_UART_MSG_FRAME3MASTER_POS,
1911 (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
1912 BT_UART_MSG_FRAME3OBEX_POS);
1913
1914 IWL_DEBUG_NOTIF(priv, "Idle duration = 0x%X",
1915 (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
1916 BT_UART_MSG_FRAME4IDLEDURATION_POS);
1917
1918 IWL_DEBUG_NOTIF(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
1919 "eSCO Retransmissions = 0x%X",
1920 (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
1921 BT_UART_MSG_FRAME5TXACTIVITY_POS,
1922 (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
1923 BT_UART_MSG_FRAME5RXACTIVITY_POS,
1924 (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
1925 BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
1926
1927 IWL_DEBUG_NOTIF(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
1928 (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
1929 BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
1930 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
1931 BT_UART_MSG_FRAME6DISCOVERABLE_POS);
1932
1933 IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = "
1934 "0x%X, Connectable = 0x%X",
1935 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
1936 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
1937 (BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >>
1938 BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS,
1939 (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
1940 BT_UART_MSG_FRAME7CONNECTABLE_POS);
1941}
1942
1943static void iwlagn_set_kill_ack_msk(struct iwl_priv *priv,
1944 struct iwl_bt_uart_msg *uart_msg)
1945{
1946 u8 kill_ack_msk;
1947 __le32 bt_kill_ack_msg[2] = {
1948 cpu_to_le32(0xFFFFFFF), cpu_to_le32(0xFFFFFC00) };
1949
1950 kill_ack_msk = (((BT_UART_MSG_FRAME3A2DP_MSK |
1951 BT_UART_MSG_FRAME3SNIFF_MSK |
1952 BT_UART_MSG_FRAME3SCOESCO_MSK) &
1953 uart_msg->frame3) == 0) ? 1 : 0;
1954 if (priv->kill_ack_mask != bt_kill_ack_msg[kill_ack_msk]) {
1955 priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
1956 priv->kill_ack_mask = bt_kill_ack_msg[kill_ack_msk];
1957 /* schedule to send runtime bt_config */
1958 queue_work(priv->workqueue, &priv->bt_runtime_config);
1959 }
1960
1961}
1962
1963void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
1964 struct iwl_rx_mem_buffer *rxb)
1965{
1966 unsigned long flags;
1967 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1968 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
1969 struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
1970 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
1971 u8 last_traffic_load;
1972
1973 IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
1974 IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
1975 IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load);
1976 IWL_DEBUG_NOTIF(priv, " CI compliance: %d\n",
1977 coex->bt_ci_compliance);
1978 iwlagn_print_uartmsg(priv, uart_msg);
1979
1980 last_traffic_load = priv->notif_bt_traffic_load;
1981 priv->notif_bt_traffic_load = coex->bt_traffic_load;
1982 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
1983 if (priv->bt_status != coex->bt_status ||
1984 last_traffic_load != coex->bt_traffic_load) {
1985 if (coex->bt_status) {
1986 /* BT on */
1987 if (!priv->bt_ch_announce)
1988 priv->bt_traffic_load =
1989 IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1990 else
1991 priv->bt_traffic_load =
1992 coex->bt_traffic_load;
1993 } else {
1994 /* BT off */
1995 priv->bt_traffic_load =
1996 IWL_BT_COEX_TRAFFIC_LOAD_NONE;
1997 }
1998 priv->bt_status = coex->bt_status;
1999 queue_work(priv->workqueue,
2000 &priv->bt_traffic_change_work);
2001 }
2002 if (priv->bt_sco_active !=
2003 (uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) {
2004 priv->bt_sco_active = uart_msg->frame3 &
2005 BT_UART_MSG_FRAME3SCOESCO_MSK;
2006 if (priv->bt_sco_active)
2007 sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
2008 iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO,
2009 sizeof(sco_cmd), &sco_cmd, NULL);
2010 }
2011 }
2012
2013 iwlagn_set_kill_ack_msk(priv, uart_msg);
2014
2015 /* FIXME: based on notification, adjust the prio_boost */
2016
2017 spin_lock_irqsave(&priv->lock, flags);
2018 priv->bt_ci_compliance = coex->bt_ci_compliance;
2019 spin_unlock_irqrestore(&priv->lock, flags);
2020}
2021
2022void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
2023{
2024 iwlagn_rx_handler_setup(priv);
2025 priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
2026 iwlagn_bt_coex_profile_notif;
2027}
2028
2029void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
2030{
2031 iwlagn_setup_deferred_work(priv);
2032
2033 INIT_WORK(&priv->bt_traffic_change_work,
2034 iwlagn_bt_traffic_change_work);
2035}
2036
2037void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
2038{
2039 cancel_work_sync(&priv->bt_traffic_change_work);
2040}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 23e5c42e7d7e..57629fba3a7d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -82,6 +82,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
82 struct iwl_lq_sta *lq_sta); 82 struct iwl_lq_sta *lq_sta);
83static void rs_fill_link_cmd(struct iwl_priv *priv, 83static void rs_fill_link_cmd(struct iwl_priv *priv,
84 struct iwl_lq_sta *lq_sta, u32 rate_n_flags); 84 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
85static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
85 86
86 87
87#ifdef CONFIG_MAC80211_DEBUGFS 88#ifdef CONFIG_MAC80211_DEBUGFS
@@ -300,7 +301,19 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
300 struct ieee80211_sta *sta) 301 struct ieee80211_sta *sta)
301{ 302{
302 int ret = -EAGAIN; 303 int ret = -EAGAIN;
303 u32 load = rs_tl_get_load(lq_data, tid); 304 u32 load;
305
306 /*
307 * Don't create TX aggregation sessions when in high
308 * BT traffic, as they would just be disrupted by BT.
309 */
310 if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
311 IWL_ERR(priv, "BT traffic (%d), no aggregation allowed\n",
312 priv->bt_traffic_load);
313 return ret;
314 }
315
316 load = rs_tl_get_load(lq_data, tid);
304 317
305 if (load > IWL_AGG_LOAD_THRESHOLD) { 318 if (load > IWL_AGG_LOAD_THRESHOLD) {
306 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 319 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
@@ -502,6 +515,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
502 u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags); 515 u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
503 u8 mcs; 516 u8 mcs;
504 517
518 memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
505 *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags); 519 *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
506 520
507 if (*rate_idx == IWL_RATE_INVALID) { 521 if (*rate_idx == IWL_RATE_INVALID) {
@@ -588,11 +602,13 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
588 * Green-field mode is valid if the station supports it and 602 * Green-field mode is valid if the station supports it and
589 * there are no non-GF stations present in the BSS. 603 * there are no non-GF stations present in the BSS.
590 */ 604 */
591static inline u8 rs_use_green(struct ieee80211_sta *sta, 605static bool rs_use_green(struct ieee80211_sta *sta)
592 struct iwl_ht_config *ht_conf)
593{ 606{
607 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
608 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
609
594 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && 610 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
595 !(ht_conf->non_GF_STA_present); 611 !(ctx->ht.non_gf_sta_present);
596} 612}
597 613
598/** 614/**
@@ -744,6 +760,32 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
744 (a->is_SGI == b->is_SGI); 760 (a->is_SGI == b->is_SGI);
745} 761}
746 762
763static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
764 struct iwl_lq_sta *lq_sta)
765{
766 struct iwl_scale_tbl_info *tbl;
767 bool full_concurrent;
768 unsigned long flags;
769
770 spin_lock_irqsave(&priv->lock, flags);
771 if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
772 full_concurrent = true;
773 else
774 full_concurrent = false;
775 spin_unlock_irqrestore(&priv->lock, flags);
776
777 if (priv->bt_full_concurrent != full_concurrent) {
778 priv->bt_full_concurrent = full_concurrent;
779
780 /* Update uCode's rate table. */
781 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
782 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
783 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
784
785 queue_work(priv->workqueue, &priv->bt_full_concurrency);
786 }
787}
788
747/* 789/*
748 * mac80211 sends us Tx status 790 * mac80211 sends us Tx status
749 */ 791 */
@@ -763,6 +805,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
763 u32 tx_rate; 805 u32 tx_rate;
764 struct iwl_scale_tbl_info tbl_type; 806 struct iwl_scale_tbl_info tbl_type;
765 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; 807 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
808 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
809 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
766 810
767 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); 811 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
768 812
@@ -829,7 +873,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
829 lq_sta->missed_rate_counter++; 873 lq_sta->missed_rate_counter++;
830 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { 874 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
831 lq_sta->missed_rate_counter = 0; 875 lq_sta->missed_rate_counter = 0;
832 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false); 876 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
833 } 877 }
834 /* Regardless, ignore this status info for outdated rate */ 878 /* Regardless, ignore this status info for outdated rate */
835 return; 879 return;
@@ -848,7 +892,20 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
848 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 892 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
849 } else { 893 } else {
850 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n"); 894 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
851 return; 895 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
896 IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
897 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
898 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
899 IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
900 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
901 IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
902 tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
903 /*
904 * no matching table found; bypass the data collection
905 * and continue rate scaling to find the right rate table
906 */
907 rs_stay_in_table(lq_sta, true);
908 goto done;
852 } 909 }
853 910
854 /* 911 /*
@@ -909,10 +966,14 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
909 } 966 }
910 /* The last TX rate is cached in lq_sta; it's set in if/else above */ 967 /* The last TX rate is cached in lq_sta; it's set in if/else above */
911 lq_sta->last_rate_n_flags = tx_rate; 968 lq_sta->last_rate_n_flags = tx_rate;
912 969done:
913 /* See if there's a better rate or modulation mode to try. */ 970 /* See if there's a better rate or modulation mode to try. */
914 if (sta && sta->supp_rates[sband->band]) 971 if (sta && sta->supp_rates[sband->band])
915 rs_rate_scale_perform(priv, skb, sta, lq_sta); 972 rs_rate_scale_perform(priv, skb, sta, lq_sta);
973
974 /* Is there a need to switch between full concurrency and 3-wire? */
975 if (priv->bt_ant_couple_ok)
976 rs_bt_update_lq(priv, ctx, lq_sta);
916} 977}
917 978
918/* 979/*
@@ -1106,6 +1167,8 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1106 u16 rate_mask; 1167 u16 rate_mask;
1107 s32 rate; 1168 s32 rate;
1108 s8 is_green = lq_sta->is_green; 1169 s8 is_green = lq_sta->is_green;
1170 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1171 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1109 1172
1110 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) 1173 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1111 return -1; 1174 return -1;
@@ -1126,7 +1189,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1126 tbl->max_search = IWL_MAX_SEARCH; 1189 tbl->max_search = IWL_MAX_SEARCH;
1127 rate_mask = lq_sta->active_mimo2_rate; 1190 rate_mask = lq_sta->active_mimo2_rate;
1128 1191
1129 if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap)) 1192 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1130 tbl->is_ht40 = 1; 1193 tbl->is_ht40 = 1;
1131 else 1194 else
1132 tbl->is_ht40 = 0; 1195 tbl->is_ht40 = 0;
@@ -1160,6 +1223,8 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
1160 u16 rate_mask; 1223 u16 rate_mask;
1161 s32 rate; 1224 s32 rate;
1162 s8 is_green = lq_sta->is_green; 1225 s8 is_green = lq_sta->is_green;
1226 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1227 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1163 1228
1164 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) 1229 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1165 return -1; 1230 return -1;
@@ -1180,7 +1245,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
1180 tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH; 1245 tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
1181 rate_mask = lq_sta->active_mimo3_rate; 1246 rate_mask = lq_sta->active_mimo3_rate;
1182 1247
1183 if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap)) 1248 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1184 tbl->is_ht40 = 1; 1249 tbl->is_ht40 = 1;
1185 else 1250 else
1186 tbl->is_ht40 = 0; 1251 tbl->is_ht40 = 0;
@@ -1215,6 +1280,8 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1215 u16 rate_mask; 1280 u16 rate_mask;
1216 u8 is_green = lq_sta->is_green; 1281 u8 is_green = lq_sta->is_green;
1217 s32 rate; 1282 s32 rate;
1283 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1284 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1218 1285
1219 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) 1286 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1220 return -1; 1287 return -1;
@@ -1227,7 +1294,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1227 tbl->max_search = IWL_MAX_SEARCH; 1294 tbl->max_search = IWL_MAX_SEARCH;
1228 rate_mask = lq_sta->active_siso_rate; 1295 rate_mask = lq_sta->active_siso_rate;
1229 1296
1230 if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap)) 1297 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1231 tbl->is_ht40 = 1; 1298 tbl->is_ht40 = 1;
1232 else 1299 else
1233 tbl->is_ht40 = 0; 1300 tbl->is_ht40 = 0;
@@ -1265,18 +1332,52 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1265 struct iwl_rate_scale_data *window = &(tbl->win[index]); 1332 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1266 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1333 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1267 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1334 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1268 u8 start_action = tbl->action; 1335 u8 start_action;
1269 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1336 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1270 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1337 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1271 int ret = 0; 1338 int ret = 0;
1272 u8 update_search_tbl_counter = 0; 1339 u8 update_search_tbl_counter = 0;
1273 1340
1341 switch (priv->bt_traffic_load) {
1342 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1343 /* nothing */
1344 break;
1345 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1346 /* avoid antenna B unless MIMO */
1347 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1348 if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
1349 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1350 break;
1351 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1352 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1353 /* avoid antenna B and MIMO */
1354 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1355 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
1356 tbl->action != IWL_LEGACY_SWITCH_SISO)
1357 tbl->action = IWL_LEGACY_SWITCH_SISO;
1358 break;
1359 default:
1360 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1361 break;
1362 }
1363
1274 if (!iwl_ht_enabled(priv)) 1364 if (!iwl_ht_enabled(priv))
1275 /* stay in Legacy */ 1365 /* stay in Legacy */
1276 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; 1366 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1277 else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE && 1367 else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
1278 tbl->action > IWL_LEGACY_SWITCH_SISO) 1368 tbl->action > IWL_LEGACY_SWITCH_SISO)
1279 tbl->action = IWL_LEGACY_SWITCH_SISO; 1369 tbl->action = IWL_LEGACY_SWITCH_SISO;
1370
1371 /* configure as 1x1 if bt full concurrency */
1372 if (priv->bt_full_concurrent) {
1373 if (!iwl_ht_enabled(priv))
1374 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1375 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1376 tbl->action = IWL_LEGACY_SWITCH_SISO;
1377 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1378 }
1379
1380 start_action = tbl->action;
1280 for (; ;) { 1381 for (; ;) {
1281 lq_sta->action_counter++; 1382 lq_sta->action_counter++;
1282 switch (tbl->action) { 1383 switch (tbl->action) {
@@ -1291,7 +1392,10 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1291 break; 1392 break;
1292 1393
1293 /* Don't change antenna if success has been great */ 1394 /* Don't change antenna if success has been great */
1294 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1395 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
1396 !priv->bt_full_concurrent &&
1397 priv->bt_traffic_load ==
1398 IWL_BT_COEX_TRAFFIC_LOAD_NONE)
1295 break; 1399 break;
1296 1400
1297 /* Set up search table to try other antenna */ 1401 /* Set up search table to try other antenna */
@@ -1403,31 +1507,64 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1403 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 1507 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1404 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1508 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1405 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1509 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1406 u8 start_action = tbl->action; 1510 u8 start_action;
1407 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1511 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1408 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1512 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1409 u8 update_search_tbl_counter = 0; 1513 u8 update_search_tbl_counter = 0;
1410 int ret; 1514 int ret;
1411 1515
1516 switch (priv->bt_traffic_load) {
1517 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1518 /* nothing */
1519 break;
1520 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1521 /* avoid antenna B unless MIMO */
1522 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1523 if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
1524 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1525 break;
1526 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1527 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1528 /* avoid antenna B and MIMO */
1529 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1530 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1531 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1532 break;
1533 default:
1534 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1535 break;
1536 }
1537
1412 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE && 1538 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
1413 tbl->action > IWL_SISO_SWITCH_ANTENNA2) { 1539 tbl->action > IWL_SISO_SWITCH_ANTENNA2) {
1414 /* stay in SISO */ 1540 /* stay in SISO */
1415 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1541 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1416 } 1542 }
1543
1544 /* configure as 1x1 if bt full concurrency */
1545 if (priv->bt_full_concurrent) {
1546 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
1547 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1548 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1549 }
1550
1551 start_action = tbl->action;
1417 for (;;) { 1552 for (;;) {
1418 lq_sta->action_counter++; 1553 lq_sta->action_counter++;
1419 switch (tbl->action) { 1554 switch (tbl->action) {
1420 case IWL_SISO_SWITCH_ANTENNA1: 1555 case IWL_SISO_SWITCH_ANTENNA1:
1421 case IWL_SISO_SWITCH_ANTENNA2: 1556 case IWL_SISO_SWITCH_ANTENNA2:
1422 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n"); 1557 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
1423
1424 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && 1558 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1425 tx_chains_num <= 1) || 1559 tx_chains_num <= 1) ||
1426 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 && 1560 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1427 tx_chains_num <= 2)) 1561 tx_chains_num <= 2))
1428 break; 1562 break;
1429 1563
1430 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1564 if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
1565 !priv->bt_full_concurrent &&
1566 priv->bt_traffic_load ==
1567 IWL_BT_COEX_TRAFFIC_LOAD_NONE)
1431 break; 1568 break;
1432 1569
1433 memcpy(search_tbl, tbl, sz); 1570 memcpy(search_tbl, tbl, sz);
@@ -1541,18 +1678,47 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1541 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 1678 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1542 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1679 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1543 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1680 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1544 u8 start_action = tbl->action; 1681 u8 start_action;
1545 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1682 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1546 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1683 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1547 u8 update_search_tbl_counter = 0; 1684 u8 update_search_tbl_counter = 0;
1548 int ret; 1685 int ret;
1549 1686
1687 switch (priv->bt_traffic_load) {
1688 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1689 /* nothing */
1690 break;
1691 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1692 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1693 /* avoid antenna B and MIMO */
1694 if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
1695 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1696 break;
1697 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1698 /* avoid antenna B unless MIMO */
1699 if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
1700 tbl->action == IWL_MIMO2_SWITCH_SISO_C)
1701 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1702 break;
1703 default:
1704 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1705 break;
1706 }
1707
1550 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) && 1708 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
1551 (tbl->action < IWL_MIMO2_SWITCH_SISO_A || 1709 (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
1552 tbl->action > IWL_MIMO2_SWITCH_SISO_C)) { 1710 tbl->action > IWL_MIMO2_SWITCH_SISO_C)) {
1553 /* switch in SISO */ 1711 /* switch in SISO */
1554 tbl->action = IWL_MIMO2_SWITCH_SISO_A; 1712 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1555 } 1713 }
1714
1715 /* configure as 1x1 if bt full concurrency */
1716 if (priv->bt_full_concurrent &&
1717 (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
1718 tbl->action > IWL_MIMO2_SWITCH_SISO_C))
1719 tbl->action = IWL_MIMO2_SWITCH_SISO_A;
1720
1721 start_action = tbl->action;
1556 for (;;) { 1722 for (;;) {
1557 lq_sta->action_counter++; 1723 lq_sta->action_counter++;
1558 switch (tbl->action) { 1724 switch (tbl->action) {
@@ -1682,18 +1848,47 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1682 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 1848 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1683 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1849 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1684 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1850 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1685 u8 start_action = tbl->action; 1851 u8 start_action;
1686 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1852 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1687 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1853 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1688 int ret; 1854 int ret;
1689 u8 update_search_tbl_counter = 0; 1855 u8 update_search_tbl_counter = 0;
1690 1856
1857 switch (priv->bt_traffic_load) {
1858 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1859 /* nothing */
1860 break;
1861 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1862 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1863 /* avoid antenna B and MIMO */
1864 if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
1865 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1866 break;
1867 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1868 /* avoid antenna B unless MIMO */
1869 if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
1870 tbl->action == IWL_MIMO3_SWITCH_SISO_C)
1871 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1872 break;
1873 default:
1874 IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
1875 break;
1876 }
1877
1691 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) && 1878 if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
1692 (tbl->action < IWL_MIMO3_SWITCH_SISO_A || 1879 (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
1693 tbl->action > IWL_MIMO3_SWITCH_SISO_C)) { 1880 tbl->action > IWL_MIMO3_SWITCH_SISO_C)) {
1694 /* switch in SISO */ 1881 /* switch in SISO */
1695 tbl->action = IWL_MIMO3_SWITCH_SISO_A; 1882 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1696 } 1883 }
1884
1885 /* configure as 1x1 if bt full concurrency */
1886 if (priv->bt_full_concurrent &&
1887 (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
1888 tbl->action > IWL_MIMO3_SWITCH_SISO_C))
1889 tbl->action = IWL_MIMO3_SWITCH_SISO_A;
1890
1891 start_action = tbl->action;
1697 for (;;) { 1892 for (;;) {
1698 lq_sta->action_counter++; 1893 lq_sta->action_counter++;
1699 switch (tbl->action) { 1894 switch (tbl->action) {
@@ -1820,7 +2015,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1820 * 2) # times calling this function 2015 * 2) # times calling this function
1821 * 3) elapsed time in this mode (not used, for now) 2016 * 3) elapsed time in this mode (not used, for now)
1822 */ 2017 */
1823static void rs_stay_in_table(struct iwl_lq_sta *lq_sta) 2018static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1824{ 2019{
1825 struct iwl_scale_tbl_info *tbl; 2020 struct iwl_scale_tbl_info *tbl;
1826 int i; 2021 int i;
@@ -1851,7 +2046,8 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
1851 * allow a new search. Also (below) reset all bitmaps and 2046 * allow a new search. Also (below) reset all bitmaps and
1852 * stats in active history. 2047 * stats in active history.
1853 */ 2048 */
1854 if ((lq_sta->total_failed > lq_sta->max_failure_limit) || 2049 if (force_search ||
2050 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
1855 (lq_sta->total_success > lq_sta->max_success_limit) || 2051 (lq_sta->total_success > lq_sta->max_success_limit) ||
1856 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) 2052 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
1857 && (flush_interval_passed))) { 2053 && (flush_interval_passed))) {
@@ -1900,6 +2096,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
1900 * return rate_n_flags as used in the table 2096 * return rate_n_flags as used in the table
1901 */ 2097 */
1902static u32 rs_update_rate_tbl(struct iwl_priv *priv, 2098static u32 rs_update_rate_tbl(struct iwl_priv *priv,
2099 struct iwl_rxon_context *ctx,
1903 struct iwl_lq_sta *lq_sta, 2100 struct iwl_lq_sta *lq_sta,
1904 struct iwl_scale_tbl_info *tbl, 2101 struct iwl_scale_tbl_info *tbl,
1905 int index, u8 is_green) 2102 int index, u8 is_green)
@@ -1909,7 +2106,7 @@ static u32 rs_update_rate_tbl(struct iwl_priv *priv,
1909 /* Update uCode's rate table. */ 2106 /* Update uCode's rate table. */
1910 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); 2107 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
1911 rs_fill_link_cmd(priv, lq_sta, rate); 2108 rs_fill_link_cmd(priv, lq_sta, rate);
1912 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false); 2109 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
1913 2110
1914 return rate; 2111 return rate;
1915} 2112}
@@ -1948,6 +2145,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1948 s32 sr; 2145 s32 sr;
1949 u8 tid = MAX_TID_COUNT; 2146 u8 tid = MAX_TID_COUNT;
1950 struct iwl_tid_data *tid_data; 2147 struct iwl_tid_data *tid_data;
2148 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2149 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1951 2150
1952 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n"); 2151 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
1953 2152
@@ -1986,7 +2185,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1986 if (is_legacy(tbl->lq_type)) 2185 if (is_legacy(tbl->lq_type))
1987 lq_sta->is_green = 0; 2186 lq_sta->is_green = 0;
1988 else 2187 else
1989 lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config); 2188 lq_sta->is_green = rs_use_green(sta);
1990 is_green = lq_sta->is_green; 2189 is_green = lq_sta->is_green;
1991 2190
1992 /* current tx rate */ 2191 /* current tx rate */
@@ -2025,7 +2224,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2025 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 2224 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2026 /* get "active" rate info */ 2225 /* get "active" rate info */
2027 index = iwl_hwrate_to_plcp_idx(tbl->current_rate); 2226 index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
2028 rate = rs_update_rate_tbl(priv, lq_sta, 2227 rate = rs_update_rate_tbl(priv, ctx, lq_sta,
2029 tbl, index, is_green); 2228 tbl, index, is_green);
2030 } 2229 }
2031 return; 2230 return;
@@ -2067,7 +2266,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2067 2266
2068 /* Should we stay with this modulation mode, 2267 /* Should we stay with this modulation mode,
2069 * or search for a new one? */ 2268 * or search for a new one? */
2070 rs_stay_in_table(lq_sta); 2269 rs_stay_in_table(lq_sta, false);
2071 2270
2072 goto out; 2271 goto out;
2073 } 2272 }
@@ -2215,6 +2414,28 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2215 if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI && 2414 if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI &&
2216 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) 2415 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type)))
2217 scale_action = -1; 2416 scale_action = -1;
2417
2418 if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2419 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2420 if (lq_sta->last_bt_traffic > priv->bt_traffic_load) {
2421 /*
2422 * leave scale_action alone: we don't want to scale
2423 * up unless the rate scaling algorithm otherwise
2424 * thinks that is a good idea.
2425 */
2426 } else if (lq_sta->last_bt_traffic <= priv->bt_traffic_load) {
2427 scale_action = -1;
2428 }
2429 }
2430 lq_sta->last_bt_traffic = priv->bt_traffic_load;
2431
2432 if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
2433 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
2434 /* search for a new modulation */
2435 rs_stay_in_table(lq_sta, true);
2436 goto lq_update;
2437 }
2438
2218 switch (scale_action) { 2439 switch (scale_action) {
2219 case -1: 2440 case -1:
2220 /* Decrease starting rate, update uCode's rate table */ 2441 /* Decrease starting rate, update uCode's rate table */
@@ -2245,13 +2466,13 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2245lq_update: 2466lq_update:
2246 /* Replace uCode's rate table for the destination station. */ 2467 /* Replace uCode's rate table for the destination station. */
2247 if (update_lq) 2468 if (update_lq)
2248 rate = rs_update_rate_tbl(priv, lq_sta, 2469 rate = rs_update_rate_tbl(priv, ctx, lq_sta,
2249 tbl, index, is_green); 2470 tbl, index, is_green);
2250 2471
2251 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) { 2472 if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) {
2252 /* Should we stay with this modulation mode, 2473 /* Should we stay with this modulation mode,
2253 * or search for a new one? */ 2474 * or search for a new one? */
2254 rs_stay_in_table(lq_sta); 2475 rs_stay_in_table(lq_sta, false);
2255 } 2476 }
2256 /* 2477 /*
2257 * Search for new modulation mode if we're: 2478 * Search for new modulation mode if we're:
@@ -2287,7 +2508,7 @@ lq_update:
2287 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n", 2508 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
2288 tbl->current_rate, index); 2509 tbl->current_rate, index);
2289 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate); 2510 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
2290 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false); 2511 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
2291 } else 2512 } else
2292 done_search = 1; 2513 done_search = 1;
2293 } 2514 }
@@ -2357,12 +2578,17 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2357 int rate_idx; 2578 int rate_idx;
2358 int i; 2579 int i;
2359 u32 rate; 2580 u32 rate;
2360 u8 use_green = rs_use_green(sta, &priv->current_ht_config); 2581 u8 use_green = rs_use_green(sta);
2361 u8 active_tbl = 0; 2582 u8 active_tbl = 0;
2362 u8 valid_tx_ant; 2583 u8 valid_tx_ant;
2584 struct iwl_station_priv *sta_priv;
2585 struct iwl_rxon_context *ctx;
2363 2586
2364 if (!sta || !lq_sta) 2587 if (!sta || !lq_sta)
2365 goto out; 2588 return;
2589
2590 sta_priv = (void *)sta->drv_priv;
2591 ctx = sta_priv->common.ctx;
2366 2592
2367 i = lq_sta->last_txrate_idx; 2593 i = lq_sta->last_txrate_idx;
2368 2594
@@ -2394,9 +2620,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2394 rs_set_expected_tpt_table(lq_sta, tbl); 2620 rs_set_expected_tpt_table(lq_sta, tbl);
2395 rs_fill_link_cmd(NULL, lq_sta, rate); 2621 rs_fill_link_cmd(NULL, lq_sta, rate);
2396 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; 2622 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2397 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_SYNC, true); 2623 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
2398 out:
2399 return;
2400} 2624}
2401 2625
2402static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, 2626static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
@@ -2524,7 +2748,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2524 lq_sta->is_dup = 0; 2748 lq_sta->is_dup = 0;
2525 lq_sta->max_rate_idx = -1; 2749 lq_sta->max_rate_idx = -1;
2526 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; 2750 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2527 lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config); 2751 lq_sta->is_green = rs_use_green(sta);
2528 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); 2752 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2529 lq_sta->band = priv->band; 2753 lq_sta->band = priv->band;
2530 /* 2754 /*
@@ -2594,10 +2818,15 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2594 rs_dbgfs_set_mcs(lq_sta, &new_rate, index); 2818 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
2595 2819
2596 /* Interpret new_rate (rate_n_flags) */ 2820 /* Interpret new_rate (rate_n_flags) */
2597 memset(&tbl_type, 0, sizeof(tbl_type));
2598 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, 2821 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
2599 &tbl_type, &rate_idx); 2822 &tbl_type, &rate_idx);
2600 2823
2824 if (priv && priv->bt_full_concurrent) {
2825 /* 1x1 only */
2826 tbl_type.ant_type =
2827 first_antenna(priv->hw_params.valid_tx_ant);
2828 }
2829
2601 /* How many times should we repeat the initial rate? */ 2830 /* How many times should we repeat the initial rate? */
2602 if (is_legacy(tbl_type.lq_type)) { 2831 if (is_legacy(tbl_type.lq_type)) {
2603 ant_toggle_cnt = 1; 2832 ant_toggle_cnt = 1;
@@ -2622,9 +2851,12 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2622 2851
2623 index++; 2852 index++;
2624 repeat_rate--; 2853 repeat_rate--;
2625 2854 if (priv) {
2626 if (priv) 2855 if (priv->bt_full_concurrent)
2627 valid_tx_ant = priv->hw_params.valid_tx_ant; 2856 valid_tx_ant = ANT_A;
2857 else
2858 valid_tx_ant = priv->hw_params.valid_tx_ant;
2859 }
2628 2860
2629 /* Fill rest of rate table */ 2861 /* Fill rest of rate table */
2630 while (index < LINK_QUAL_MAX_RETRY_NUM) { 2862 while (index < LINK_QUAL_MAX_RETRY_NUM) {
@@ -2639,7 +2871,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2639 rs_toggle_antenna(valid_tx_ant, 2871 rs_toggle_antenna(valid_tx_ant,
2640 &new_rate, &tbl_type)) 2872 &new_rate, &tbl_type))
2641 ant_toggle_cnt = 1; 2873 ant_toggle_cnt = 1;
2642} 2874 }
2643 2875
2644 /* Override next rate if needed for debug purposes */ 2876 /* Override next rate if needed for debug purposes */
2645 rs_dbgfs_set_mcs(lq_sta, &new_rate, index); 2877 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
@@ -2654,6 +2886,12 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2654 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type, 2886 rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
2655 &rate_idx); 2887 &rate_idx);
2656 2888
2889 if (priv && priv->bt_full_concurrent) {
2890 /* 1x1 only */
2891 tbl_type.ant_type =
2892 first_antenna(priv->hw_params.valid_tx_ant);
2893 }
2894
2657 /* Indicate to uCode which entries might be MIMO. 2895 /* Indicate to uCode which entries might be MIMO.
2658 * If initial rate was MIMO, this will finally end up 2896 * If initial rate was MIMO, this will finally end up
2659 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ 2897 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
@@ -2694,8 +2932,18 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2694 2932
2695 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 2933 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2696 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; 2934 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2935
2697 lq_cmd->agg_params.agg_time_limit = 2936 lq_cmd->agg_params.agg_time_limit =
2698 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); 2937 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
2938 /*
2939 * override the default if needed; the aggregation time limit
2940 * is passed to uCode in uSec
2941 */
2942 if (priv && priv->cfg->agg_time_limit &&
2943 priv->cfg->agg_time_limit >= LINK_QUAL_AGG_TIME_LIMIT_MIN &&
2944 priv->cfg->agg_time_limit <= LINK_QUAL_AGG_TIME_LIMIT_MAX)
2945 lq_cmd->agg_params.agg_time_limit =
2946 cpu_to_le16(priv->cfg->agg_time_limit);
2699} 2947}
2700 2948
2701static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 2949static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2760,6 +3008,9 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2760 char buf[64]; 3008 char buf[64];
2761 int buf_size; 3009 int buf_size;
2762 u32 parsed_rate; 3010 u32 parsed_rate;
3011 struct iwl_station_priv *sta_priv =
3012 container_of(lq_sta, struct iwl_station_priv, lq_sta);
3013 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
2763 3014
2764 priv = lq_sta->drv; 3015 priv = lq_sta->drv;
2765 memset(buf, 0, sizeof(buf)); 3016 memset(buf, 0, sizeof(buf));
@@ -2782,7 +3033,8 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2782 3033
2783 if (lq_sta->dbg_fixed_rate) { 3034 if (lq_sta->dbg_fixed_rate) {
2784 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); 3035 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2785 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false); 3036 iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
3037 false);
2786 } 3038 }
2787 3039
2788 return count; 3040 return count;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 8292f6d48ec6..357cdb26f16d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -432,6 +432,8 @@ struct iwl_lq_sta {
432 u32 last_rate_n_flags; 432 u32 last_rate_n_flags;
433 /* packets destined for this STA are aggregated */ 433 /* packets destined for this STA are aggregated */
434 u8 is_agg; 434 u8 is_agg;
435 /* BT traffic this sta was last updated in */
436 u8 last_bt_traffic;
435}; 437};
436 438
437static inline u8 num_of_ant(u8 mask) 439static inline u8 num_of_ant(u8 mask)
@@ -451,15 +453,6 @@ static inline u8 first_antenna(u8 mask)
451} 453}
452 454
453 455
454static inline u8 iwl_get_prev_ieee_rate(u8 rate_index)
455{
456 u8 rate = iwl_rates[rate_index].prev_ieee;
457
458 if (rate == IWL_RATE_INVALID)
459 rate = rate_index;
460 return rate;
461}
462
463static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index) 456static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
464{ 457{
465 u8 rate = iwl3945_rates[rate_index].prev_ieee; 458 u8 rate = iwl3945_rates[rate_index].prev_ieee;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
new file mode 100644
index 000000000000..07b2c6cadf51
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -0,0 +1,704 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-agn-tt.h"
44
45/* default Thermal Throttling transaction table
46 * Current state  |        Throttling Down        |    Throttling Up
47 *=============================================================================
48 *                Condition  Nxt State  Condition  Nxt State  Condition  Nxt State
49 *-----------------------------------------------------------------------------
50 *  IWL_TI_0      T >= 114   CT_KILL    114>T>=105 TI_1       N/A        N/A
51 *  IWL_TI_1      T >= 114   CT_KILL    114>T>=110 TI_2       T<=95      TI_0
52 *  IWL_TI_2      T >= 114   CT_KILL    N/A        N/A        T<=100     TI_1
53 *  IWL_CT_KILL   N/A        N/A        N/A        N/A        T<=95      TI_0
54 *=============================================================================
55 */
56static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
57 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
58 {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
59 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
60};
61static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
62 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
63 {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
64 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
65};
66static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
67 {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
68 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
69 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
70};
71static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
72 {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
73 {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
74 {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
75};
76
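[ Aside: each tt_range_N array above lists the candidate transitions out of thermal state N, matching the table in the comment. A hedged sketch of the lookup follows -- the field names mirror the initializers above, but the struct and helper names here are hypothetical and not part of the patch. ]

/* Pick the next thermal state: the first row whose [tt_low, tt_high]
 * temperature band contains temp wins; otherwise stay in cur_state. */
struct tt_trans_sketch { int next_state; int tt_low; int tt_high; };

static int tt_pick_next_state(const struct tt_trans_sketch *row, int n,
			      int temp, int cur_state)
{
	int i;

	for (i = 0; i < n; i++)
		if (temp >= row[i].tt_low && temp <= row[i].tt_high)
			return row[i].next_state;
	return cur_state;
}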
77/* Advanced Thermal Throttling default restriction table */
78static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = {
79 {IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true },
80 {IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true },
81 {IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false },
82 {IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false }
83};
84
85bool iwl_tt_is_low_power_state(struct iwl_priv *priv)
86{
87 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
88
89 if (tt->state >= IWL_TI_1)
90 return true;
91 return false;
92}
93
94u8 iwl_tt_current_power_mode(struct iwl_priv *priv)
95{
96 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
97
98 return tt->tt_power_mode;
99}
100
101bool iwl_ht_enabled(struct iwl_priv *priv)
102{
103 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
104 struct iwl_tt_restriction *restriction;
105
106 if (!priv->thermal_throttle.advanced_tt)
107 return true;
108 restriction = tt->restriction + tt->state;
109 return restriction->is_ht;
110}
111
112static bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
113{
114 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
115 bool within_margin = false;
116
117 if (priv->cfg->temperature_kelvin)
118 temp = KELVIN_TO_CELSIUS(priv->temperature);
119
120 if (!priv->thermal_throttle.advanced_tt)
121 within_margin = (temp + IWL_TT_CT_KILL_MARGIN) >=
122 CT_KILL_THRESHOLD_LEGACY;
123 else
124 within_margin = (temp + IWL_TT_CT_KILL_MARGIN) >=
125 CT_KILL_THRESHOLD;
126 return within_margin;
127}
128
129bool iwl_check_for_ct_kill(struct iwl_priv *priv)
130{
131 bool is_ct_kill = false;
132
133 if (iwl_within_ct_kill_margin(priv)) {
134 iwl_tt_enter_ct_kill(priv);
135 is_ct_kill = true;
136 }
137 return is_ct_kill;
138}
139
140enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
141{
142 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
143 struct iwl_tt_restriction *restriction;
144
145 if (!priv->thermal_throttle.advanced_tt)
146 return IWL_ANT_OK_MULTI;
147 restriction = tt->restriction + tt->state;
148 return restriction->tx_stream;
149}
150
151enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
152{
153 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
154 struct iwl_tt_restriction *restriction;
155
156 if (!priv->thermal_throttle.advanced_tt)
157 return IWL_ANT_OK_MULTI;
158 restriction = tt->restriction + tt->state;
159 return restriction->rx_stream;
160}
161
162#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
163#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */
164
165/*
166 * Toggle the bit to wake up uCode and check the temperature.
167 * If the temperature is below CT, uCode will stay awake and send a card
168 * state notification with the CT_KILL bit clear to tell Thermal Throttling
169 * Management to change state. Otherwise, uCode goes back to sleep without
170 * doing anything, and the driver should keep running the 5 second timer
171 * to wake uCode for a temperature check until the temperature drops below CT
172 */
173static void iwl_tt_check_exit_ct_kill(unsigned long data)
174{
175 struct iwl_priv *priv = (struct iwl_priv *)data;
176 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
177 unsigned long flags;
178
179 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
180 return;
181
182 if (tt->state == IWL_TI_CT_KILL) {
183 if (priv->thermal_throttle.ct_kill_toggle) {
184 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
185 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
186 priv->thermal_throttle.ct_kill_toggle = false;
187 } else {
188 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
189 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
190 priv->thermal_throttle.ct_kill_toggle = true;
191 }
192 iwl_read32(priv, CSR_UCODE_DRV_GP1);
193 spin_lock_irqsave(&priv->reg_lock, flags);
194 if (!iwl_grab_nic_access(priv))
195 iwl_release_nic_access(priv);
196 spin_unlock_irqrestore(&priv->reg_lock, flags);
197
198 /* Reschedule the ct_kill timer to occur in
199 * CT_KILL_EXIT_DURATION seconds to ensure we get a
200 * thermal update */
201 IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n");
202 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
203 jiffies + CT_KILL_EXIT_DURATION * HZ);
204 }
205}
206
207static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
208 bool stop)
209{
210 if (stop) {
211 IWL_DEBUG_POWER(priv, "Stop all queues\n");
212 if (priv->mac80211_registered)
213 ieee80211_stop_queues(priv->hw);
214 IWL_DEBUG_POWER(priv,
215 "Schedule 5 seconds CT_KILL Timer\n");
216 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
217 jiffies + CT_KILL_EXIT_DURATION * HZ);
218 } else {
219 IWL_DEBUG_POWER(priv, "Wake all queues\n");
220 if (priv->mac80211_registered)
221 ieee80211_wake_queues(priv->hw);
222 }
223}
224
225static void iwl_tt_ready_for_ct_kill(unsigned long data)
226{
227 struct iwl_priv *priv = (struct iwl_priv *)data;
228 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
229
230 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
231 return;
232
233 /* temperature timer expired, ready to go into CT_KILL state */
234 if (tt->state != IWL_TI_CT_KILL) {
235 IWL_DEBUG_POWER(priv, "entering CT_KILL state when "
236 "temperature timer expired\n");
237 tt->state = IWL_TI_CT_KILL;
238 set_bit(STATUS_CT_KILL, &priv->status);
239 iwl_perform_ct_kill_task(priv, true);
240 }
241}
242
243static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
244{
245 IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n");
246 /* make request to retrieve statistics information */
247 iwl_send_statistics_request(priv, CMD_SYNC, false);
248 /* Reschedule the ct_kill wait timer */
249 mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
250 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
251}
252
253#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY)
254#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100)
255#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90)
256
257/*
258 * Legacy thermal throttling
259 * 1) Avoid NIC destruction due to high temperatures
260 *	The chip will identify dangerously high temperatures that can
261 *	harm the device and will power down
262 * 2) Avoid NIC power-down due to high temperature
263 * Throttle early enough to lower the power consumption before
264 * drastic steps are needed
265 */
266static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
267{
268 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
269 enum iwl_tt_state old_state;
270
271#ifdef CONFIG_IWLWIFI_DEBUG
272 if ((tt->tt_previous_temp) &&
273 (temp > tt->tt_previous_temp) &&
274 ((temp - tt->tt_previous_temp) >
275 IWL_TT_INCREASE_MARGIN)) {
276 IWL_DEBUG_POWER(priv,
277 "Temperature increase %d degree Celsius\n",
278 (temp - tt->tt_previous_temp));
279 }
280#endif
281 old_state = tt->state;
282 /* in Celsius */
283 if (temp >= IWL_MINIMAL_POWER_THRESHOLD)
284 tt->state = IWL_TI_CT_KILL;
285 else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2)
286 tt->state = IWL_TI_2;
287 else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1)
288 tt->state = IWL_TI_1;
289 else
290 tt->state = IWL_TI_0;
291
292#ifdef CONFIG_IWLWIFI_DEBUG
293 tt->tt_previous_temp = temp;
294#endif
295 /* stop ct_kill_waiting_tm timer */
296 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
297 if (tt->state != old_state) {
298 switch (tt->state) {
299 case IWL_TI_0:
300 /*
301 * When the system is ready to go back to IWL_TI_0
302 * we only have to call iwl_power_update_mode() to
303 * do so.
304 */
305 break;
306 case IWL_TI_1:
307 tt->tt_power_mode = IWL_POWER_INDEX_3;
308 break;
309 case IWL_TI_2:
310 tt->tt_power_mode = IWL_POWER_INDEX_4;
311 break;
312 default:
313 tt->tt_power_mode = IWL_POWER_INDEX_5;
314 break;
315 }
316 mutex_lock(&priv->mutex);
317 if (old_state == IWL_TI_CT_KILL)
318 clear_bit(STATUS_CT_KILL, &priv->status);
319 if (tt->state != IWL_TI_CT_KILL &&
320 iwl_power_update_mode(priv, true)) {
321 /* TT state not updated
322 * try again during next temperature read
323 */
324 if (old_state == IWL_TI_CT_KILL)
325 set_bit(STATUS_CT_KILL, &priv->status);
326 tt->state = old_state;
327 IWL_ERR(priv, "Cannot update power mode, "
328 "TT state not updated\n");
329 } else {
330 if (tt->state == IWL_TI_CT_KILL) {
331 if (force) {
332 set_bit(STATUS_CT_KILL, &priv->status);
333 iwl_perform_ct_kill_task(priv, true);
334 } else {
335 iwl_prepare_ct_kill_task(priv);
336 tt->state = old_state;
337 }
338 } else if (old_state == IWL_TI_CT_KILL &&
339 tt->state != IWL_TI_CT_KILL)
340 iwl_perform_ct_kill_task(priv, false);
341 IWL_DEBUG_POWER(priv, "Temperature state changed %u\n",
342 tt->state);
343 IWL_DEBUG_POWER(priv, "Power Index change to %u\n",
344 tt->tt_power_mode);
345 }
346 mutex_unlock(&priv->mutex);
347 }
348}
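
The cascade above reduces to a simple threshold ladder. A standalone sketch, using the 90/100 values #defined above; CT_KILL_THRESHOLD_LEGACY is defined elsewhere, so the 110 C used here is only an assumption for illustration:

#include <stdio.h>

enum tt_state { TI_0, TI_1, TI_2, TI_CT_KILL };

static enum tt_state legacy_state(int temp_c)
{
	if (temp_c >= 110)	/* assumed CT_KILL_THRESHOLD_LEGACY */
		return TI_CT_KILL;
	if (temp_c >= 100)	/* IWL_REDUCED_PERFORMANCE_THRESHOLD_2 */
		return TI_2;
	if (temp_c >= 90)	/* IWL_REDUCED_PERFORMANCE_THRESHOLD_1 */
		return TI_1;
	return TI_0;
}

int main(void)
{
	/* 95 C -> TI_1 (1), 102 C -> TI_2 (2), as in the handler above */
	printf("%d %d\n", legacy_state(95), legacy_state(102));
	return 0;
}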
349
350/*
351 * Advanced thermal throttling
352 * 1) Avoid NIC destruction due to high temperatures
353 *	The chip will identify dangerously high temperatures that can
354 *	harm the device and will power down
355 * 2) Avoid NIC power-down due to high temperature
356 *	Throttle early enough to lower the power consumption before
357 *	drastic steps are needed
358 *	Actions include relaxing the power-down sleep thresholds and
359 * decreasing the number of TX streams
360 * 3) Avoid throughput performance impact as much as possible
361 *
362 *=============================================================================
363 * Condition Nxt State Condition Nxt State Condition Nxt State
364 *-----------------------------------------------------------------------------
365 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
366 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
367 * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1
368 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
369 *=============================================================================
370 */
371static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
372{
373 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
374 int i;
375 bool changed = false;
376 enum iwl_tt_state old_state;
377 struct iwl_tt_trans *transaction;
378
379 old_state = tt->state;
380 for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) {
381		/* Based on the current TT state, find the corresponding
382		 * transition table; each table has
383		 * (IWL_TI_STATE_MAX - 1) entries.
384		 * tt->transaction + (old_state * (IWL_TI_STATE_MAX - 1))
385		 * advances to the correct table. Then, based on the
386		 * current temperature, go through all the possible
387		 * (IWL_TI_STATE_MAX - 1) entries in the current table
388		 * to find the next state to transition to, if a
389		 * transition is needed at all.
390		 */
391 transaction = tt->transaction +
392 ((old_state * (IWL_TI_STATE_MAX - 1)) + i);
393 if (temp >= transaction->tt_low &&
394 temp <= transaction->tt_high) {
395#ifdef CONFIG_IWLWIFI_DEBUG
396 if ((tt->tt_previous_temp) &&
397 (temp > tt->tt_previous_temp) &&
398 ((temp - tt->tt_previous_temp) >
399 IWL_TT_INCREASE_MARGIN)) {
400 IWL_DEBUG_POWER(priv,
401 "Temperature increase %d "
402 "degree Celsius\n",
403 (temp - tt->tt_previous_temp));
404 }
405 tt->tt_previous_temp = temp;
406#endif
407 if (old_state !=
408 transaction->next_state) {
409 changed = true;
410 tt->state =
411 transaction->next_state;
412 }
413 break;
414 }
415 }
416 /* stop ct_kill_waiting_tm timer */
417 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
418 if (changed) {
419 if (tt->state >= IWL_TI_1) {
420 /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
421 tt->tt_power_mode = IWL_POWER_INDEX_5;
422
423 if (!iwl_ht_enabled(priv)) {
424 struct iwl_rxon_context *ctx;
425
426 for_each_context(priv, ctx) {
427 struct iwl_rxon_cmd *rxon;
428
429 rxon = &ctx->staging;
430
431 /* disable HT */
432 rxon->flags &= ~(
433 RXON_FLG_CHANNEL_MODE_MSK |
434 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
435 RXON_FLG_HT40_PROT_MSK |
436 RXON_FLG_HT_PROT_MSK);
437 }
438 } else {
439				/* restore HT according to the system
440				 * HT capability, in case it was
441				 * disabled earlier */
442 iwl_set_rxon_ht(priv, &priv->current_ht_config);
443 }
444
445 } else {
446 /*
447 * restore system power setting -- it will be
448 * recalculated automatically.
449 */
450
451			/* restore HT according to the system
452			 * HT capability, in case it was
453			 * disabled earlier */
454 iwl_set_rxon_ht(priv, &priv->current_ht_config);
455 }
456 mutex_lock(&priv->mutex);
457 if (old_state == IWL_TI_CT_KILL)
458 clear_bit(STATUS_CT_KILL, &priv->status);
459 if (tt->state != IWL_TI_CT_KILL &&
460 iwl_power_update_mode(priv, true)) {
461 /* TT state not updated
462 * try again during next temperature read
463 */
464 IWL_ERR(priv, "Cannot update power mode, "
465 "TT state not updated\n");
466 if (old_state == IWL_TI_CT_KILL)
467 set_bit(STATUS_CT_KILL, &priv->status);
468 tt->state = old_state;
469 } else {
470 IWL_DEBUG_POWER(priv,
471 "Thermal Throttling to new state: %u\n",
472 tt->state);
473 if (old_state != IWL_TI_CT_KILL &&
474 tt->state == IWL_TI_CT_KILL) {
475 if (force) {
476 IWL_DEBUG_POWER(priv,
477 "Enter IWL_TI_CT_KILL\n");
478 set_bit(STATUS_CT_KILL, &priv->status);
479 iwl_perform_ct_kill_task(priv, true);
480 } else {
481 iwl_prepare_ct_kill_task(priv);
482 tt->state = old_state;
483 }
484 } else if (old_state == IWL_TI_CT_KILL &&
485 tt->state != IWL_TI_CT_KILL) {
486 IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n");
487 iwl_perform_ct_kill_task(priv, false);
488 }
489 }
490 mutex_unlock(&priv->mutex);
491 }
492}
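
The table walk above indexes a flattened array: IWL_TI_STATE_MAX (4) rows of (IWL_TI_STATE_MAX - 1) = 3 entries each, stored back to back, so the row for a given state starts at state * 3. A reduced sketch with only a hypothetical IWL_TI_0 row, whose thresholds mirror the state diagram above (the driver's real tt_range_* tables are defined elsewhere in this file):

struct trans { int next_state; int lo, hi; };

/* hypothetical IWL_TI_0 row, states numbered 0..3 as in iwl_tt_state:
 * 0-104 C stay in TI_0, 105-113 C -> TI_1, 114 C and up -> CT_KILL */
static const struct trans table[3] = {
	{ 0,   0, 104 },
	{ 1, 105, 113 },
	{ 3, 114, 999 },
};

static int next_state(int state, int temp)
{
	/* row for 'state' starts at state * (IWL_TI_STATE_MAX - 1) */
	const struct trans *row = table + state * 3;
	int i;

	for (i = 0; i < 3; i++)
		if (temp >= row[i].lo && temp <= row[i].hi)
			return row[i].next_state;
	return state;	/* no matching entry: keep the current state */
}

With this row, next_state(0, 108) evaluates to 1 (TI_1), matching the middle column of the diagram.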
493
494/* Card State Notification indicates that the critical temperature
495 * was reached. If PSP is not enabled, no Thermal Throttling is
496 * performed; just set the GP1 bit to acknowledge the event.
497 * Otherwise, go into the IWL_TI_CT_KILL state.
498 * Since the Card State Notification does not provide any temperature
499 * reading, in legacy mode just pass the CT_KILL temperature to
500 * iwl_legacy_tt_handler(); in advanced mode, pass
501 * CT_KILL_THRESHOLD + 1 to make sure we move into the
502 * IWL_TI_CT_KILL state.
503 */
504static void iwl_bg_ct_enter(struct work_struct *work)
505{
506 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
507 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
508
509 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
510 return;
511
512 if (!iwl_is_ready(priv))
513 return;
514
515 if (tt->state != IWL_TI_CT_KILL) {
516 IWL_ERR(priv, "Device reached critical temperature "
517 "- ucode going to sleep!\n");
518 if (!priv->thermal_throttle.advanced_tt)
519 iwl_legacy_tt_handler(priv,
520 IWL_MINIMAL_POWER_THRESHOLD,
521 true);
522 else
523 iwl_advance_tt_handler(priv,
524 CT_KILL_THRESHOLD + 1, true);
525 }
526}
527
528/* Card State Notification indicates the device is out of the critical
529 * temperature range. Since the notification does not provide any
530 * temperature reading, pass IWL_REDUCED_PERFORMANCE_THRESHOLD_2 to
531 * iwl_legacy_tt_handler() to get out of the IWL_TI_CT_KILL state.
532 */
533static void iwl_bg_ct_exit(struct work_struct *work)
534{
535 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
536 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
537
538 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
539 return;
540
541 if (!iwl_is_ready(priv))
542 return;
543
544 /* stop ct_kill_exit_tm timer */
545 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
546
547 if (tt->state == IWL_TI_CT_KILL) {
548 IWL_ERR(priv,
549 "Device temperature below critical"
550 "- ucode awake!\n");
551 /*
552 * exit from CT_KILL state
553 * reset the current temperature reading
554 */
555 priv->temperature = 0;
556 if (!priv->thermal_throttle.advanced_tt)
557 iwl_legacy_tt_handler(priv,
558 IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
559 true);
560 else
561 iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
562 true);
563 }
564}
565
566void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
567{
568 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
569 return;
570
571 IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n");
572 queue_work(priv->workqueue, &priv->ct_enter);
573}
574EXPORT_SYMBOL(iwl_tt_enter_ct_kill);
575
576void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
577{
578 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
579 return;
580
581 IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n");
582 queue_work(priv->workqueue, &priv->ct_exit);
583}
584EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
585
586static void iwl_bg_tt_work(struct work_struct *work)
587{
588 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
590	s32 temp = priv->temperature; /* degrees Celsius unless otherwise specified */
590
591 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
592 return;
593
594 if (priv->cfg->temperature_kelvin)
595 temp = KELVIN_TO_CELSIUS(priv->temperature);
596
597 if (!priv->thermal_throttle.advanced_tt)
598 iwl_legacy_tt_handler(priv, temp, false);
599 else
600 iwl_advance_tt_handler(priv, temp, false);
601}
602
603void iwl_tt_handler(struct iwl_priv *priv)
604{
605 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
606 return;
607
608 IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n");
609 queue_work(priv->workqueue, &priv->tt_work);
610}
611EXPORT_SYMBOL(iwl_tt_handler);
612
613/* Thermal throttling initialization
614 * For advanced thermal throttling:
615 * Initialize Thermal Index and temperature threshold table
616 * Initialize thermal throttling restriction table
617 */
618void iwl_tt_initialize(struct iwl_priv *priv)
619{
620 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
621 int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
622 struct iwl_tt_trans *transaction;
623
624 IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");
625
626 memset(tt, 0, sizeof(struct iwl_tt_mgmt));
627
628 tt->state = IWL_TI_0;
629 init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
630 priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
631 priv->thermal_throttle.ct_kill_exit_tm.function =
632 iwl_tt_check_exit_ct_kill;
633 init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
634 priv->thermal_throttle.ct_kill_waiting_tm.data =
635 (unsigned long)priv;
636 priv->thermal_throttle.ct_kill_waiting_tm.function =
637 iwl_tt_ready_for_ct_kill;
638 /* setup deferred ct kill work */
639 INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
640 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
641 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
642
643 if (priv->cfg->adv_thermal_throttle) {
644 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
645 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
646 IWL_TI_STATE_MAX, GFP_KERNEL);
647 tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) *
648 IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1),
649 GFP_KERNEL);
650 if (!tt->restriction || !tt->transaction) {
651			IWL_ERR(priv, "Falling back to Legacy Throttling\n");
652 priv->thermal_throttle.advanced_tt = false;
653 kfree(tt->restriction);
654 tt->restriction = NULL;
655 kfree(tt->transaction);
656 tt->transaction = NULL;
657 } else {
658 transaction = tt->transaction +
659 (IWL_TI_0 * (IWL_TI_STATE_MAX - 1));
660 memcpy(transaction, &tt_range_0[0], size);
661 transaction = tt->transaction +
662 (IWL_TI_1 * (IWL_TI_STATE_MAX - 1));
663 memcpy(transaction, &tt_range_1[0], size);
664 transaction = tt->transaction +
665 (IWL_TI_2 * (IWL_TI_STATE_MAX - 1));
666 memcpy(transaction, &tt_range_2[0], size);
667 transaction = tt->transaction +
668 (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1));
669 memcpy(transaction, &tt_range_3[0], size);
670 size = sizeof(struct iwl_tt_restriction) *
671 IWL_TI_STATE_MAX;
672 memcpy(tt->restriction,
673 &restriction_range[0], size);
674 priv->thermal_throttle.advanced_tt = true;
675 }
676 } else {
677 IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
678 priv->thermal_throttle.advanced_tt = false;
679 }
680}
681EXPORT_SYMBOL(iwl_tt_initialize);
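
For reference, with IWL_TI_STATE_MAX == 4 the two kzalloc() calls above allocate a 4-entry restriction table and a 4 x 3 = 12-entry flattened transition table, and each per-state memcpy() covers 3 entries. The same arithmetic as a trivial standalone check:

#include <stdio.h>

#define IWL_TI_STATE_MAX 4	/* from iwl-agn-tt.h */

int main(void)
{
	int restriction_entries = IWL_TI_STATE_MAX;		/* 4 */
	int transition_entries = IWL_TI_STATE_MAX *
				 (IWL_TI_STATE_MAX - 1);	/* 12 */
	int entries_per_state = IWL_TI_STATE_MAX - 1;		/* 3 */

	/* prints: 4 12 3 */
	printf("%d %d %d\n", restriction_entries, transition_entries,
	       entries_per_state);
	return 0;
}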
682
683/* clean up thermal throttling management-related memory and timers */
684void iwl_tt_exit(struct iwl_priv *priv)
685{
686 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
687
688 /* stop ct_kill_exit_tm timer if activated */
689 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
690 /* stop ct_kill_waiting_tm timer if activated */
691 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
692 cancel_work_sync(&priv->tt_work);
693 cancel_work_sync(&priv->ct_enter);
694 cancel_work_sync(&priv->ct_exit);
695
696 if (priv->thermal_throttle.advanced_tt) {
697		/* free advanced thermal throttling memory */
698 kfree(tt->restriction);
699 tt->restriction = NULL;
700 kfree(tt->transaction);
701 tt->transaction = NULL;
702 }
703}
704EXPORT_SYMBOL(iwl_tt_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
new file mode 100644
index 000000000000..d55060427cac
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
@@ -0,0 +1,129 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_tt_setting_h__
29#define __iwl_tt_setting_h__
30
31#include "iwl-commands.h"
32
33#define IWL_ABSOLUTE_ZERO 0
34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
35#define IWL_TT_INCREASE_MARGIN 5
36#define IWL_TT_CT_KILL_MARGIN 3
37
38enum iwl_antenna_ok {
39 IWL_ANT_OK_NONE,
40 IWL_ANT_OK_SINGLE,
41 IWL_ANT_OK_MULTI,
42};
43
44/* Thermal Throttling State Machine states */
45enum iwl_tt_state {
46 IWL_TI_0, /* normal temperature, system power state */
47 IWL_TI_1, /* high temperature detect, low power state */
48 IWL_TI_2, /* higher temperature detected, lower power state */
49 IWL_TI_CT_KILL, /* critical temperature detected, lowest power state */
50 IWL_TI_STATE_MAX
51};
52
53/**
54 * struct iwl_tt_restriction - Thermal Throttling restriction table
55 * @tx_stream: number of tx streams allowed
56 * @is_ht: HT enable/disable
57 * @rx_stream: number of rx streams allowed
58 *
59 * This table is used by advanced thermal throttling management
60 * based on the current thermal throttling state, and determines
61 * the number of tx/rx streams and the status of HT operation.
62 */
63struct iwl_tt_restriction {
64 enum iwl_antenna_ok tx_stream;
65 enum iwl_antenna_ok rx_stream;
66 bool is_ht;
67};
68
69/**
70 * struct iwl_tt_trans - Thermal Throttling transaction table
71 * @next_state: next thermal throttling mode
72 * @tt_low: low temperature threshold to change state
73 * @tt_high: high temperature threshold to change state
74 *
75 * This is used by the advanced thermal throttling algorithm
76 * to determine the next thermal state to go to, based on the
77 * current temperature.
78 */
79struct iwl_tt_trans {
80 enum iwl_tt_state next_state;
81 u32 tt_low;
82 u32 tt_high;
83};
84
85/**
86 * struct iwl_tt_mgmt - Thermal Throttling Management structure
87 * @advanced_tt: advanced thermal throttle required
88 * @state: current Thermal Throttling state
89 * @tt_power_mode: Thermal Throttling power mode index, used to set the
90 *		power level when the thermal throttling state is not IWL_TI_0;
91 *		set to a different power mode based on the current tt state
92 * @tt_previous_temp: last measured temperature
93 * @restriction: ptr to the restriction table, used by advanced thermal
94 *		throttling to determine how many tx/rx streams should be used
95 *		in the current tt state and whether HT may be enabled
96 * @transaction: ptr to the transition table, used by advanced thermal
97 *		throttling for state transitions
98 * @ct_kill_toggle: used to toggle the CSR bit when checking uCode
99 *		temperature
100 * @ct_kill_exit_tm: timer to exit thermal kill
101 * @ct_kill_waiting_tm: timer to signal readiness to enter CT kill
102 */
103struct iwl_tt_mgmt {
104 enum iwl_tt_state state;
105 bool advanced_tt;
106 u8 tt_power_mode;
107 bool ct_kill_toggle;
108#ifdef CONFIG_IWLWIFI_DEBUG
109 s32 tt_previous_temp;
110#endif
111 struct iwl_tt_restriction *restriction;
112 struct iwl_tt_trans *transaction;
113 struct timer_list ct_kill_exit_tm;
114 struct timer_list ct_kill_waiting_tm;
115};
116
117u8 iwl_tt_current_power_mode(struct iwl_priv *priv);
118bool iwl_tt_is_low_power_state(struct iwl_priv *priv);
119bool iwl_ht_enabled(struct iwl_priv *priv);
120bool iwl_check_for_ct_kill(struct iwl_priv *priv);
121enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
122enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
123void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
124void iwl_tt_exit_ct_kill(struct iwl_priv *priv);
125void iwl_tt_handler(struct iwl_priv *priv);
126void iwl_tt_initialize(struct iwl_priv *priv);
127void iwl_tt_exit(struct iwl_priv *priv);
128
129#endif /* __iwl_tt_setting_h__ */
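
Putting the interface above together, a sketch of the expected call sequence over a driver's lifetime; the iwl_tt_* functions are the ones declared above, while the example_* wrappers are purely illustrative and compile only inside the driver tree where struct iwl_priv is defined:

/* illustrative skeleton only -- not part of the driver */
static int example_setup(struct iwl_priv *priv)
{
	/* sets up timers, work items and, if supported, the advanced
	 * restriction/transition tables */
	iwl_tt_initialize(priv);
	return 0;
}

static void example_temperature_notification(struct iwl_priv *priv)
{
	/* queues tt_work; the handler picks legacy vs. advanced TT */
	iwl_tt_handler(priv);
}

static void example_teardown(struct iwl_priv *priv)
{
	/* stops both timers, flushes the work items and frees the
	 * advanced TT tables */
	iwl_tt_exit(priv);
}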
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 69155aa448fb..5950184d9860 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -71,18 +71,6 @@ static const u8 tid_to_ac[] = {
71 2, 3, 3, 2, 1, 1, 0, 0 71 2, 3, 3, 2, 1, 1, 0, 0
72}; 72};
73 73
74static const u8 ac_to_fifo[] = {
75 IWL_TX_FIFO_VO,
76 IWL_TX_FIFO_VI,
77 IWL_TX_FIFO_BE,
78 IWL_TX_FIFO_BK,
79};
80
81static inline int get_fifo_from_ac(u8 ac)
82{
83 return ac_to_fifo[ac];
84}
85
86static inline int get_ac_from_tid(u16 tid) 74static inline int get_ac_from_tid(u16 tid)
87{ 75{
88 if (likely(tid < ARRAY_SIZE(tid_to_ac))) 76 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
@@ -92,10 +80,10 @@ static inline int get_ac_from_tid(u16 tid)
92 return -EINVAL; 80 return -EINVAL;
93} 81}
94 82
95static inline int get_fifo_from_tid(u16 tid) 83static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
96{ 84{
97 if (likely(tid < ARRAY_SIZE(tid_to_ac))) 85 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
98 return get_fifo_from_ac(tid_to_ac[tid]); 86 return ctx->ac_to_fifo[tid_to_ac[tid]];
99 87
100 /* no support for TIDs 8-15 yet */ 88 /* no support for TIDs 8-15 yet */
101 return -EINVAL; 89 return -EINVAL;
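
The hunk above replaces the old global ac_to_fifo[] with a per-context mapping, making the lookup a two-step TID -> AC -> FIFO translation. A reduced sketch; tid_to_ac matches the table kept by the driver, the context struct is cut down to the one field used here, and the FIFO numbers are assumed:

struct ctx { const unsigned char *ac_to_fifo; };

static const unsigned char tid_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
static const unsigned char bss_ac_to_fifo[4] = { 3, 2, 1, 0 }; /* assumed */

static int fifo_from_tid(const struct ctx *c, unsigned int tid)
{
	if (tid < sizeof(tid_to_ac))
		return c->ac_to_fifo[tid_to_ac[tid]];
	return -1;	/* TIDs 8-15 are not supported, as in the driver */
}

With struct ctx bss = { bss_ac_to_fifo }, fifo_from_tid(&bss, 0) maps TID 0 through AC 2 to FIFO 1 under these assumed values.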
@@ -118,7 +106,7 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
118 106
119 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); 107 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
120 108
121 if (txq_id != IWL_CMD_QUEUE_NUM) { 109 if (txq_id != priv->cmd_queue) {
122 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id; 110 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
123 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl; 111 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
124 112
@@ -155,7 +143,7 @@ void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
155 143
156 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 144 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
157 145
158 if (txq_id != IWL_CMD_QUEUE_NUM) 146 if (txq_id != priv->cmd_queue)
159 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; 147 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
160 148
161 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 149 bc_ent = cpu_to_le16(1 | (sta_id << 12));
@@ -333,19 +321,15 @@ void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
333 iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask); 321 iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
334} 322}
335 323
336static inline int get_queue_from_ac(u16 ac)
337{
338 return ac;
339}
340
341/* 324/*
342 * handle build REPLY_TX command notification. 325 * handle build REPLY_TX command notification.
343 */ 326 */
344static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, 327static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
345 struct iwl_tx_cmd *tx_cmd, 328 struct sk_buff *skb,
346 struct ieee80211_tx_info *info, 329 struct iwl_tx_cmd *tx_cmd,
347 struct ieee80211_hdr *hdr, 330 struct ieee80211_tx_info *info,
348 u8 std_id) 331 struct ieee80211_hdr *hdr,
332 u8 std_id)
349{ 333{
350 __le16 fc = hdr->frame_control; 334 __le16 fc = hdr->frame_control;
351 __le32 tx_flags = tx_cmd->tx_flags; 335 __le32 tx_flags = tx_cmd->tx_flags;
@@ -365,6 +349,12 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
365 349
366 if (ieee80211_is_back_req(fc)) 350 if (ieee80211_is_back_req(fc))
367 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; 351 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
352 else if (info->band == IEEE80211_BAND_2GHZ &&
353 priv->cfg->advanced_bt_coexist &&
354 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
355 ieee80211_is_reassoc_req(fc) ||
356 skb->protocol == cpu_to_be16(ETH_P_PAE)))
357 tx_flags |= TX_CMD_FLG_IGNORE_BT;
368 358
369 359
370 tx_cmd->sta_id = std_id; 360 tx_cmd->sta_id = std_id;
@@ -454,7 +444,12 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
454 rate_flags |= RATE_MCS_CCK_MSK; 444 rate_flags |= RATE_MCS_CCK_MSK;
455 445
456 /* Set up antennas */ 446 /* Set up antennas */
457 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 447 if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
448 /* operated as 1x1 in full concurrency mode */
449 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
450 first_antenna(priv->hw_params.valid_tx_ant));
451 } else
452 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
458 priv->hw_params.valid_tx_ant); 453 priv->hw_params.valid_tx_ant);
459 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 454 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
460 455
@@ -470,8 +465,8 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
470{ 465{
471 struct ieee80211_key_conf *keyconf = info->control.hw_key; 466 struct ieee80211_key_conf *keyconf = info->control.hw_key;
472 467
473 switch (keyconf->alg) { 468 switch (keyconf->cipher) {
474 case ALG_CCMP: 469 case WLAN_CIPHER_SUITE_CCMP:
475 tx_cmd->sec_ctl = TX_CMD_SEC_CCM; 470 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
476 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); 471 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
477 if (info->flags & IEEE80211_TX_CTL_AMPDU) 472 if (info->flags & IEEE80211_TX_CTL_AMPDU)
@@ -479,20 +474,20 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
479 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); 474 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
480 break; 475 break;
481 476
482 case ALG_TKIP: 477 case WLAN_CIPHER_SUITE_TKIP:
483 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; 478 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
484 ieee80211_get_tkip_key(keyconf, skb_frag, 479 ieee80211_get_tkip_key(keyconf, skb_frag,
485 IEEE80211_TKIP_P2_KEY, tx_cmd->key); 480 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
486 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); 481 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
487 break; 482 break;
488 483
489 case ALG_WEP: 484 case WLAN_CIPHER_SUITE_WEP104:
485 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
486 /* fall through */
487 case WLAN_CIPHER_SUITE_WEP40:
490 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | 488 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
491 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); 489 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
492 490
493 if (keyconf->keylen == WEP_KEY_LEN_128)
494 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
495
496 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); 491 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
497 492
498 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " 493 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
@@ -500,7 +495,7 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
500 break; 495 break;
501 496
502 default: 497 default:
503 IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg); 498 IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
504 break; 499 break;
505 } 500 }
506} 501}
@@ -519,6 +514,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
519 struct iwl_device_cmd *out_cmd; 514 struct iwl_device_cmd *out_cmd;
520 struct iwl_cmd_meta *out_meta; 515 struct iwl_cmd_meta *out_meta;
521 struct iwl_tx_cmd *tx_cmd; 516 struct iwl_tx_cmd *tx_cmd;
517 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
522 int swq_id, txq_id; 518 int swq_id, txq_id;
523 dma_addr_t phys_addr; 519 dma_addr_t phys_addr;
524 dma_addr_t txcmd_phys; 520 dma_addr_t txcmd_phys;
@@ -533,6 +529,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
533 u8 *qc = NULL; 529 u8 *qc = NULL;
534 unsigned long flags; 530 unsigned long flags;
535 531
532 if (info->control.vif)
533 ctx = iwl_rxon_ctx_from_vif(info->control.vif);
534
536 spin_lock_irqsave(&priv->lock, flags); 535 spin_lock_irqsave(&priv->lock, flags);
537 if (iwl_is_rfkill(priv)) { 536 if (iwl_is_rfkill(priv)) {
538 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); 537 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
@@ -553,7 +552,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
553 hdr_len = ieee80211_hdrlen(fc); 552 hdr_len = ieee80211_hdrlen(fc);
554 553
555 /* Find index into station table for destination station */ 554 /* Find index into station table for destination station */
556 sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta); 555 sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
557 if (sta_id == IWL_INVALID_STATION) { 556 if (sta_id == IWL_INVALID_STATION) {
558 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 557 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
559 hdr->addr1); 558 hdr->addr1);
@@ -565,8 +564,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
565 if (sta) 564 if (sta)
566 sta_priv = (void *)sta->drv_priv; 565 sta_priv = (void *)sta->drv_priv;
567 566
568 if (sta_priv && sta_id != priv->hw_params.bcast_sta_id && 567 if (sta_priv && sta_priv->asleep) {
569 sta_priv->asleep) {
570 WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)); 568 WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
571 /* 569 /*
572 * This sends an asynchronous command to the device, 570 * This sends an asynchronous command to the device,
@@ -580,7 +578,20 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
580 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); 578 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
581 } 579 }
582 580
583 txq_id = get_queue_from_ac(skb_get_queue_mapping(skb)); 581 /*
582 * Send this frame after DTIM -- there's a special queue
583 * reserved for this for contexts that support AP mode.
584 */
585 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
586 txq_id = ctx->mcast_queue;
587 /*
588 * The microcode will clear the more data
589 * bit in the last frame it transmits.
590 */
591 hdr->frame_control |=
592 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
593 } else
594 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
584 595
585 /* irqs already disabled/saved above when locking priv->lock */ 596 /* irqs already disabled/saved above when locking priv->lock */
586 spin_lock(&priv->sta_lock); 597 spin_lock(&priv->sta_lock);
@@ -625,6 +636,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
625 /* Set up driver data for this TFD */ 636 /* Set up driver data for this TFD */
626 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 637 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
627 txq->txb[q->write_ptr].skb = skb; 638 txq->txb[q->write_ptr].skb = skb;
639 txq->txb[q->write_ptr].ctx = ctx;
628 640
629 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 641 /* Set up first empty entry in queue's array of Tx/cmd buffers */
630 out_cmd = txq->cmd[q->write_ptr]; 642 out_cmd = txq->cmd[q->write_ptr];
@@ -655,7 +667,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
655 iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); 667 iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
656 668
657 /* TODO need this for burst mode later on */ 669 /* TODO need this for burst mode later on */
658 iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id); 670 iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
659 iwl_dbg_log_tx_data_frame(priv, len, hdr); 671 iwl_dbg_log_tx_data_frame(priv, len, hdr);
660 672
661 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc); 673 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
@@ -813,7 +825,7 @@ void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
813 /* Tx queues */ 825 /* Tx queues */
814 if (priv->txq) { 826 if (priv->txq) {
815 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 827 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
816 if (txq_id == IWL_CMD_QUEUE_NUM) 828 if (txq_id == priv->cmd_queue)
817 iwl_cmd_queue_free(priv); 829 iwl_cmd_queue_free(priv);
818 else 830 else
819 iwl_tx_queue_free(priv, txq_id); 831 iwl_tx_queue_free(priv, txq_id);
@@ -870,9 +882,9 @@ int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
870 882
871 spin_unlock_irqrestore(&priv->lock, flags); 883 spin_unlock_irqrestore(&priv->lock, flags);
872 884
873 /* Alloc and init all Tx queues, including the command queue (#4) */ 885 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
874 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 886 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
875 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 887 slots_num = (txq_id == priv->cmd_queue) ?
876 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 888 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
877 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 889 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
878 txq_id); 890 txq_id);
@@ -910,7 +922,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
910 922
911 /* Alloc and init all Tx queues, including the command queue (#4) */ 923 /* Alloc and init all Tx queues, including the command queue (#4) */
912 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 924 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
913 slots_num = txq_id == IWL_CMD_QUEUE_NUM ? 925 slots_num = txq_id == priv->cmd_queue ?
914 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 926 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
915 iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); 927 iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
916 } 928 }
@@ -968,7 +980,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
968 unsigned long flags; 980 unsigned long flags;
969 struct iwl_tid_data *tid_data; 981 struct iwl_tid_data *tid_data;
970 982
971 tx_fifo = get_fifo_from_tid(tid); 983 tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
972 if (unlikely(tx_fifo < 0)) 984 if (unlikely(tx_fifo < 0))
973 return tx_fifo; 985 return tx_fifo;
974 986
@@ -1024,12 +1036,12 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
1024int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, 1036int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
1025 struct ieee80211_sta *sta, u16 tid) 1037 struct ieee80211_sta *sta, u16 tid)
1026{ 1038{
1027 int tx_fifo_id, txq_id, sta_id, ssn = -1; 1039 int tx_fifo_id, txq_id, sta_id, ssn;
1028 struct iwl_tid_data *tid_data; 1040 struct iwl_tid_data *tid_data;
1029 int write_ptr, read_ptr; 1041 int write_ptr, read_ptr;
1030 unsigned long flags; 1042 unsigned long flags;
1031 1043
1032 tx_fifo_id = get_fifo_from_tid(tid); 1044 tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
1033 if (unlikely(tx_fifo_id < 0)) 1045 if (unlikely(tx_fifo_id < 0))
1034 return tx_fifo_id; 1046 return tx_fifo_id;
1035 1047
@@ -1042,21 +1054,26 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
1042 1054
1043 spin_lock_irqsave(&priv->sta_lock, flags); 1055 spin_lock_irqsave(&priv->sta_lock, flags);
1044 1056
1045 if (priv->stations[sta_id].tid[tid].agg.state ==
1046 IWL_EMPTYING_HW_QUEUE_ADDBA) {
1047 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
1048 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1049 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1050 spin_unlock_irqrestore(&priv->sta_lock, flags);
1051 return 0;
1052 }
1053
1054 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
1055 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
1056
1057 tid_data = &priv->stations[sta_id].tid[tid]; 1057 tid_data = &priv->stations[sta_id].tid[tid];
1058 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; 1058 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
1059 txq_id = tid_data->agg.txq_id; 1059 txq_id = tid_data->agg.txq_id;
1060
1061 switch (priv->stations[sta_id].tid[tid].agg.state) {
1062 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1063 /*
1064 * This can happen if the peer stops aggregation
1065 * again before we've had a chance to drain the
1066 * queue we selected previously, i.e. before the
1067 * session was really started completely.
1068 */
1069 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
1070 goto turn_off;
1071 case IWL_AGG_ON:
1072 break;
1073 default:
1074 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
1075 }
1076
1060 write_ptr = priv->txq[txq_id].q.write_ptr; 1077 write_ptr = priv->txq[txq_id].q.write_ptr;
1061 read_ptr = priv->txq[txq_id].q.read_ptr; 1078 read_ptr = priv->txq[txq_id].q.read_ptr;
1062 1079
@@ -1070,6 +1087,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
1070 } 1087 }
1071 1088
1072 IWL_DEBUG_HT(priv, "HW queue is empty\n"); 1089 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1090 turn_off:
1073 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; 1091 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1074 1092
1075 /* do not restore/save irqs */ 1093 /* do not restore/save irqs */
@@ -1098,6 +1116,9 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
1098 struct iwl_queue *q = &priv->txq[txq_id].q; 1116 struct iwl_queue *q = &priv->txq[txq_id].q;
1099 u8 *addr = priv->stations[sta_id].sta.sta.addr; 1117 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1100 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; 1118 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1119 struct iwl_rxon_context *ctx;
1120
1121 ctx = &priv->contexts[priv->stations[sta_id].ctxid];
1101 1122
1102 lockdep_assert_held(&priv->sta_lock); 1123 lockdep_assert_held(&priv->sta_lock);
1103 1124
@@ -1108,12 +1129,12 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
1108 if ((txq_id == tid_data->agg.txq_id) && 1129 if ((txq_id == tid_data->agg.txq_id) &&
1109 (q->read_ptr == q->write_ptr)) { 1130 (q->read_ptr == q->write_ptr)) {
1110 u16 ssn = SEQ_TO_SN(tid_data->seq_number); 1131 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1111 int tx_fifo = get_fifo_from_tid(tid); 1132 int tx_fifo = get_fifo_from_tid(ctx, tid);
1112 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n"); 1133 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
1113 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, 1134 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1114 ssn, tx_fifo); 1135 ssn, tx_fifo);
1115 tid_data->agg.state = IWL_AGG_OFF; 1136 tid_data->agg.state = IWL_AGG_OFF;
1116 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid); 1137 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1117 } 1138 }
1118 break; 1139 break;
1119 case IWL_EMPTYING_HW_QUEUE_ADDBA: 1140 case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1121,7 +1142,7 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
1121 if (tid_data->tfds_in_queue == 0) { 1142 if (tid_data->tfds_in_queue == 0) {
1122 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n"); 1143 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
1123 tid_data->agg.state = IWL_AGG_ON; 1144 tid_data->agg.state = IWL_AGG_ON;
1124 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid); 1145 ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1125 } 1146 }
1126 break; 1147 break;
1127 } 1148 }
@@ -1129,14 +1150,14 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
1129 return 0; 1150 return 0;
1130} 1151}
1131 1152
1132static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb) 1153static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info)
1133{ 1154{
1134 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1155 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1135 struct ieee80211_sta *sta; 1156 struct ieee80211_sta *sta;
1136 struct iwl_station_priv *sta_priv; 1157 struct iwl_station_priv *sta_priv;
1137 1158
1138 rcu_read_lock(); 1159 rcu_read_lock();
1139 sta = ieee80211_find_sta(priv->vif, hdr->addr1); 1160 sta = ieee80211_find_sta(tx_info->ctx->vif, hdr->addr1);
1140 if (sta) { 1161 if (sta) {
1141 sta_priv = (void *)sta->drv_priv; 1162 sta_priv = (void *)sta->drv_priv;
1142 /* avoid atomic ops if this isn't a client */ 1163 /* avoid atomic ops if this isn't a client */
@@ -1146,7 +1167,7 @@ static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
1146 } 1167 }
1147 rcu_read_unlock(); 1168 rcu_read_unlock();
1148 1169
1149 ieee80211_tx_status_irqsafe(priv->hw, skb); 1170 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
1150} 1171}
1151 1172
1152int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) 1173int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
@@ -1169,7 +1190,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1169 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1190 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1170 1191
1171 tx_info = &txq->txb[txq->q.read_ptr]; 1192 tx_info = &txq->txb[txq->q.read_ptr];
1172 iwlagn_tx_status(priv, tx_info->skb); 1193 iwlagn_tx_status(priv, tx_info);
1173 1194
1174 hdr = (struct ieee80211_hdr *)tx_info->skb->data; 1195 hdr = (struct ieee80211_hdr *)tx_info->skb->data;
1175 if (hdr && ieee80211_is_data_qos(hdr->frame_control)) 1196 if (hdr && ieee80211_is_data_qos(hdr->frame_control))
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 6f77441cb65a..8bfb0495a76b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -52,6 +52,19 @@ static const s8 iwlagn_default_queue_to_tx_fifo[] = {
52 IWL_TX_FIFO_UNUSED, 52 IWL_TX_FIFO_UNUSED,
53}; 53};
54 54
55static const s8 iwlagn_ipan_queue_to_tx_fifo[] = {
56 IWL_TX_FIFO_VO,
57 IWL_TX_FIFO_VI,
58 IWL_TX_FIFO_BE,
59 IWL_TX_FIFO_BK,
60 IWL_TX_FIFO_BK_IPAN,
61 IWL_TX_FIFO_BE_IPAN,
62 IWL_TX_FIFO_VI_IPAN,
63 IWL_TX_FIFO_VO_IPAN,
64 IWL_TX_FIFO_BE_IPAN,
65 IWLAGN_CMD_FIFO_NUM,
66};
67
55static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { 68static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
56 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP, 69 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
57 0, COEX_UNASSOC_IDLE_FLAGS}, 70 0, COEX_UNASSOC_IDLE_FLAGS},
@@ -294,6 +307,17 @@ void iwlagn_init_alive_start(struct iwl_priv *priv)
294 goto restart; 307 goto restart;
295 } 308 }
296 309
310 if (priv->cfg->advanced_bt_coexist) {
311		/*
312		 * Tell uCode we are ready to perform calibration;
313		 * this must be done before any calibration.
314		 * There is no need to close the envelope since we
315		 * are going to load the runtime uCode later.
316		 */
317 iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
318 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
319
320 }
297 iwlagn_send_calib_cfg(priv); 321 iwlagn_send_calib_cfg(priv);
298 return; 322 return;
299 323
@@ -329,8 +353,54 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
329 sizeof(coex_cmd), &coex_cmd); 353 sizeof(coex_cmd), &coex_cmd);
330} 354}
331 355
356static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
357 ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
358 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
359 ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
360 (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
361 ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
362 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
363 ((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
364 (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
365 ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
366 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
367 ((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
368 (1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
369 ((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
370 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
371 ((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
372 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
373 ((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
374 (0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
375 0, 0, 0, 0, 0, 0, 0
376};
377
378void iwlagn_send_prio_tbl(struct iwl_priv *priv)
379{
380 struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
381
382 memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
383 sizeof(iwlagn_bt_prio_tbl));
384 if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PRIO_TABLE,
385 sizeof(prio_tbl_cmd), &prio_tbl_cmd))
386 IWL_ERR(priv, "failed to send BT prio tbl command\n");
387}
388
389void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
390{
391 struct iwl_bt_coex_prot_env_cmd env_cmd;
392
393 env_cmd.action = action;
394 env_cmd.type = type;
395 if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PROT_ENV,
396 sizeof(env_cmd), &env_cmd))
397 IWL_ERR(priv, "failed to send BT env command\n");
398}
399
400
332int iwlagn_alive_notify(struct iwl_priv *priv) 401int iwlagn_alive_notify(struct iwl_priv *priv)
333{ 402{
403 const s8 *queues;
334 u32 a; 404 u32 a;
335 unsigned long flags; 405 unsigned long flags;
336 int i, chan; 406 int i, chan;
@@ -365,7 +435,7 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
365 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 435 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
366 436
367 iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, 437 iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
368 IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num)); 438 IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
369 iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0); 439 iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
370 440
371 /* initiate the queues */ 441 /* initiate the queues */
@@ -391,7 +461,13 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
391 /* Activate all Tx DMA/FIFO channels */ 461 /* Activate all Tx DMA/FIFO channels */
392 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7)); 462 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
393 463
394 iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 464 /* map queues to FIFOs */
465 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
466 queues = iwlagn_ipan_queue_to_tx_fifo;
467 else
468 queues = iwlagn_default_queue_to_tx_fifo;
469
470 iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);
395 471
396 /* make sure all queue are not stopped */ 472 /* make sure all queue are not stopped */
397 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); 473 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
@@ -400,11 +476,12 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
400 476
401 /* reset to 0 to enable all the queue first */ 477 /* reset to 0 to enable all the queue first */
402 priv->txq_ctx_active_msk = 0; 478 priv->txq_ctx_active_msk = 0;
403 /* map qos queues to fifos one-to-one */ 479
404 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10); 480 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
481 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
405 482
406 for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) { 483 for (i = 0; i < 10; i++) {
407 int ac = iwlagn_default_queue_to_tx_fifo[i]; 484 int ac = queues[i];
408 485
409 iwl_txq_ctx_activate(priv, i); 486 iwl_txq_ctx_activate(priv, i);
410 487
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 10d7b9b7f064..646864a26eaf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/pci.h> 35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
36#include <linux/slab.h> 37#include <linux/slab.h>
37#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
38#include <linux/delay.h> 39#include <linux/delay.h>
@@ -86,6 +87,9 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
86MODULE_LICENSE("GPL"); 87MODULE_LICENSE("GPL");
87MODULE_ALIAS("iwl4965"); 88MODULE_ALIAS("iwl4965");
88 89
90static int iwlagn_ant_coupling;
91static bool iwlagn_bt_ch_announce = 1;
92
89/** 93/**
90 * iwl_commit_rxon - commit staging_rxon to hardware 94 * iwl_commit_rxon - commit staging_rxon to hardware
91 * 95 *
@@ -94,21 +98,25 @@ MODULE_ALIAS("iwl4965");
94 * function correctly transitions out of the RXON_ASSOC_MSK state if 98 * function correctly transitions out of the RXON_ASSOC_MSK state if
95 * a HW tune is required based on the RXON structure changes. 99 * a HW tune is required based on the RXON structure changes.
96 */ 100 */
97int iwl_commit_rxon(struct iwl_priv *priv) 101int iwl_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
98{ 102{
99 /* cast away the const for active_rxon in this function */ 103 /* cast away the const for active_rxon in this function */
100 struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon; 104 struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
101 int ret; 105 int ret;
102 bool new_assoc = 106 bool new_assoc =
103 !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK); 107 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
108 bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
104 109
105 if (!iwl_is_alive(priv)) 110 if (!iwl_is_alive(priv))
106 return -EBUSY; 111 return -EBUSY;
107 112
113 if (!ctx->is_active)
114 return 0;
115
108 /* always get timestamp with Rx frame */ 116 /* always get timestamp with Rx frame */
109 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; 117 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
110 118
111 ret = iwl_check_rxon_cmd(priv); 119 ret = iwl_check_rxon_cmd(priv, ctx);
112 if (ret) { 120 if (ret) {
113 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); 121 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
114 return -EINVAL; 122 return -EINVAL;
@@ -119,7 +127,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
119 * abort any previous channel switch if still in process 127 * abort any previous channel switch if still in process
120 */ 128 */
121 if (priv->switch_rxon.switch_in_progress && 129 if (priv->switch_rxon.switch_in_progress &&
122 (priv->switch_rxon.channel != priv->staging_rxon.channel)) { 130 (priv->switch_rxon.channel != ctx->staging.channel)) {
123 IWL_DEBUG_11H(priv, "abort channel switch on %d\n", 131 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
124 le16_to_cpu(priv->switch_rxon.channel)); 132 le16_to_cpu(priv->switch_rxon.channel));
125 iwl_chswitch_done(priv, false); 133 iwl_chswitch_done(priv, false);
@@ -128,15 +136,15 @@ int iwl_commit_rxon(struct iwl_priv *priv)
128 /* If we don't need to send a full RXON, we can use 136 /* If we don't need to send a full RXON, we can use
129 * iwl_rxon_assoc_cmd which is used to reconfigure filter 137 * iwl_rxon_assoc_cmd which is used to reconfigure filter
130 * and other flags for the current radio configuration. */ 138 * and other flags for the current radio configuration. */
131 if (!iwl_full_rxon_required(priv)) { 139 if (!iwl_full_rxon_required(priv, ctx)) {
132 ret = iwl_send_rxon_assoc(priv); 140 ret = iwl_send_rxon_assoc(priv, ctx);
133 if (ret) { 141 if (ret) {
134 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); 142 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
135 return ret; 143 return ret;
136 } 144 }
137 145
138 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 146 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
139 iwl_print_rx_config_cmd(priv); 147 iwl_print_rx_config_cmd(priv, ctx);
140 return 0; 148 return 0;
141 } 149 }
142 150
@@ -144,13 +152,13 @@ int iwl_commit_rxon(struct iwl_priv *priv)
144 * an RXON_ASSOC and the new config wants the associated mask enabled, 152 * an RXON_ASSOC and the new config wants the associated mask enabled,
145 * we must clear the associated from the active configuration 153 * we must clear the associated from the active configuration
146 * before we apply the new config */ 154 * before we apply the new config */
147 if (iwl_is_associated(priv) && new_assoc) { 155 if (iwl_is_associated_ctx(ctx) && new_assoc) {
148 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); 156 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
149 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 157 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
150 158
151 ret = iwl_send_cmd_pdu(priv, REPLY_RXON, 159 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
152 sizeof(struct iwl_rxon_cmd), 160 sizeof(struct iwl_rxon_cmd),
153 &priv->active_rxon); 161 active_rxon);
154 162
155 /* If the mask clearing failed then we set 163 /* If the mask clearing failed then we set
156 * active_rxon back to what it was previously */ 164 * active_rxon back to what it was previously */
@@ -159,9 +167,9 @@ int iwl_commit_rxon(struct iwl_priv *priv)
159 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); 167 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
160 return ret; 168 return ret;
161 } 169 }
162 iwl_clear_ucode_stations(priv); 170 iwl_clear_ucode_stations(priv, ctx);
163 iwl_restore_stations(priv); 171 iwl_restore_stations(priv, ctx);
164 ret = iwl_restore_default_wep_keys(priv); 172 ret = iwl_restore_default_wep_keys(priv, ctx);
165 if (ret) { 173 if (ret) {
166 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); 174 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
167 return ret; 175 return ret;
@@ -173,47 +181,65 @@ int iwl_commit_rxon(struct iwl_priv *priv)
173 "* channel = %d\n" 181 "* channel = %d\n"
174 "* bssid = %pM\n", 182 "* bssid = %pM\n",
175 (new_assoc ? "" : "out"), 183 (new_assoc ? "" : "out"),
176 le16_to_cpu(priv->staging_rxon.channel), 184 le16_to_cpu(ctx->staging.channel),
177 priv->staging_rxon.bssid_addr); 185 ctx->staging.bssid_addr);
178 186
179 iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto); 187 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
188
189 if (!old_assoc) {
190 /*
191 * First of all, before setting associated, we need to
192 * send RXON timing so the device knows about the DTIM
193 * period and other timing values
194 */
195 ret = iwl_send_rxon_timing(priv, ctx);
196 if (ret) {
197 IWL_ERR(priv, "Error setting RXON timing!\n");
198 return ret;
199 }
200 }
201
202 if (priv->cfg->ops->hcmd->set_pan_params) {
203 ret = priv->cfg->ops->hcmd->set_pan_params(priv);
204 if (ret)
205 return ret;
206 }
180 207
181 /* Apply the new configuration 208 /* Apply the new configuration
182 * RXON unassoc clears the station table in uCode so restoration of 209 * RXON unassoc clears the station table in uCode so restoration of
183 * stations is needed after it (the RXON command) completes 210 * stations is needed after it (the RXON command) completes
184 */ 211 */
185 if (!new_assoc) { 212 if (!new_assoc) {
186 ret = iwl_send_cmd_pdu(priv, REPLY_RXON, 213 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
187 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); 214 sizeof(struct iwl_rxon_cmd), &ctx->staging);
188 if (ret) { 215 if (ret) {
189 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 216 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
190 return ret; 217 return ret;
191 } 218 }
192 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n"); 219 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
193 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 220 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
194 iwl_clear_ucode_stations(priv); 221 iwl_clear_ucode_stations(priv, ctx);
195 iwl_restore_stations(priv); 222 iwl_restore_stations(priv, ctx);
196 ret = iwl_restore_default_wep_keys(priv); 223 ret = iwl_restore_default_wep_keys(priv, ctx);
197 if (ret) { 224 if (ret) {
198 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); 225 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
199 return ret; 226 return ret;
200 } 227 }
201 } 228 }
202
203 priv->start_calib = 0;
204 if (new_assoc) { 229 if (new_assoc) {
230 priv->start_calib = 0;
205 /* Apply the new configuration 231 /* Apply the new configuration
206 * RXON assoc doesn't clear the station table in uCode, 232 * RXON assoc doesn't clear the station table in uCode,
207 */ 233 */
208 ret = iwl_send_cmd_pdu(priv, REPLY_RXON, 234 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
209 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); 235 sizeof(struct iwl_rxon_cmd), &ctx->staging);
210 if (ret) { 236 if (ret) {
211 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 237 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
212 return ret; 238 return ret;
213 } 239 }
214 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 240 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
215 } 241 }
216 iwl_print_rx_config_cmd(priv); 242 iwl_print_rx_config_cmd(priv, ctx);
217 243
218 iwl_init_sensitivity(priv); 244 iwl_init_sensitivity(priv);
219 245
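The reshuffled iwl_commit_rxon above enforces an ordering: when the context was not previously associated, RXON timing (and, where the ops exist, PAN parameters) must reach the device before the RXON that sets the association bit. A userspace model of that ordering with printf stand-ins for the real host commands; a sketch, not driver code:

    /* Model of the command ordering iwl_commit_rxon now follows. */
    #include <stdbool.h>
    #include <stdio.h>

    static int send(const char *cmd) { printf("-> %s\n", cmd); return 0; }

    static int commit_rxon(bool old_assoc, bool new_assoc, bool have_pan_ops)
    {
        int ret;

        if (old_assoc && new_assoc)
            send("RXON (assoc bit cleared)");    /* toggle off first */

        if (!old_assoc) {
            /* device needs DTIM/timing values before the assoc RXON */
            ret = send("RXON_TIMING");
            if (ret)
                return ret;
        }
        if (have_pan_ops) {
            ret = send("PAN_PARAMS");    /* time-slicing setup for PAN */
            if (ret)
                return ret;
        }
        return send(new_assoc ? "RXON (assoc)" : "RXON (unassoc)");
    }

    int main(void)
    {
        return commit_rxon(false, true, true);
    }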
@@ -230,10 +256,14 @@ int iwl_commit_rxon(struct iwl_priv *priv)
230 256
231void iwl_update_chain_flags(struct iwl_priv *priv) 257void iwl_update_chain_flags(struct iwl_priv *priv)
232{ 258{
259 struct iwl_rxon_context *ctx;
233 260
234 if (priv->cfg->ops->hcmd->set_rxon_chain) 261 if (priv->cfg->ops->hcmd->set_rxon_chain) {
235 priv->cfg->ops->hcmd->set_rxon_chain(priv); 262 for_each_context(priv, ctx) {
236 iwlcore_commit_rxon(priv); 263 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
264 iwlcore_commit_rxon(priv, ctx);
265 }
266 }
237} 267}
238 268
239static void iwl_clear_free_frames(struct iwl_priv *priv) 269static void iwl_clear_free_frames(struct iwl_priv *priv)
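The hunk above converts iwl_update_chain_flags from one global RXON to a walk over every enabled context. A minimal userspace sketch of that iteration pattern; the macro body is an illustration of how for_each_context plausibly filters on priv->valid_contexts, not a quote of the driver's definition in iwl-dev.h:

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    enum iwl_rxon_context_id { IWL_RXON_CTX_BSS, IWL_RXON_CTX_PAN, NUM_IWL_RXON_CTX };

    struct iwl_rxon_context {
        enum iwl_rxon_context_id ctxid;
    };

    struct iwl_priv {
        unsigned long valid_contexts;
        struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
    };

    /* visit only the contexts the loaded firmware advertised */
    #define for_each_context(priv, ctx) \
        for (ctx = &(priv)->contexts[IWL_RXON_CTX_BSS]; \
             ctx < &(priv)->contexts[NUM_IWL_RXON_CTX]; ctx++) \
            if ((priv)->valid_contexts & BIT(ctx->ctxid))

    int main(void)
    {
        struct iwl_priv priv = { .valid_contexts = BIT(IWL_RXON_CTX_BSS) |
                                                   BIT(IWL_RXON_CTX_PAN) };
        struct iwl_rxon_context *ctx;
        int i;

        for (i = 0; i < NUM_IWL_RXON_CTX; i++)
            priv.contexts[i].ctxid = i;

        for_each_context(&priv, ctx)
            printf("set_rxon_chain + commit_rxon for context %d\n", ctx->ctxid);
        return 0;
    }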
@@ -337,6 +367,13 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
337 * beacon contents. 367 * beacon contents.
338 */ 368 */
339 369
370 lockdep_assert_held(&priv->mutex);
371
372 if (!priv->beacon_ctx) {
373 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
374 return 0;
375 }
376
340 /* Initialize memory */ 377 /* Initialize memory */
341 tx_beacon_cmd = &frame->u.beacon; 378 tx_beacon_cmd = &frame->u.beacon;
342 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); 379 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
@@ -349,7 +386,7 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
349 386
350 /* Set up TX command fields */ 387 /* Set up TX command fields */
351 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); 388 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
352 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id; 389 tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
353 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 390 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
354 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | 391 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
355 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; 392 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
@@ -359,7 +396,7 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
359 frame_size); 396 frame_size);
360 397
361 /* Set up packet rate and flags */ 398 /* Set up packet rate and flags */
362 rate = iwl_rate_get_lowest_plcp(priv); 399 rate = iwl_rate_get_lowest_plcp(priv, priv->beacon_ctx);
363 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 400 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
364 priv->hw_params.valid_tx_ant); 401 priv->hw_params.valid_tx_ant);
365 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 402 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
@@ -592,23 +629,84 @@ static void iwl_bg_beacon_update(struct work_struct *work)
592 container_of(work, struct iwl_priv, beacon_update); 629 container_of(work, struct iwl_priv, beacon_update);
593 struct sk_buff *beacon; 630 struct sk_buff *beacon;
594 631
595 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 632 mutex_lock(&priv->mutex);
596 beacon = ieee80211_beacon_get(priv->hw, priv->vif); 633 if (!priv->beacon_ctx) {
634 IWL_ERR(priv, "updating beacon w/o beacon context!\n");
635 goto out;
636 }
637
638 if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
639 /*
640 * The ucode will send beacon notifications even in
641 * IBSS mode, but we don't want to process them. But
 642 * we need to defer the type check until here, since
 643 * accessing beacon_ctx requires locking.
644 */
645 goto out;
646 }
597 647
 648 /* Pull updated AP beacon from mac80211. Will fail if not in AP mode */
649 beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
598 if (!beacon) { 650 if (!beacon) {
599 IWL_ERR(priv, "update beacon failed\n"); 651 IWL_ERR(priv, "update beacon failed\n");
600 return; 652 goto out;
601 } 653 }
602 654
603 mutex_lock(&priv->mutex);
604 /* new beacon skb is allocated every time; dispose previous. */ 655 /* new beacon skb is allocated every time; dispose previous. */
605 if (priv->ibss_beacon) 656 if (priv->ibss_beacon)
606 dev_kfree_skb(priv->ibss_beacon); 657 dev_kfree_skb(priv->ibss_beacon);
607 658
608 priv->ibss_beacon = beacon; 659 priv->ibss_beacon = beacon;
609 mutex_unlock(&priv->mutex);
610 660
611 iwl_send_beacon_cmd(priv); 661 iwl_send_beacon_cmd(priv);
662 out:
663 mutex_unlock(&priv->mutex);
664}
665
666static void iwl_bg_bt_runtime_config(struct work_struct *work)
667{
668 struct iwl_priv *priv =
669 container_of(work, struct iwl_priv, bt_runtime_config);
670
671 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
672 return;
673
 674 /* don't send host command if rf-kill is on */
675 if (!iwl_is_ready_rf(priv))
676 return;
677 priv->cfg->ops->hcmd->send_bt_config(priv);
678}
679
680static void iwl_bg_bt_full_concurrency(struct work_struct *work)
681{
682 struct iwl_priv *priv =
683 container_of(work, struct iwl_priv, bt_full_concurrency);
684 struct iwl_rxon_context *ctx;
685
686 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
687 return;
688
 690 /* don't send host command if rf-kill is on */
690 if (!iwl_is_ready_rf(priv))
691 return;
692
693 IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
694 priv->bt_full_concurrent ?
695 "full concurrency" : "3-wire");
696
697 /*
698 * LQ & RXON updated cmds must be sent before BT Config cmd
699 * to avoid 3-wire collisions
700 */
701 mutex_lock(&priv->mutex);
702 for_each_context(priv, ctx) {
703 if (priv->cfg->ops->hcmd->set_rxon_chain)
704 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
705 iwlcore_commit_rxon(priv, ctx);
706 }
707 mutex_unlock(&priv->mutex);
708
709 priv->cfg->ops->hcmd->send_bt_config(priv);
612} 710}
613 711
614/** 712/**
@@ -763,10 +861,10 @@ static void iwl_bg_ucode_trace(unsigned long data)
763static void iwl_rx_beacon_notif(struct iwl_priv *priv, 861static void iwl_rx_beacon_notif(struct iwl_priv *priv,
764 struct iwl_rx_mem_buffer *rxb) 862 struct iwl_rx_mem_buffer *rxb)
765{ 863{
766#ifdef CONFIG_IWLWIFI_DEBUG
767 struct iwl_rx_packet *pkt = rxb_addr(rxb); 864 struct iwl_rx_packet *pkt = rxb_addr(rxb);
768 struct iwl4965_beacon_notif *beacon = 865 struct iwl4965_beacon_notif *beacon =
769 (struct iwl4965_beacon_notif *)pkt->u.raw; 866 (struct iwl4965_beacon_notif *)pkt->u.raw;
867#ifdef CONFIG_IWLWIFI_DEBUG
770 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 868 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
771 869
772 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " 870 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
@@ -778,8 +876,9 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
778 le32_to_cpu(beacon->low_tsf), rate); 876 le32_to_cpu(beacon->low_tsf), rate);
779#endif 877#endif
780 878
781 if ((priv->iw_mode == NL80211_IFTYPE_AP) && 879 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
782 (!test_bit(STATUS_EXIT_PENDING, &priv->status))) 880
881 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
783 queue_work(priv->workqueue, &priv->beacon_update); 882 queue_work(priv->workqueue, &priv->beacon_update);
784} 883}
785 884
@@ -1181,7 +1280,6 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
1181 IWL_ERR(priv, "Microcode SW error detected. " 1280 IWL_ERR(priv, "Microcode SW error detected. "
1182 " Restarting 0x%X.\n", inta); 1281 " Restarting 0x%X.\n", inta);
1183 priv->isr_stats.sw++; 1282 priv->isr_stats.sw++;
1184 priv->isr_stats.sw_err = inta;
1185 iwl_irq_handle_error(priv); 1283 iwl_irq_handle_error(priv);
1186 handled |= CSR_INT_BIT_SW_ERR; 1284 handled |= CSR_INT_BIT_SW_ERR;
1187 } 1285 }
@@ -1362,7 +1460,6 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1362 IWL_ERR(priv, "Microcode SW error detected. " 1460 IWL_ERR(priv, "Microcode SW error detected. "
1363 " Restarting 0x%X.\n", inta); 1461 " Restarting 0x%X.\n", inta);
1364 priv->isr_stats.sw++; 1462 priv->isr_stats.sw++;
1365 priv->isr_stats.sw_err = inta;
1366 iwl_irq_handle_error(priv); 1463 iwl_irq_handle_error(priv);
1367 handled |= CSR_INT_BIT_SW_ERR; 1464 handled |= CSR_INT_BIT_SW_ERR;
1368 } 1465 }
@@ -1650,30 +1747,44 @@ static void iwl_nic_start(struct iwl_priv *priv)
1650struct iwlagn_ucode_capabilities { 1747struct iwlagn_ucode_capabilities {
1651 u32 max_probe_length; 1748 u32 max_probe_length;
1652 u32 standard_phy_calibration_size; 1749 u32 standard_phy_calibration_size;
1750 bool pan;
1653}; 1751};
1654 1752
1655static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); 1753static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
1656static int iwl_mac_setup_register(struct iwl_priv *priv, 1754static int iwl_mac_setup_register(struct iwl_priv *priv,
1657 struct iwlagn_ucode_capabilities *capa); 1755 struct iwlagn_ucode_capabilities *capa);
1658 1756
1757#define UCODE_EXPERIMENTAL_INDEX 100
1758#define UCODE_EXPERIMENTAL_TAG "exp"
1759
1659static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first) 1760static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
1660{ 1761{
1661 const char *name_pre = priv->cfg->fw_name_pre; 1762 const char *name_pre = priv->cfg->fw_name_pre;
1763 char tag[8];
1662 1764
1663 if (first) 1765 if (first) {
1766#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
1767 priv->fw_index = UCODE_EXPERIMENTAL_INDEX;
1768 strcpy(tag, UCODE_EXPERIMENTAL_TAG);
1769 } else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
1770#endif
1664 priv->fw_index = priv->cfg->ucode_api_max; 1771 priv->fw_index = priv->cfg->ucode_api_max;
1665 else 1772 sprintf(tag, "%d", priv->fw_index);
1773 } else {
1666 priv->fw_index--; 1774 priv->fw_index--;
1775 sprintf(tag, "%d", priv->fw_index);
1776 }
1667 1777
1668 if (priv->fw_index < priv->cfg->ucode_api_min) { 1778 if (priv->fw_index < priv->cfg->ucode_api_min) {
1669 IWL_ERR(priv, "no suitable firmware found!\n"); 1779 IWL_ERR(priv, "no suitable firmware found!\n");
1670 return -ENOENT; 1780 return -ENOENT;
1671 } 1781 }
1672 1782
1673 sprintf(priv->firmware_name, "%s%d%s", 1783 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1674 name_pre, priv->fw_index, ".ucode");
1675 1784
1676 IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n", 1785 IWL_DEBUG_INFO(priv, "attempting to load firmware %s'%s'\n",
1786 (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
1787 ? "EXPERIMENTAL " : "",
1677 priv->firmware_name); 1788 priv->firmware_name);
1678 1789
1679 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name, 1790 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
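The request path now tries an "exp"-tagged experimental image first on debug builds, then walks the numeric API index down from ucode_api_max toward ucode_api_min. A compact, compilable model of that selection loop; the "iwlwifi-6000-" prefix and the API bounds are illustrative values, not taken from a real config:

    #include <stdio.h>
    #include <string.h>

    #define UCODE_EXPERIMENTAL_INDEX 100
    #define UCODE_EXPERIMENTAL_TAG   "exp"

    static int next_fw_name(char *buf, size_t len, const char *name_pre,
                            int *fw_index, int api_max, int api_min,
                            int first, int experimental_ucode)
    {
        char tag[8];

        if (first && experimental_ucode) {
            *fw_index = UCODE_EXPERIMENTAL_INDEX;
            strcpy(tag, UCODE_EXPERIMENTAL_TAG);
        } else if (first || *fw_index == UCODE_EXPERIMENTAL_INDEX) {
            *fw_index = api_max;
            snprintf(tag, sizeof(tag), "%d", *fw_index);
        } else {
            (*fw_index)--;
            snprintf(tag, sizeof(tag), "%d", *fw_index);
        }
        if (*fw_index < api_min)
            return -1;    /* no suitable firmware found */
        snprintf(buf, len, "%s%s.ucode", name_pre, tag);
        return 0;
    }

    int main(void)
    {
        char name[64];
        int idx = 0, first = 1;

        while (!next_fw_name(name, sizeof(name), "iwlwifi-6000-",
                             &idx, 4, 4, first, 1)) {
            printf("trying %s\n", name);
            first = 0;
        }
        return 0;
    }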
@@ -1874,6 +1985,11 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
1874 capa->max_probe_length = 1985 capa->max_probe_length =
1875 le32_to_cpup((__le32 *)tlv_data); 1986 le32_to_cpup((__le32 *)tlv_data);
1876 break; 1987 break;
1988 case IWL_UCODE_TLV_PAN:
1989 if (tlv_len)
1990 goto invalid_tlv_len;
1991 capa->pan = true;
1992 break;
1877 case IWL_UCODE_TLV_INIT_EVTLOG_PTR: 1993 case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
1878 if (tlv_len != sizeof(u32)) 1994 if (tlv_len != sizeof(u32))
1879 goto invalid_tlv_len; 1995 goto invalid_tlv_len;
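IWL_UCODE_TLV_PAN is a flag TLV: its presence alone, with a zero-length payload, advertises PAN support. A toy TLV walk in that spirit; the record layout is simplified to host-endian with no alignment padding, and the TLV type value is made up:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define IWL_UCODE_TLV_PAN 7    /* illustrative value */

    struct capa { bool pan; };

    static int parse_tlvs(const uint8_t *data, size_t len, struct capa *capa)
    {
        while (len >= 8) {
            uint32_t type, tlv_len;

            memcpy(&type, data, 4);
            memcpy(&tlv_len, data + 4, 4);
            data += 8;
            len -= 8;
            if (tlv_len > len)
                return -1;         /* truncated TLV */
            switch (type) {
            case IWL_UCODE_TLV_PAN:
                if (tlv_len)
                    return -1;     /* flag TLVs carry no payload */
                capa->pan = true;
                break;
            default:
                break;             /* skip unknown types */
            }
            data += tlv_len;
            len -= tlv_len;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t fw[8] = { IWL_UCODE_TLV_PAN, 0, 0, 0, 0, 0, 0, 0 };
        struct capa capa = { false };

        if (!parse_tlvs(fw, sizeof(fw), &capa))
            printf("pan: %d\n", capa.pan);
        return 0;
    }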
@@ -1968,8 +2084,10 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1968 memset(&pieces, 0, sizeof(pieces)); 2084 memset(&pieces, 0, sizeof(pieces));
1969 2085
1970 if (!ucode_raw) { 2086 if (!ucode_raw) {
1971 IWL_ERR(priv, "request for firmware file '%s' failed.\n", 2087 if (priv->fw_index <= priv->cfg->ucode_api_max)
1972 priv->firmware_name); 2088 IWL_ERR(priv,
2089 "request for firmware file '%s' failed.\n",
2090 priv->firmware_name);
1973 goto try_again; 2091 goto try_again;
1974 } 2092 }
1975 2093
@@ -2016,7 +2134,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
2016 api_max, api_ver); 2134 api_max, api_ver);
2017 2135
2018 if (build) 2136 if (build)
2019 sprintf(buildstr, " build %u", build); 2137 sprintf(buildstr, " build %u%s", build,
2138 (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
2139 ? " (EXP)" : "");
2020 else 2140 else
2021 buildstr[0] = '\0'; 2141 buildstr[0] = '\0';
2022 2142
@@ -2145,6 +2265,12 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
2145 priv->_agn.inst_evtlog_size = priv->cfg->max_event_log_size; 2265 priv->_agn.inst_evtlog_size = priv->cfg->max_event_log_size;
2146 priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr; 2266 priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr;
2147 2267
2268 if (ucode_capa.pan) {
2269 priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
2270 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
2271 } else
2272 priv->sta_key_max_num = STA_KEY_MAX_NUM;
2273
2148 /* Copy images into buffers for card's bus-master reads ... */ 2274 /* Copy images into buffers for card's bus-master reads ... */
2149 2275
2150 /* Runtime instructions (first block of data in file) */ 2276 /* Runtime instructions (first block of data in file) */
@@ -2341,6 +2467,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
2341 } 2467 }
2342 2468
2343 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); 2469 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
2470 priv->isr_stats.err_code = desc;
2344 pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32)); 2471 pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32));
2345 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32)); 2472 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
2346 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32)); 2473 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
@@ -2543,6 +2670,9 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2543 return pos; 2670 return pos;
2544 } 2671 }
2545 2672
2673 /* enable/disable bt channel announcement */
2674 priv->bt_ch_announce = iwlagn_bt_ch_announce;
2675
2546#ifdef CONFIG_IWLWIFI_DEBUG 2676#ifdef CONFIG_IWLWIFI_DEBUG
2547 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) 2677 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
2548 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) 2678 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
@@ -2589,6 +2719,52 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2589 return pos; 2719 return pos;
2590} 2720}
2591 2721
2722static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2723{
2724 struct iwl_ct_kill_config cmd;
2725 struct iwl_ct_kill_throttling_config adv_cmd;
2726 unsigned long flags;
2727 int ret = 0;
2728
2729 spin_lock_irqsave(&priv->lock, flags);
2730 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2731 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
2732 spin_unlock_irqrestore(&priv->lock, flags);
2733 priv->thermal_throttle.ct_kill_toggle = false;
2734
2735 if (priv->cfg->support_ct_kill_exit) {
2736 adv_cmd.critical_temperature_enter =
2737 cpu_to_le32(priv->hw_params.ct_kill_threshold);
2738 adv_cmd.critical_temperature_exit =
2739 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
2740
2741 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
2742 sizeof(adv_cmd), &adv_cmd);
2743 if (ret)
2744 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
2745 else
2746 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
2747 "succeeded, "
2748 "critical temperature enter is %d,"
2749 "exit is %d\n",
2750 priv->hw_params.ct_kill_threshold,
2751 priv->hw_params.ct_kill_exit_threshold);
2752 } else {
2753 cmd.critical_temperature_R =
2754 cpu_to_le32(priv->hw_params.ct_kill_threshold);
2755
2756 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
2757 sizeof(cmd), &cmd);
2758 if (ret)
2759 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
2760 else
2761 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
2762 "succeeded, "
2763 "critical temperature is %d\n",
2764 priv->hw_params.ct_kill_threshold);
2765 }
2766}
2767
2592/** 2768/**
2593 * iwl_alive_start - called after REPLY_ALIVE notification received 2769 * iwl_alive_start - called after REPLY_ALIVE notification received
2594 * from protocol/runtime uCode (initialization uCode's 2770 * from protocol/runtime uCode (initialization uCode's
@@ -2597,6 +2773,7 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2597static void iwl_alive_start(struct iwl_priv *priv) 2773static void iwl_alive_start(struct iwl_priv *priv)
2598{ 2774{
2599 int ret = 0; 2775 int ret = 0;
2776 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2600 2777
2601 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 2778 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2602 2779
@@ -2637,6 +2814,22 @@ static void iwl_alive_start(struct iwl_priv *priv)
2637 if (iwl_is_rfkill(priv)) 2814 if (iwl_is_rfkill(priv))
2638 return; 2815 return;
2639 2816
2817 if (priv->cfg->advanced_bt_coexist) {
2818 /* Configure Bluetooth device coexistence support */
2819 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
2820 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
2821 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
2822 priv->cfg->ops->hcmd->send_bt_config(priv);
2823 priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
2824 if (bt_coex_active && priv->iw_mode != NL80211_IFTYPE_ADHOC)
2825 iwlagn_send_prio_tbl(priv);
2826
 2827 /* FIXME: workaround to force a uCode BT state machine change */
2828 iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
2829 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
2830 iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
2831 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
2832 }
2640 ieee80211_wake_queues(priv->hw); 2833 ieee80211_wake_queues(priv->hw);
2641 2834
2642 priv->active_rate = IWL_RATES_MASK; 2835 priv->active_rate = IWL_RATES_MASK;
@@ -2645,27 +2838,31 @@ static void iwl_alive_start(struct iwl_priv *priv)
2645 if (priv->cfg->ops->hcmd->set_tx_ant) 2838 if (priv->cfg->ops->hcmd->set_tx_ant)
2646 priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant); 2839 priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant);
2647 2840
2648 if (iwl_is_associated(priv)) { 2841 if (iwl_is_associated_ctx(ctx)) {
2649 struct iwl_rxon_cmd *active_rxon = 2842 struct iwl_rxon_cmd *active_rxon =
2650 (struct iwl_rxon_cmd *)&priv->active_rxon; 2843 (struct iwl_rxon_cmd *)&ctx->active;
2651 /* apply any changes in staging */ 2844 /* apply any changes in staging */
2652 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2845 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2653 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2846 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2654 } else { 2847 } else {
2848 struct iwl_rxon_context *tmp;
2655 /* Initialize our rx_config data */ 2849 /* Initialize our rx_config data */
2656 iwl_connection_init_rx_config(priv, NULL); 2850 for_each_context(priv, tmp)
2851 iwl_connection_init_rx_config(priv, tmp);
2657 2852
2658 if (priv->cfg->ops->hcmd->set_rxon_chain) 2853 if (priv->cfg->ops->hcmd->set_rxon_chain)
2659 priv->cfg->ops->hcmd->set_rxon_chain(priv); 2854 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2660 } 2855 }
2661 2856
2662 /* Configure Bluetooth device coexistence support */ 2857 if (!priv->cfg->advanced_bt_coexist) {
2663 priv->cfg->ops->hcmd->send_bt_config(priv); 2858 /* Configure Bluetooth device coexistence support */
2859 priv->cfg->ops->hcmd->send_bt_config(priv);
2860 }
2664 2861
2665 iwl_reset_run_time_calib(priv); 2862 iwl_reset_run_time_calib(priv);
2666 2863
2667 /* Configure the adapter for unassociated operation */ 2864 /* Configure the adapter for unassociated operation */
2668 iwlcore_commit_rxon(priv); 2865 iwlcore_commit_rxon(priv, ctx);
2669 2866
2670 /* At this point, the NIC is initialized and operational */ 2867 /* At this point, the NIC is initialized and operational */
2671 iwl_rf_kill_ct_config(priv); 2868 iwl_rf_kill_ct_config(priv);
@@ -2695,13 +2892,26 @@ static void __iwl_down(struct iwl_priv *priv)
2695 2892
2696 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); 2893 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2697 2894
2698 if (!exit_pending) 2895 iwl_scan_cancel_timeout(priv, 200);
2699 set_bit(STATUS_EXIT_PENDING, &priv->status);
2700 2896
2701 iwl_clear_ucode_stations(priv); 2897 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2702 iwl_dealloc_bcast_station(priv); 2898
 2899 /* Stop the TX queue watchdog. STATUS_EXIT_PENDING must be set
 2900 * so the timer cannot be rearmed */
2901 if (priv->cfg->ops->lib->recover_from_tx_stall)
2902 del_timer_sync(&priv->monitor_recover);
2903
2904 iwl_clear_ucode_stations(priv, NULL);
2905 iwl_dealloc_bcast_stations(priv);
2703 iwl_clear_driver_stations(priv); 2906 iwl_clear_driver_stations(priv);
2704 2907
2908 /* reset BT coex data */
2909 priv->bt_status = 0;
2910 priv->bt_traffic_load = priv->cfg->bt_init_traffic_load;
2911 priv->bt_sco_active = false;
2912 priv->bt_full_concurrent = false;
2913 priv->bt_ci_compliance = 0;
2914
2705 /* Unblock any waiting calls */ 2915 /* Unblock any waiting calls */
2706 wake_up_interruptible_all(&priv->wait_command_queue); 2916 wake_up_interruptible_all(&priv->wait_command_queue);
2707 2917
@@ -2834,6 +3044,7 @@ static int iwl_prepare_card_hw(struct iwl_priv *priv)
2834 3044
2835static int __iwl_up(struct iwl_priv *priv) 3045static int __iwl_up(struct iwl_priv *priv)
2836{ 3046{
3047 struct iwl_rxon_context *ctx;
2837 int i; 3048 int i;
2838 int ret; 3049 int ret;
2839 3050
@@ -2847,9 +3058,13 @@ static int __iwl_up(struct iwl_priv *priv)
2847 return -EIO; 3058 return -EIO;
2848 } 3059 }
2849 3060
2850 ret = iwl_alloc_bcast_station(priv, true); 3061 for_each_context(priv, ctx) {
2851 if (ret) 3062 ret = iwl_alloc_bcast_station(priv, ctx, true);
2852 return ret; 3063 if (ret) {
3064 iwl_dealloc_bcast_stations(priv);
3065 return ret;
3066 }
3067 }
2853 3068
2854 iwl_prepare_card_hw(priv); 3069 iwl_prepare_card_hw(priv);
2855 3070
@@ -2874,6 +3089,12 @@ static int __iwl_up(struct iwl_priv *priv)
2874 3089
2875 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 3090 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2876 3091
3092 /* must be initialised before iwl_hw_nic_init */
3093 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
3094 priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
3095 else
3096 priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
3097
2877 ret = iwlagn_hw_nic_init(priv); 3098 ret = iwlagn_hw_nic_init(priv);
2878 if (ret) { 3099 if (ret) {
2879 IWL_ERR(priv, "Unable to init nic\n"); 3100 IWL_ERR(priv, "Unable to init nic\n");
@@ -3004,11 +3225,42 @@ static void iwl_bg_restart(struct work_struct *data)
3004 return; 3225 return;
3005 3226
3006 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { 3227 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
3228 struct iwl_rxon_context *ctx;
3229 bool bt_sco, bt_full_concurrent;
3230 u8 bt_ci_compliance;
3231 u8 bt_load;
3232 u8 bt_status;
3233
3007 mutex_lock(&priv->mutex); 3234 mutex_lock(&priv->mutex);
3008 priv->vif = NULL; 3235 for_each_context(priv, ctx)
3236 ctx->vif = NULL;
3009 priv->is_open = 0; 3237 priv->is_open = 0;
3238
3239 /*
3240 * __iwl_down() will clear the BT status variables,
3241 * which is correct, but when we restart we really
3242 * want to keep them so restore them afterwards.
3243 *
3244 * The restart process will later pick them up and
3245 * re-configure the hw when we reconfigure the BT
3246 * command.
3247 */
3248 bt_sco = priv->bt_sco_active;
3249 bt_full_concurrent = priv->bt_full_concurrent;
3250 bt_ci_compliance = priv->bt_ci_compliance;
3251 bt_load = priv->bt_traffic_load;
3252 bt_status = priv->bt_status;
3253
3254 __iwl_down(priv);
3255
3256 priv->bt_sco_active = bt_sco;
3257 priv->bt_full_concurrent = bt_full_concurrent;
3258 priv->bt_ci_compliance = bt_ci_compliance;
3259 priv->bt_traffic_load = bt_load;
3260 priv->bt_status = bt_status;
3261
3010 mutex_unlock(&priv->mutex); 3262 mutex_unlock(&priv->mutex);
3011 iwl_down(priv); 3263 iwl_cancel_deferred_work(priv);
3012 ieee80211_restart_hw(priv->hw); 3264 ieee80211_restart_hw(priv->hw);
3013 } else { 3265 } else {
3014 iwl_down(priv); 3266 iwl_down(priv);
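The comment block above states the intent; modeled in miniature, the BT coex state is snapshotted before the down path clears it and written back afterwards, so the post-restart BT config command still reflects the last notification. Stand-in fields, not driver code:

    #include <stdbool.h>
    #include <stdio.h>

    struct priv {
        bool bt_sco_active, bt_full_concurrent;
        unsigned char bt_status, bt_traffic_load, bt_ci_compliance;
    };

    static void iwl_down_model(struct priv *p)
    {
        p->bt_status = 0;
        p->bt_traffic_load = 0;
        p->bt_sco_active = false;
        p->bt_full_concurrent = false;
        p->bt_ci_compliance = 0;
    }

    static void restart_model(struct priv *p)
    {
        struct priv saved = *p;    /* snapshot BT state */

        iwl_down_model(p);         /* clears it, as on a normal down */

        /* restore so the reconfigured BT command sees the old state */
        p->bt_sco_active = saved.bt_sco_active;
        p->bt_full_concurrent = saved.bt_full_concurrent;
        p->bt_ci_compliance = saved.bt_ci_compliance;
        p->bt_traffic_load = saved.bt_traffic_load;
        p->bt_status = saved.bt_status;
    }

    int main(void)
    {
        struct priv p = { .bt_sco_active = true, .bt_status = 3 };

        restart_model(&p);
        printf("sco=%d status=%d\n", p.bt_sco_active, p.bt_status);
        return 0;
    }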
@@ -3039,12 +3291,15 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
3039 3291
3040void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif) 3292void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3041{ 3293{
3294 struct iwl_rxon_context *ctx;
3042 struct ieee80211_conf *conf = NULL; 3295 struct ieee80211_conf *conf = NULL;
3043 int ret = 0; 3296 int ret = 0;
3044 3297
3045 if (!vif || !priv->is_open) 3298 if (!vif || !priv->is_open)
3046 return; 3299 return;
3047 3300
3301 ctx = iwl_rxon_ctx_from_vif(vif);
3302
3048 if (vif->type == NL80211_IFTYPE_AP) { 3303 if (vif->type == NL80211_IFTYPE_AP) {
3049 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); 3304 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
3050 return; 3305 return;
@@ -3057,44 +3312,42 @@ void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3057 3312
3058 conf = ieee80211_get_hw_conf(priv->hw); 3313 conf = ieee80211_get_hw_conf(priv->hw);
3059 3314
3060 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3315 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3061 iwlcore_commit_rxon(priv); 3316 iwlcore_commit_rxon(priv, ctx);
3062 3317
3063 iwl_setup_rxon_timing(priv, vif); 3318 ret = iwl_send_rxon_timing(priv, ctx);
3064 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3065 sizeof(priv->rxon_timing), &priv->rxon_timing);
3066 if (ret) 3319 if (ret)
3067 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3320 IWL_WARN(priv, "RXON timing - "
3068 "Attempting to continue.\n"); 3321 "Attempting to continue.\n");
3069 3322
3070 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3323 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3071 3324
3072 iwl_set_rxon_ht(priv, &priv->current_ht_config); 3325 iwl_set_rxon_ht(priv, &priv->current_ht_config);
3073 3326
3074 if (priv->cfg->ops->hcmd->set_rxon_chain) 3327 if (priv->cfg->ops->hcmd->set_rxon_chain)
3075 priv->cfg->ops->hcmd->set_rxon_chain(priv); 3328 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
3076 3329
3077 priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid); 3330 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
3078 3331
3079 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", 3332 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3080 vif->bss_conf.aid, vif->bss_conf.beacon_int); 3333 vif->bss_conf.aid, vif->bss_conf.beacon_int);
3081 3334
3082 if (vif->bss_conf.use_short_preamble) 3335 if (vif->bss_conf.use_short_preamble)
3083 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3336 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3084 else 3337 else
3085 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 3338 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3086 3339
3087 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3340 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3088 if (vif->bss_conf.use_short_slot) 3341 if (vif->bss_conf.use_short_slot)
3089 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 3342 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3090 else 3343 else
3091 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3344 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3092 } 3345 }
3093 3346
3094 iwlcore_commit_rxon(priv); 3347 iwlcore_commit_rxon(priv, ctx);
3095 3348
3096 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 3349 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3097 vif->bss_conf.aid, priv->active_rxon.bssid_addr); 3350 vif->bss_conf.aid, ctx->active.bssid_addr);
3098 3351
3099 switch (vif->type) { 3352 switch (vif->type) {
3100 case NL80211_IFTYPE_STATION: 3353 case NL80211_IFTYPE_STATION:
@@ -3137,11 +3390,14 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3137{ 3390{
3138 int ret; 3391 int ret;
3139 struct ieee80211_hw *hw = priv->hw; 3392 struct ieee80211_hw *hw = priv->hw;
3393 struct iwl_rxon_context *ctx;
3394
3140 hw->rate_control_algorithm = "iwl-agn-rs"; 3395 hw->rate_control_algorithm = "iwl-agn-rs";
3141 3396
3142 /* Tell mac80211 our characteristics */ 3397 /* Tell mac80211 our characteristics */
3143 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3398 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3144 IEEE80211_HW_AMPDU_AGGREGATION | 3399 IEEE80211_HW_AMPDU_AGGREGATION |
3400 IEEE80211_HW_NEED_DTIM_PERIOD |
3145 IEEE80211_HW_SPECTRUM_MGMT; 3401 IEEE80211_HW_SPECTRUM_MGMT;
3146 3402
3147 if (!priv->cfg->broken_powersave) 3403 if (!priv->cfg->broken_powersave)
@@ -3155,9 +3411,10 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3155 hw->sta_data_size = sizeof(struct iwl_station_priv); 3411 hw->sta_data_size = sizeof(struct iwl_station_priv);
3156 hw->vif_data_size = sizeof(struct iwl_vif_priv); 3412 hw->vif_data_size = sizeof(struct iwl_vif_priv);
3157 3413
3158 hw->wiphy->interface_modes = 3414 for_each_context(priv, ctx) {
3159 BIT(NL80211_IFTYPE_STATION) | 3415 hw->wiphy->interface_modes |= ctx->interface_modes;
3160 BIT(NL80211_IFTYPE_ADHOC); 3416 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
3417 }
3161 3418
3162 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 3419 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3163 WIPHY_FLAG_DISABLE_BEACON_HINTS; 3420 WIPHY_FLAG_DISABLE_BEACON_HINTS;
@@ -3247,15 +3504,6 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
3247 3504
3248 priv->is_open = 0; 3505 priv->is_open = 0;
3249 3506
3250 if (iwl_is_ready_rf(priv) || test_bit(STATUS_SCAN_HW, &priv->status)) {
3251 /* stop mac, cancel any scan request and clear
3252 * RXON_FILTER_ASSOC_MSK BIT
3253 */
3254 mutex_lock(&priv->mutex);
3255 iwl_scan_cancel_timeout(priv, 100);
3256 mutex_unlock(&priv->mutex);
3257 }
3258
3259 iwl_down(priv); 3507 iwl_down(priv);
3260 3508
3261 flush_workqueue(priv->workqueue); 3509 flush_workqueue(priv->workqueue);
@@ -3285,24 +3533,25 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3285 3533
3286void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) 3534void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3287{ 3535{
3536 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
3288 int ret = 0; 3537 int ret = 0;
3289 3538
3539 lockdep_assert_held(&priv->mutex);
3540
3290 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3541 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3291 return; 3542 return;
3292 3543
3293 /* The following should be done only at AP bring up */ 3544 /* The following should be done only at AP bring up */
3294 if (!iwl_is_associated(priv)) { 3545 if (!iwl_is_associated_ctx(ctx)) {
3295 3546
3296 /* RXON - unassoc (to set timing command) */ 3547 /* RXON - unassoc (to set timing command) */
3297 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3548 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3298 iwlcore_commit_rxon(priv); 3549 iwlcore_commit_rxon(priv, ctx);
3299 3550
3300 /* RXON Timing */ 3551 /* RXON Timing */
3301 iwl_setup_rxon_timing(priv, vif); 3552 ret = iwl_send_rxon_timing(priv, ctx);
3302 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3303 sizeof(priv->rxon_timing), &priv->rxon_timing);
3304 if (ret) 3553 if (ret)
3305 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3554 IWL_WARN(priv, "RXON timing failed - "
3306 "Attempting to continue.\n"); 3555 "Attempting to continue.\n");
3307 3556
3308 /* AP has all antennas */ 3557 /* AP has all antennas */
@@ -3310,28 +3559,30 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3310 priv->hw_params.valid_rx_ant; 3559 priv->hw_params.valid_rx_ant;
3311 iwl_set_rxon_ht(priv, &priv->current_ht_config); 3560 iwl_set_rxon_ht(priv, &priv->current_ht_config);
3312 if (priv->cfg->ops->hcmd->set_rxon_chain) 3561 if (priv->cfg->ops->hcmd->set_rxon_chain)
3313 priv->cfg->ops->hcmd->set_rxon_chain(priv); 3562 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
3314 3563
3315 priv->staging_rxon.assoc_id = 0; 3564 ctx->staging.assoc_id = 0;
3316 3565
3317 if (vif->bss_conf.use_short_preamble) 3566 if (vif->bss_conf.use_short_preamble)
3318 priv->staging_rxon.flags |= 3567 ctx->staging.flags |=
3319 RXON_FLG_SHORT_PREAMBLE_MSK; 3568 RXON_FLG_SHORT_PREAMBLE_MSK;
3320 else 3569 else
3321 priv->staging_rxon.flags &= 3570 ctx->staging.flags &=
3322 ~RXON_FLG_SHORT_PREAMBLE_MSK; 3571 ~RXON_FLG_SHORT_PREAMBLE_MSK;
3323 3572
3324 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3573 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3325 if (vif->bss_conf.use_short_slot) 3574 if (vif->bss_conf.use_short_slot)
3326 priv->staging_rxon.flags |= 3575 ctx->staging.flags |=
3327 RXON_FLG_SHORT_SLOT_MSK; 3576 RXON_FLG_SHORT_SLOT_MSK;
3328 else 3577 else
3329 priv->staging_rxon.flags &= 3578 ctx->staging.flags &=
3330 ~RXON_FLG_SHORT_SLOT_MSK; 3579 ~RXON_FLG_SHORT_SLOT_MSK;
3331 } 3580 }
3581 /* need to send beacon cmd before committing assoc RXON! */
3582 iwl_send_beacon_cmd(priv);
3332 /* restore RXON assoc */ 3583 /* restore RXON assoc */
3333 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3584 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3334 iwlcore_commit_rxon(priv); 3585 iwlcore_commit_rxon(priv, ctx);
3335 } 3586 }
3336 iwl_send_beacon_cmd(priv); 3587 iwl_send_beacon_cmd(priv);
3337 3588
@@ -3348,9 +3599,11 @@ static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
3348{ 3599{
3349 3600
3350 struct iwl_priv *priv = hw->priv; 3601 struct iwl_priv *priv = hw->priv;
3602 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
3603
3351 IWL_DEBUG_MAC80211(priv, "enter\n"); 3604 IWL_DEBUG_MAC80211(priv, "enter\n");
3352 3605
3353 iwl_update_tkip_key(priv, keyconf, sta, 3606 iwl_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
3354 iv32, phase1key); 3607 iv32, phase1key);
3355 3608
3356 IWL_DEBUG_MAC80211(priv, "leave\n"); 3609 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -3362,6 +3615,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3362 struct ieee80211_key_conf *key) 3615 struct ieee80211_key_conf *key)
3363{ 3616{
3364 struct iwl_priv *priv = hw->priv; 3617 struct iwl_priv *priv = hw->priv;
3618 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
3619 struct iwl_rxon_context *ctx = vif_priv->ctx;
3365 int ret; 3620 int ret;
3366 u8 sta_id; 3621 u8 sta_id;
3367 bool is_default_wep_key = false; 3622 bool is_default_wep_key = false;
@@ -3373,7 +3628,7 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3373 return -EOPNOTSUPP; 3628 return -EOPNOTSUPP;
3374 } 3629 }
3375 3630
3376 sta_id = iwl_sta_id_or_broadcast(priv, sta); 3631 sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
3377 if (sta_id == IWL_INVALID_STATION) 3632 if (sta_id == IWL_INVALID_STATION)
3378 return -EINVAL; 3633 return -EINVAL;
3379 3634
@@ -3386,9 +3641,11 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3386 * in 1X mode. 3641 * in 1X mode.
3387 * In legacy wep mode, we use another host command to the uCode. 3642 * In legacy wep mode, we use another host command to the uCode.
3388 */ 3643 */
3389 if (key->alg == ALG_WEP && !sta && vif->type != NL80211_IFTYPE_AP) { 3644 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3645 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3646 !sta) {
3390 if (cmd == SET_KEY) 3647 if (cmd == SET_KEY)
3391 is_default_wep_key = !priv->key_mapping_key; 3648 is_default_wep_key = !ctx->key_mapping_keys;
3392 else 3649 else
3393 is_default_wep_key = 3650 is_default_wep_key =
3394 (key->hw_key_idx == HW_KEY_DEFAULT); 3651 (key->hw_key_idx == HW_KEY_DEFAULT);
@@ -3397,17 +3654,18 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3397 switch (cmd) { 3654 switch (cmd) {
3398 case SET_KEY: 3655 case SET_KEY:
3399 if (is_default_wep_key) 3656 if (is_default_wep_key)
3400 ret = iwl_set_default_wep_key(priv, key); 3657 ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
3401 else 3658 else
3402 ret = iwl_set_dynamic_key(priv, key, sta_id); 3659 ret = iwl_set_dynamic_key(priv, vif_priv->ctx,
3660 key, sta_id);
3403 3661
3404 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); 3662 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
3405 break; 3663 break;
3406 case DISABLE_KEY: 3664 case DISABLE_KEY:
3407 if (is_default_wep_key) 3665 if (is_default_wep_key)
3408 ret = iwl_remove_default_wep_key(priv, key); 3666 ret = iwl_remove_default_wep_key(priv, ctx, key);
3409 else 3667 else
3410 ret = iwl_remove_dynamic_key(priv, key, sta_id); 3668 ret = iwl_remove_dynamic_key(priv, ctx, key, sta_id);
3411 3669
3412 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); 3670 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
3413 break; 3671 break;
@@ -3476,8 +3734,8 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3476 3734
3477 sta_priv->lq_sta.lq.general_params.flags &= 3735 sta_priv->lq_sta.lq.general_params.flags &=
3478 ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; 3736 ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
3479 iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq, 3737 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
3480 CMD_ASYNC, false); 3738 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
3481 } 3739 }
3482 break; 3740 break;
3483 case IEEE80211_AMPDU_TX_OPERATIONAL: 3741 case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -3492,8 +3750,8 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3492 3750
3493 sta_priv->lq_sta.lq.general_params.flags |= 3751 sta_priv->lq_sta.lq.general_params.flags |=
3494 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; 3752 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
3495 iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq, 3753 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
3496 CMD_ASYNC, false); 3754 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
3497 } 3755 }
3498 ret = 0; 3756 ret = 0;
3499 break; 3757 break;
@@ -3539,6 +3797,7 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3539{ 3797{
3540 struct iwl_priv *priv = hw->priv; 3798 struct iwl_priv *priv = hw->priv;
3541 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; 3799 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
3800 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
3542 bool is_ap = vif->type == NL80211_IFTYPE_STATION; 3801 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3543 int ret; 3802 int ret;
3544 u8 sta_id; 3803 u8 sta_id;
@@ -3554,8 +3813,8 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3554 if (vif->type == NL80211_IFTYPE_AP) 3813 if (vif->type == NL80211_IFTYPE_AP)
3555 sta_priv->client = true; 3814 sta_priv->client = true;
3556 3815
3557 ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap, 3816 ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
3558 &sta_id); 3817 is_ap, sta, &sta_id);
3559 if (ret) { 3818 if (ret) {
3560 IWL_ERR(priv, "Unable to add station %pM (%d)\n", 3819 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3561 sta->addr, ret); 3820 sta->addr, ret);
@@ -3581,7 +3840,17 @@ static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
3581 struct iwl_priv *priv = hw->priv; 3840 struct iwl_priv *priv = hw->priv;
3582 const struct iwl_channel_info *ch_info; 3841 const struct iwl_channel_info *ch_info;
3583 struct ieee80211_conf *conf = &hw->conf; 3842 struct ieee80211_conf *conf = &hw->conf;
3843 struct ieee80211_channel *channel = ch_switch->channel;
3584 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 3844 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
3845 /*
3846 * MULTI-FIXME
3847 * When we add support for multiple interfaces, we need to
3848 * revisit this. The channel switch command in the device
3849 * only affects the BSS context, but what does that really
3850 * mean? And what if we get a CSA on the second interface?
3851 * This needs a lot of work.
3852 */
3853 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3585 u16 ch; 3854 u16 ch;
3586 unsigned long flags = 0; 3855 unsigned long flags = 0;
3587 3856
@@ -3594,7 +3863,7 @@ static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
3594 test_bit(STATUS_SCANNING, &priv->status)) 3863 test_bit(STATUS_SCANNING, &priv->status))
3595 goto out_exit; 3864 goto out_exit;
3596 3865
3597 if (!iwl_is_associated(priv)) 3866 if (!iwl_is_associated_ctx(ctx))
3598 goto out_exit; 3867 goto out_exit;
3599 3868
3600 /* channel switch in progress */ 3869 /* channel switch in progress */
@@ -3604,11 +3873,10 @@ static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
3604 mutex_lock(&priv->mutex); 3873 mutex_lock(&priv->mutex);
3605 if (priv->cfg->ops->lib->set_channel_switch) { 3874 if (priv->cfg->ops->lib->set_channel_switch) {
3606 3875
3607 ch = ieee80211_frequency_to_channel( 3876 ch = channel->hw_value;
3608 ch_switch->channel->center_freq); 3877 if (le16_to_cpu(ctx->active.channel) != ch) {
3609 if (le16_to_cpu(priv->active_rxon.channel) != ch) {
3610 ch_info = iwl_get_channel_info(priv, 3878 ch_info = iwl_get_channel_info(priv,
3611 conf->channel->band, 3879 channel->band,
3612 ch); 3880 ch);
3613 if (!is_channel_valid(ch_info)) { 3881 if (!is_channel_valid(ch_info)) {
3614 IWL_DEBUG_MAC80211(priv, "invalid channel\n"); 3882 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
@@ -3619,34 +3887,31 @@ static void iwl_mac_channel_switch(struct ieee80211_hw *hw,
3619 priv->current_ht_config.smps = conf->smps_mode; 3887 priv->current_ht_config.smps = conf->smps_mode;
3620 3888
3621 /* Configure HT40 channels */ 3889 /* Configure HT40 channels */
3622 ht_conf->is_ht = conf_is_ht(conf); 3890 ctx->ht.enabled = conf_is_ht(conf);
3623 if (ht_conf->is_ht) { 3891 if (ctx->ht.enabled) {
3624 if (conf_is_ht40_minus(conf)) { 3892 if (conf_is_ht40_minus(conf)) {
3625 ht_conf->extension_chan_offset = 3893 ctx->ht.extension_chan_offset =
3626 IEEE80211_HT_PARAM_CHA_SEC_BELOW; 3894 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
3627 ht_conf->is_40mhz = true; 3895 ctx->ht.is_40mhz = true;
3628 } else if (conf_is_ht40_plus(conf)) { 3896 } else if (conf_is_ht40_plus(conf)) {
3629 ht_conf->extension_chan_offset = 3897 ctx->ht.extension_chan_offset =
3630 IEEE80211_HT_PARAM_CHA_SEC_ABOVE; 3898 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
3631 ht_conf->is_40mhz = true; 3899 ctx->ht.is_40mhz = true;
3632 } else { 3900 } else {
3633 ht_conf->extension_chan_offset = 3901 ctx->ht.extension_chan_offset =
3634 IEEE80211_HT_PARAM_CHA_SEC_NONE; 3902 IEEE80211_HT_PARAM_CHA_SEC_NONE;
3635 ht_conf->is_40mhz = false; 3903 ctx->ht.is_40mhz = false;
3636 } 3904 }
3637 } else 3905 } else
3638 ht_conf->is_40mhz = false; 3906 ctx->ht.is_40mhz = false;
3639 3907
3640 /* if we are switching from ht to 2.4 clear flags 3908 if ((le16_to_cpu(ctx->staging.channel) != ch))
3641 * from any ht related info since 2.4 does not 3909 ctx->staging.flags = 0;
3642 * support ht */
3643 if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
3644 priv->staging_rxon.flags = 0;
3645 3910
3646 iwl_set_rxon_channel(priv, conf->channel); 3911 iwl_set_rxon_channel(priv, channel, ctx);
3647 iwl_set_rxon_ht(priv, ht_conf); 3912 iwl_set_rxon_ht(priv, ht_conf);
3648 iwl_set_flags_for_band(priv, conf->channel->band, 3913 iwl_set_flags_for_band(priv, ctx, channel->band,
3649 priv->vif); 3914 ctx->vif);
3650 spin_unlock_irqrestore(&priv->lock, flags); 3915 spin_unlock_irqrestore(&priv->lock, flags);
3651 3916
3652 iwl_set_rate(priv); 3917 iwl_set_rate(priv);
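The channel-switch path now keeps HT state in the context rather than in priv->current_ht_config alone. A small model of the HT40 mapping it performs; the enum stands in for the IEEE80211_HT_PARAM_CHA_SEC_* constants:

    #include <stdbool.h>
    #include <stdio.h>

    enum sec_offset { SEC_NONE, SEC_ABOVE, SEC_BELOW };

    struct ht_state { bool enabled, is_40mhz; enum sec_offset ext; };

    static void apply_ht(struct ht_state *ht, bool is_ht,
                         bool ht40_minus, bool ht40_plus)
    {
        ht->enabled = is_ht;
        if (is_ht && ht40_minus) {
            ht->ext = SEC_BELOW;    /* extension channel below primary */
            ht->is_40mhz = true;
        } else if (is_ht && ht40_plus) {
            ht->ext = SEC_ABOVE;    /* extension channel above primary */
            ht->is_40mhz = true;
        } else {
            ht->ext = SEC_NONE;
            ht->is_40mhz = false;
        }
    }

    int main(void)
    {
        struct ht_state ht;

        apply_ht(&ht, true, false, true);
        printf("40MHz=%d ext=%d\n", ht.is_40mhz, ht.ext);
        return 0;
    }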
@@ -3663,7 +3928,7 @@ out:
3663 mutex_unlock(&priv->mutex); 3928 mutex_unlock(&priv->mutex);
3664out_exit: 3929out_exit:
3665 if (!priv->switch_rxon.switch_in_progress) 3930 if (!priv->switch_rxon.switch_in_progress)
3666 ieee80211_chswitch_done(priv->vif, false); 3931 ieee80211_chswitch_done(ctx->vif, false);
3667 IWL_DEBUG_MAC80211(priv, "leave\n"); 3932 IWL_DEBUG_MAC80211(priv, "leave\n");
3668} 3933}
3669 3934
@@ -3674,6 +3939,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3674{ 3939{
3675 struct iwl_priv *priv = hw->priv; 3940 struct iwl_priv *priv = hw->priv;
3676 __le32 filter_or = 0, filter_nand = 0; 3941 __le32 filter_or = 0, filter_nand = 0;
3942 struct iwl_rxon_context *ctx;
3677 3943
3678#define CHK(test, flag) do { \ 3944#define CHK(test, flag) do { \
3679 if (*total_flags & (test)) \ 3945 if (*total_flags & (test)) \
@@ -3693,10 +3959,11 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3693 3959
3694 mutex_lock(&priv->mutex); 3960 mutex_lock(&priv->mutex);
3695 3961
3696 priv->staging_rxon.filter_flags &= ~filter_nand; 3962 for_each_context(priv, ctx) {
3697 priv->staging_rxon.filter_flags |= filter_or; 3963 ctx->staging.filter_flags &= ~filter_nand;
3698 3964 ctx->staging.filter_flags |= filter_or;
3699 iwlcore_commit_rxon(priv); 3965 iwlcore_commit_rxon(priv, ctx);
3966 }
3700 3967
3701 mutex_unlock(&priv->mutex); 3968 mutex_unlock(&priv->mutex);
3702 3969
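iwlagn_configure_filter computes one (filter_nand, filter_or) pair from the mac80211 flags and then applies it to every context. The core update is a two-step mask, modeled here with invented bit positions:

    #include <stdio.h>

    #define FILTER_PROMISC    (1u << 0)    /* illustrative bit positions */
    #define FILTER_CTL        (1u << 1)
    #define FILTER_BCON_AWARE (1u << 2)

    static unsigned int apply_filter(unsigned int staging,
                                     unsigned int nand, unsigned int set)
    {
        staging &= ~nand;    /* clear everything the new config rejects */
        staging |= set;      /* then set everything it requires */
        return staging;
    }

    int main(void)
    {
        unsigned int staging = FILTER_CTL | FILTER_BCON_AWARE;

        staging = apply_filter(staging, FILTER_CTL, FILTER_PROMISC);
        printf("filter_flags = 0x%x\n", staging);
        return 0;
    }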
@@ -3765,6 +4032,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3765 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); 4032 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
3766 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); 4033 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
3767 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush); 4034 INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
4035 INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
4036 INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
3768 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); 4037 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
3769 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); 4038 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
3770 4039
@@ -3802,15 +4071,17 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3802 priv->cfg->ops->lib->cancel_deferred_work(priv); 4071 priv->cfg->ops->lib->cancel_deferred_work(priv);
3803 4072
3804 cancel_delayed_work_sync(&priv->init_alive_start); 4073 cancel_delayed_work_sync(&priv->init_alive_start);
3805 cancel_delayed_work(&priv->scan_check);
3806 cancel_work_sync(&priv->start_internal_scan);
3807 cancel_delayed_work(&priv->alive_start); 4074 cancel_delayed_work(&priv->alive_start);
3808 cancel_work_sync(&priv->run_time_calib_work); 4075 cancel_work_sync(&priv->run_time_calib_work);
3809 cancel_work_sync(&priv->beacon_update); 4076 cancel_work_sync(&priv->beacon_update);
4077
4078 iwl_cancel_scan_deferred_work(priv);
4079
4080 cancel_work_sync(&priv->bt_full_concurrency);
4081 cancel_work_sync(&priv->bt_runtime_config);
4082
3810 del_timer_sync(&priv->statistics_periodic); 4083 del_timer_sync(&priv->statistics_periodic);
3811 del_timer_sync(&priv->ucode_trace); 4084 del_timer_sync(&priv->ucode_trace);
3812 if (priv->cfg->ops->lib->recover_from_tx_stall)
3813 del_timer_sync(&priv->monitor_recover);
3814} 4085}
3815 4086
3816static void iwl_init_hw_rates(struct iwl_priv *priv, 4087static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3865,10 +4136,22 @@ static int iwl_init_drv(struct iwl_priv *priv)
3865 4136
3866 /* Choose which receivers/antennas to use */ 4137 /* Choose which receivers/antennas to use */
3867 if (priv->cfg->ops->hcmd->set_rxon_chain) 4138 if (priv->cfg->ops->hcmd->set_rxon_chain)
3868 priv->cfg->ops->hcmd->set_rxon_chain(priv); 4139 priv->cfg->ops->hcmd->set_rxon_chain(priv,
4140 &priv->contexts[IWL_RXON_CTX_BSS]);
3869 4141
3870 iwl_init_scan_params(priv); 4142 iwl_init_scan_params(priv);
3871 4143
4144 /* init bt coex */
4145 if (priv->cfg->advanced_bt_coexist) {
4146 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
4147 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
4148 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
4149 priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
4150 priv->bt_duration = BT_DURATION_LIMIT_DEF;
4151 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
4152 priv->dynamic_agg_thresh = BT_AGG_THRESHOLD_DEF;
4153 }
4154
3872 /* Set the tx_power_user_lmt to the lowest power level 4155 /* Set the tx_power_user_lmt to the lowest power level
3873 * this value will get overwritten by channel max power avg 4156 * this value will get overwritten by channel max power avg
3874 * from eeprom */ 4157 * from eeprom */
@@ -3923,11 +4206,60 @@ static struct ieee80211_ops iwl_hw_ops = {
3923 .sta_remove = iwl_mac_sta_remove, 4206 .sta_remove = iwl_mac_sta_remove,
3924 .channel_switch = iwl_mac_channel_switch, 4207 .channel_switch = iwl_mac_channel_switch,
3925 .flush = iwl_mac_flush, 4208 .flush = iwl_mac_flush,
4209 .tx_last_beacon = iwl_mac_tx_last_beacon,
4210};
4211
4212static void iwl_hw_detect(struct iwl_priv *priv)
4213{
4214 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
4215 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
4216 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
4217 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
4218}
4219
4220static int iwl_set_hw_params(struct iwl_priv *priv)
4221{
4222 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
4223 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
4224 if (priv->cfg->mod_params->amsdu_size_8K)
4225 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
4226 else
4227 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
4228
4229 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
4230
4231 if (priv->cfg->mod_params->disable_11n)
4232 priv->cfg->sku &= ~IWL_SKU_N;
4233
4234 /* Device-specific setup */
4235 return priv->cfg->ops->lib->set_hw_params(priv);
4236}
4237
4238static const u8 iwlagn_bss_ac_to_fifo[] = {
4239 IWL_TX_FIFO_VO,
4240 IWL_TX_FIFO_VI,
4241 IWL_TX_FIFO_BE,
4242 IWL_TX_FIFO_BK,
4243};
4244
4245static const u8 iwlagn_bss_ac_to_queue[] = {
4246 0, 1, 2, 3,
4247};
4248
4249static const u8 iwlagn_pan_ac_to_fifo[] = {
4250 IWL_TX_FIFO_VO_IPAN,
4251 IWL_TX_FIFO_VI_IPAN,
4252 IWL_TX_FIFO_BE_IPAN,
4253 IWL_TX_FIFO_BK_IPAN,
4254};
4255
4256static const u8 iwlagn_pan_ac_to_queue[] = {
4257 7, 6, 5, 4,
3926}; 4258};
3927 4259
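The four tables above give each context its own AC mapping: the BSS context keeps the traditional queue order 0..3 while the PAN context runs its ACs on queues 7..4 in reverse order. A sketch of how a per-context lookup indexed by mac80211's AC would pick the TX queue (the FIFO table works the same way):

    #include <stdio.h>

    enum ac { AC_VO, AC_VI, AC_BE, AC_BK, AC_NUM };

    static const unsigned char bss_ac_to_queue[AC_NUM] = { 0, 1, 2, 3 };
    static const unsigned char pan_ac_to_queue[AC_NUM] = { 7, 6, 5, 4 };

    struct ctx { const unsigned char *ac_to_queue; };

    int main(void)
    {
        struct ctx bss = { bss_ac_to_queue }, pan = { pan_ac_to_queue };
        int ac;

        for (ac = AC_VO; ac < AC_NUM; ac++)
            printf("ac %d -> bss q%d, pan q%d\n", ac,
                   bss.ac_to_queue[ac], pan.ac_to_queue[ac]);
        return 0;
    }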
3928static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 4260static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3929{ 4261{
3930 int err = 0; 4262 int err = 0, i;
3931 struct iwl_priv *priv; 4263 struct iwl_priv *priv;
3932 struct ieee80211_hw *hw; 4264 struct ieee80211_hw *hw;
3933 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 4265 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
@@ -3955,6 +4287,53 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3955 priv = hw->priv; 4287 priv = hw->priv;
3956 /* At this point both hw and priv are allocated. */ 4288 /* At this point both hw and priv are allocated. */
3957 4289
4290 /*
4291 * The default context is always valid,
4292 * more may be discovered when firmware
4293 * is loaded.
4294 */
4295 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
4296
4297 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
4298 priv->contexts[i].ctxid = i;
4299
4300 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
4301 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
4302 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
4303 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
4304 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
4305 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
4306 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
4307 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
4308 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwlagn_bss_ac_to_fifo;
4309 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwlagn_bss_ac_to_queue;
4310 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
4311 BIT(NL80211_IFTYPE_ADHOC);
4312 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
4313 BIT(NL80211_IFTYPE_STATION);
4314 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
4315 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
4316 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
4317
4318 priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
4319 priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd = REPLY_WIPAN_RXON_TIMING;
4320 priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd = REPLY_WIPAN_RXON_ASSOC;
4321 priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
4322 priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
4323 priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
4324 priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
4325 priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
4326 priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo = iwlagn_pan_ac_to_fifo;
4327 priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue = iwlagn_pan_ac_to_queue;
4328 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
4329 priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
4330 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
4331 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
4332 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
4333 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
4334
4335 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
4336
3958 SET_IEEE80211_DEV(hw, &pdev->dev); 4337 SET_IEEE80211_DEV(hw, &pdev->dev);
3959 4338
3960 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); 4339 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
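The context table is built with a long run of assignments because priv itself is allocated by mac80211; the same data could also come from a static template copied in at probe time. A sketch of that alternative, assuming matching field names; the command IDs shown are illustrative, not the uCode's:

    #include <string.h>

    enum iwl_rxon_context_id { IWL_RXON_CTX_BSS, IWL_RXON_CTX_PAN, NUM_IWL_RXON_CTX };

    struct iwl_rxon_context_tmpl {
        int ctxid;
        int rxon_cmd, rxon_timing_cmd, qos_cmd;
        int always_active;
    };

    static const struct iwl_rxon_context_tmpl context_tmpl[NUM_IWL_RXON_CTX] = {
        [IWL_RXON_CTX_BSS] = {
            .ctxid = IWL_RXON_CTX_BSS,
            .rxon_cmd = 0x10,           /* REPLY_RXON, illustrative */
            .rxon_timing_cmd = 0x14,    /* REPLY_RXON_TIMING */
            .qos_cmd = 0x4d,            /* REPLY_QOS_PARAM */
            .always_active = 1,
        },
        [IWL_RXON_CTX_PAN] = {
            .ctxid = IWL_RXON_CTX_PAN,
            .rxon_cmd = 0xb0,           /* REPLY_WIPAN_RXON, illustrative */
            .rxon_timing_cmd = 0xb1,
            .qos_cmd = 0xb3,
        },
    };

    int main(void)
    {
        struct iwl_rxon_context_tmpl contexts[NUM_IWL_RXON_CTX];

        memcpy(contexts, context_tmpl, sizeof(contexts));    /* per-device copy */
        return contexts[IWL_RXON_CTX_PAN].rxon_cmd != 0xb0;
    }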
@@ -3962,12 +4341,23 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3962 priv->pci_dev = pdev; 4341 priv->pci_dev = pdev;
3963 priv->inta_mask = CSR_INI_SET_MASK; 4342 priv->inta_mask = CSR_INI_SET_MASK;
3964 4343
 4344 /* is antenna coupling more than 35 dB? */
4345 priv->bt_ant_couple_ok =
4346 (iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
4347 true : false;
4348
4349 /* enable/disable bt channel announcement */
4350 priv->bt_ch_announce = iwlagn_bt_ch_announce;
4351
3965 if (iwl_alloc_traffic_mem(priv)) 4352 if (iwl_alloc_traffic_mem(priv))
3966 IWL_ERR(priv, "Not enough memory to generate traffic log\n"); 4353 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3967 4354
3968 /************************** 4355 /**************************
3969 * 2. Initializing PCI bus 4356 * 2. Initializing PCI bus
3970 **************************/ 4357 **************************/
4358 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
4359 PCIE_LINK_STATE_CLKPM);
4360
3971 if (pci_enable_device(pdev)) { 4361 if (pci_enable_device(pdev)) {
3972 err = -ENODEV; 4362 err = -ENODEV;
3973 goto out_ieee80211_free_hw; 4363 goto out_ieee80211_free_hw;
@@ -4492,3 +4882,11 @@ module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
4492 S_IRUGO); 4882 S_IRUGO);
4493MODULE_PARM_DESC(ucode_alternative, 4883MODULE_PARM_DESC(ucode_alternative,
4494 "specify ucode alternative to use from ucode file"); 4884 "specify ucode alternative to use from ucode file");
4885
4886module_param_named(antenna_coupling, iwlagn_ant_coupling, int, S_IRUGO);
4887MODULE_PARM_DESC(antenna_coupling,
4888 "specify antenna coupling in dB (defualt: 0 dB)");
4889
4890module_param_named(bt_ch_announce, iwlagn_bt_ch_announce, bool, S_IRUGO);
4891MODULE_PARM_DESC(bt_ch_announce,
4892 "Enable BT channel announcement mode (default: enable)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index cc6464dc72e5..a372184ac210 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -95,6 +95,7 @@ extern struct iwl_cfg iwl1000_bg_cfg;
95 95
96extern struct iwl_mod_params iwlagn_mod_params; 96extern struct iwl_mod_params iwlagn_mod_params;
97extern struct iwl_hcmd_ops iwlagn_hcmd; 97extern struct iwl_hcmd_ops iwlagn_hcmd;
98extern struct iwl_hcmd_ops iwlagn_bt_hcmd;
98extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils; 99extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
99 100
100int iwl_reset_ict(struct iwl_priv *priv); 101int iwl_reset_ict(struct iwl_priv *priv);
@@ -133,6 +134,8 @@ void iwlagn_rx_calib_complete(struct iwl_priv *priv,
133void iwlagn_init_alive_start(struct iwl_priv *priv); 134void iwlagn_init_alive_start(struct iwl_priv *priv);
134int iwlagn_alive_notify(struct iwl_priv *priv); 135int iwlagn_alive_notify(struct iwl_priv *priv);
135int iwl_verify_ucode(struct iwl_priv *priv); 136int iwl_verify_ucode(struct iwl_priv *priv);
137void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
138void iwlagn_send_prio_tbl(struct iwl_priv *priv);
136 139
137/* lib */ 140/* lib */
138void iwl_check_abort_status(struct iwl_priv *priv, 141void iwl_check_abort_status(struct iwl_priv *priv,
@@ -216,14 +219,28 @@ void iwl_reply_statistics(struct iwl_priv *priv,
216 struct iwl_rx_mem_buffer *rxb); 219 struct iwl_rx_mem_buffer *rxb);
217 220
218/* scan */ 221/* scan */
219void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); 222int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
220 223
221/* station mgmt */ 224/* station mgmt */
222int iwlagn_manage_ibss_station(struct iwl_priv *priv, 225int iwlagn_manage_ibss_station(struct iwl_priv *priv,
223 struct ieee80211_vif *vif, bool add); 226 struct ieee80211_vif *vif, bool add);
224 227
225/* hcmd */ 228/* hcmd */
226int iwlagn_send_rxon_assoc(struct iwl_priv *priv); 229int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
230 struct iwl_rxon_context *ctx);
227int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant); 231int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
228 232
233/* bt coex */
234void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
235void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
236 struct iwl_rx_mem_buffer *rxb);
237void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
238void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
239void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
240
241#ifdef CONFIG_IWLWIFI_DEBUG
242const char *iwl_get_agg_tx_fail_reason(u16 status);
243#else
244static inline const char *iwl_get_agg_tx_fail_reason(u16 status) { return ""; }
245#endif
229#endif /* __iwl_agn_h__ */ 246#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 60725a5c1b69..27e250c8d4b5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -62,7 +62,7 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63/* 63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions. 64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-4965-hw.h for hardware-related definitions. 65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions. 66 * Please use iwl-dev.h for driver implementation definitions.
67 */ 67 */
68 68
@@ -173,6 +173,23 @@ enum {
173 REPLY_RX_MPDU_CMD = 0xc1, 173 REPLY_RX_MPDU_CMD = 0xc1,
174 REPLY_RX = 0xc3, 174 REPLY_RX = 0xc3,
175 REPLY_COMPRESSED_BA = 0xc5, 175 REPLY_COMPRESSED_BA = 0xc5,
176
177 /* BT Coex */
178 REPLY_BT_COEX_PRIO_TABLE = 0xcc,
179 REPLY_BT_COEX_PROT_ENV = 0xcd,
180 REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
181 REPLY_BT_COEX_SCO = 0xcf,
182
183 /* PAN commands */
184 REPLY_WIPAN_PARAMS = 0xb2,
185 REPLY_WIPAN_RXON = 0xb3, /* use REPLY_RXON structure */
186 REPLY_WIPAN_RXON_TIMING = 0xb4, /* use REPLY_RXON_TIMING structure */
187 REPLY_WIPAN_RXON_ASSOC = 0xb6, /* use REPLY_RXON_ASSOC structure */
188 REPLY_WIPAN_QOS_PARAM = 0xb7, /* use REPLY_QOS_PARAM structure */
189 REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */
190 REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
191 REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
192
176 REPLY_MAX = 0xff 193 REPLY_MAX = 0xff
177}; 194};
178 195
@@ -600,6 +617,9 @@ enum {
600 RXON_DEV_TYPE_ESS = 3, 617 RXON_DEV_TYPE_ESS = 3,
601 RXON_DEV_TYPE_IBSS = 4, 618 RXON_DEV_TYPE_IBSS = 4,
602 RXON_DEV_TYPE_SNIFFER = 6, 619 RXON_DEV_TYPE_SNIFFER = 6,
620 RXON_DEV_TYPE_CP = 7,
621 RXON_DEV_TYPE_2STA = 8,
622 RXON_DEV_TYPE_P2P = 9,
603}; 623};
604 624
605 625
@@ -816,7 +836,8 @@ struct iwl_rxon_time_cmd {
816 __le16 atim_window; 836 __le16 atim_window;
817 __le32 beacon_init_val; 837 __le32 beacon_init_val;
818 __le16 listen_interval; 838 __le16 listen_interval;
819 __le16 reserved; 839 u8 dtim_period;
840 u8 delta_cp_bss_tbtts;
820} __packed; 841} __packed;
821 842
822/* 843/*
@@ -953,11 +974,13 @@ struct iwl_qosparam_cmd {
953 974
954/* Special, dedicated locations within device's station table */ 975/* Special, dedicated locations within device's station table */
955#define IWL_AP_ID 0 976#define IWL_AP_ID 0
977#define IWL_AP_ID_PAN 1
956#define IWL_STA_ID 2 978#define IWL_STA_ID 2
957#define IWL3945_BROADCAST_ID 24 979#define IWL3945_BROADCAST_ID 24
958#define IWL3945_STATION_COUNT 25 980#define IWL3945_STATION_COUNT 25
959#define IWL4965_BROADCAST_ID 31 981#define IWL4965_BROADCAST_ID 31
960#define IWL4965_STATION_COUNT 32 982#define IWL4965_STATION_COUNT 32
983#define IWLAGN_PAN_BCAST_ID 14
961#define IWLAGN_BROADCAST_ID 15 984#define IWLAGN_BROADCAST_ID 15
962#define IWLAGN_STATION_COUNT 16 985#define IWLAGN_STATION_COUNT 16
963 986
@@ -966,6 +989,7 @@ struct iwl_qosparam_cmd {
966 989
967#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) 990#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
968#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) 991#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
992#define STA_FLG_PAN_STATION cpu_to_le32(1 << 13)
969#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17) 993#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
970#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18) 994#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
971#define STA_FLG_MAX_AGG_SIZE_POS (19) 995#define STA_FLG_MAX_AGG_SIZE_POS (19)
@@ -994,6 +1018,7 @@ struct iwl_qosparam_cmd {
994#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000) 1018#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
995#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000) 1019#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
996#define STA_KEY_MAX_NUM 8 1020#define STA_KEY_MAX_NUM 8
1021#define STA_KEY_MAX_NUM_PAN 16
997 1022
998/* Flags indicate whether to modify vs. don't change various station params */ 1023/* Flags indicate whether to modify vs. don't change various station params */
999#define STA_MODIFY_KEY_MASK 0x01 1024#define STA_MODIFY_KEY_MASK 0x01
@@ -1056,7 +1081,8 @@ struct sta_id_modify {
1056 * 1081 *
1057 * The device contains an internal table of per-station information, 1082 * The device contains an internal table of per-station information,
1058 * with info on security keys, aggregation parameters, and Tx rates for 1083 * with info on security keys, aggregation parameters, and Tx rates for
1059 * initial Tx attempt and any retries (4965 uses REPLY_TX_LINK_QUALITY_CMD, 1084 * initial Tx attempt and any retries (agn devices use
1085 * REPLY_TX_LINK_QUALITY_CMD,
1060 * 3945 uses REPLY_RATE_SCALE to set up rate tables). 1086 * 3945 uses REPLY_RATE_SCALE to set up rate tables).
1061 * 1087 *
1062 * REPLY_ADD_STA sets up the table entry for one station, either creating 1088 * REPLY_ADD_STA sets up the table entry for one station, either creating
@@ -1367,21 +1393,24 @@ struct iwl4965_rx_non_cfg_phy {
1367} __packed; 1393} __packed;
1368 1394
1369 1395
1370#define IWL50_RX_RES_PHY_CNT 8 1396#define IWLAGN_RX_RES_PHY_CNT 8
1371#define IWL50_RX_RES_AGC_IDX 1 1397#define IWLAGN_RX_RES_AGC_IDX 1
1372#define IWL50_RX_RES_RSSI_AB_IDX 2 1398#define IWLAGN_RX_RES_RSSI_AB_IDX 2
1373#define IWL50_RX_RES_RSSI_C_IDX 3 1399#define IWLAGN_RX_RES_RSSI_C_IDX 3
1374#define IWL50_OFDM_AGC_MSK 0xfe00 1400#define IWLAGN_OFDM_AGC_MSK 0xfe00
1375#define IWL50_OFDM_AGC_BIT_POS 9 1401#define IWLAGN_OFDM_AGC_BIT_POS 9
1376#define IWL50_OFDM_RSSI_A_MSK 0x00ff 1402#define IWLAGN_OFDM_RSSI_INBAND_A_BITMSK 0x00ff
1377#define IWL50_OFDM_RSSI_A_BIT_POS 0 1403#define IWLAGN_OFDM_RSSI_ALLBAND_A_BITMSK 0xff00
1378#define IWL50_OFDM_RSSI_B_MSK 0xff0000 1404#define IWLAGN_OFDM_RSSI_A_BIT_POS 0
1379#define IWL50_OFDM_RSSI_B_BIT_POS 16 1405#define IWLAGN_OFDM_RSSI_INBAND_B_BITMSK 0xff0000
1380#define IWL50_OFDM_RSSI_C_MSK 0x00ff 1406#define IWLAGN_OFDM_RSSI_ALLBAND_B_BITMSK 0xff000000
1381#define IWL50_OFDM_RSSI_C_BIT_POS 0 1407#define IWLAGN_OFDM_RSSI_B_BIT_POS 16
1408#define IWLAGN_OFDM_RSSI_INBAND_C_BITMSK 0x00ff
1409#define IWLAGN_OFDM_RSSI_ALLBAND_C_BITMSK 0xff00
1410#define IWLAGN_OFDM_RSSI_C_BIT_POS 0
1382 1411
1383struct iwl5000_non_cfg_phy { 1412struct iwlagn_non_cfg_phy {
1384 __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */ 1413 __le32 non_cfg_phy[IWLAGN_RX_RES_PHY_CNT]; /* up to 8 phy entries */
1385} __packed; 1414} __packed;
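The IWL50_*-to-IWLAGN_* rename also splits each chain's RSSI word into in-band and all-band bytes. A hedged sketch of pulling chain A's in-band RSSI out of the phy data, using only the indices and masks defined above (the helper name is illustrative):

	static inline u8 iwlagn_rssi_a(const struct iwlagn_non_cfg_phy *phy)
	{
		u32 val = le32_to_cpu(phy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);

		/* keep the in-band byte for chain A, drop the all-band byte */
		return (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
			IWLAGN_OFDM_RSSI_A_BIT_POS;
	}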
1386 1415
1387 1416
@@ -1401,7 +1430,7 @@ struct iwl_rx_phy_res {
1401 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ 1430 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1402 __le32 rate_n_flags; /* RATE_MCS_* */ 1431 __le32 rate_n_flags; /* RATE_MCS_* */
1403 __le16 byte_count; /* frame's byte-count */ 1432 __le16 byte_count; /* frame's byte-count */
1404 __le16 reserved3; 1433 __le16 frame_time; /* frame's time on the air */
1405} __packed; 1434} __packed;
1406 1435
1407struct iwl_rx_mpdu_res_start { 1436struct iwl_rx_mpdu_res_start {
@@ -1424,12 +1453,12 @@ struct iwl_rx_mpdu_res_start {
1424 * uCode handles all timing and protocol related to control frames 1453 * uCode handles all timing and protocol related to control frames
1425 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler 1454 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1426 * handle reception of block-acks; uCode updates the host driver via 1455 * handle reception of block-acks; uCode updates the host driver via
1427 * REPLY_COMPRESSED_BA (4965). 1456 * REPLY_COMPRESSED_BA.
1428 * 1457 *
1429 * uCode handles retrying Tx when an ACK is expected but not received. 1458 * uCode handles retrying Tx when an ACK is expected but not received.
1430 * This includes trying lower data rates than the one requested in the Tx 1459 * This includes trying lower data rates than the one requested in the Tx
1431 * command, as set up by the REPLY_RATE_SCALE (for 3945) or 1460 * command, as set up by the REPLY_RATE_SCALE (for 3945) or
1432 * REPLY_TX_LINK_QUALITY_CMD (4965). 1461 * REPLY_TX_LINK_QUALITY_CMD (agn).
1433 * 1462 *
1434 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. 1463 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
1435 * This command must be executed after every RXON command, before Tx can occur. 1464 * This command must be executed after every RXON command, before Tx can occur.
@@ -1465,7 +1494,7 @@ struct iwl_rx_mpdu_res_start {
1465 * Set this for unicast frames, but not broadcast/multicast. */ 1494 * Set this for unicast frames, but not broadcast/multicast. */
1466#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3) 1495#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
1467 1496
1468/* For 4965: 1497/* For agn devices:
1469 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). 1498 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
1470 * Tx command's initial_rate_index indicates first rate to try; 1499 * Tx command's initial_rate_index indicates first rate to try;
1471 * uCode walks through table for additional Tx attempts. 1500 * uCode walks through table for additional Tx attempts.
@@ -1484,7 +1513,7 @@ struct iwl_rx_mpdu_res_start {
1484 */ 1513 */
1485#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7) 1514#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
1486 1515
1487/* Tx antenna selection field; used only for 3945, reserved (0) for 4965. 1516/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices.
1488 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */ 1517 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
1489#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00) 1518#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
1490#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8) 1519#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
@@ -1791,13 +1820,8 @@ enum {
1791 TX_STATUS_FAIL_TID_DISABLE = 0x8d, 1820 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
1792 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, 1821 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
1793 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, 1822 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1794 /* uCode drop due to FW drop request */ 1823 TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
1795 TX_STATUS_FAIL_FW_DROP = 0x90, 1824 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1796 /*
1797 * uCode drop due to station color mismatch
1798 * between tx command and station table
1799 */
1800 TX_STATUS_FAIL_STA_COLOR_MISMATCH_DROP = 0x91,
1801}; 1825};
1802 1826
1803#define TX_PACKET_MODE_REGULAR 0x0000 1827#define TX_PACKET_MODE_REGULAR 0x0000
@@ -1839,6 +1863,9 @@ enum {
1839 AGG_TX_STATE_DELAY_TX_MSK = 0x400 1863 AGG_TX_STATE_DELAY_TX_MSK = 0x400
1840}; 1864};
1841 1865
1866#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */
1867#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */
1868
1842#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \ 1869#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
1843 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \ 1870 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \
1844 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK) 1871 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK)
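The two new masks partition the 16-bit per-frame aggregation status into a status code and a retry count. A sketch of the decode, assuming frame_status points at one struct agg_tx_status entry (shown further below) whose first field is the __le16 status word these masks apply to:

	u16 fstatus = le16_to_cpu(frame_status->status);
	u16 tx_status = fstatus & AGG_TX_STATUS_MSK;	/* bits 0:11, AGG_TX_STATE_* */
	u16 try_cnt = (fstatus & AGG_TX_TRY_MSK) >> 12;	/* bits 12:15, attempt count */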
@@ -1867,9 +1894,10 @@ enum {
1867 * frame in this new agg block failed in previous agg block(s). 1894 * frame in this new agg block failed in previous agg block(s).
1868 * 1895 *
1869 * Note that, for aggregation, ACK (block-ack) status is not delivered here; 1896 * Note that, for aggregation, ACK (block-ack) status is not delivered here;
1870 * block-ack has not been received by the time the 4965 records this status. 1897 * block-ack has not been received by the time the agn device records
1898 * this status.
1871 * This status relates to reasons the tx might have been blocked or aborted 1899 * This status relates to reasons the tx might have been blocked or aborted
1872 * within the sending station (this 4965), rather than whether it was 1900 * within the sending station (this agn device), rather than whether it was
1873 * received successfully by the destination station. 1901 * received successfully by the destination station.
1874 */ 1902 */
1875struct agg_tx_status { 1903struct agg_tx_status {
@@ -2092,8 +2120,8 @@ struct iwl_link_qual_general_params {
2092} __packed; 2120} __packed;
2093 2121
2094#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ 2122#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
2095#define LINK_QUAL_AGG_TIME_LIMIT_MAX (65535) 2123#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
2096#define LINK_QUAL_AGG_TIME_LIMIT_MIN (0) 2124#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
2097 2125
2098#define LINK_QUAL_AGG_DISABLE_START_DEF (3) 2126#define LINK_QUAL_AGG_DISABLE_START_DEF (3)
2099#define LINK_QUAL_AGG_DISABLE_START_MAX (255) 2127#define LINK_QUAL_AGG_DISABLE_START_MAX (255)
@@ -2110,8 +2138,10 @@ struct iwl_link_qual_general_params {
2110 */ 2138 */
2111struct iwl_link_qual_agg_params { 2139struct iwl_link_qual_agg_params {
2112 2140
2113 /* Maximum number of uSec in aggregation. 2141 /*
2114 * Driver should set this to 4000 (4 milliseconds). */ 2142 * Maximum number of uSec in aggregation.
 2143 * Defaults to 4000 (4 milliseconds) if not configured in .cfg
2144 */
2115 __le16 agg_time_limit; 2145 __le16 agg_time_limit;
2116 2146
2117 /* 2147 /*
@@ -2135,14 +2165,16 @@ struct iwl_link_qual_agg_params {
2135/* 2165/*
2136 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) 2166 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
2137 * 2167 *
2138 * For 4965 only; 3945 uses REPLY_RATE_SCALE. 2168 * For agn devices only; 3945 uses REPLY_RATE_SCALE.
2139 * 2169 *
2140 * Each station in the 4965's internal station table has its own table of 16 2170 * Each station in the agn device's internal station table has its own table
2171 * of 16
2141 * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when 2172 * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when
2142 * an ACK is not received. This command replaces the entire table for 2173 * an ACK is not received. This command replaces the entire table for
2143 * one station. 2174 * one station.
2144 * 2175 *
2145 * NOTE: Station must already be in 4965's station table. Use REPLY_ADD_STA. 2176 * NOTE: Station must already be in agn device's station table.
2177 * Use REPLY_ADD_STA.
2146 * 2178 *
2147 * The rate scaling procedures described below work well. Of course, other 2179 * The rate scaling procedures described below work well. Of course, other
2148 * procedures are possible, and may work better for particular environments. 2180 * procedures are possible, and may work better for particular environments.
@@ -2179,12 +2211,12 @@ struct iwl_link_qual_agg_params {
2179 * 2211 *
2180 * ACCUMULATING HISTORY 2212 * ACCUMULATING HISTORY
2181 * 2213 *
2182 * The rate scaling algorithm for 4965, as implemented in Linux driver, uses 2214 * The rate scaling algorithm for agn devices, as implemented in Linux driver,
2183 * two sets of frame Tx success history: One for the current/active modulation 2215 * uses two sets of frame Tx success history: One for the current/active
2184 * mode, and one for a speculative/search mode that is being attempted. If the 2216 * modulation mode, and one for a speculative/search mode that is being
2185 * speculative mode turns out to be more effective (i.e. actual transfer 2217 * attempted. If the speculative mode turns out to be more effective (i.e.
2186 * rate is better), then the driver continues to use the speculative mode 2218 * actual transfer rate is better), then the driver continues to use the
2187 * as the new current active mode. 2219 * speculative mode as the new current active mode.
2188 * 2220 *
2189 * Each history set contains, separately for each possible rate, data for a 2221 * Each history set contains, separately for each possible rate, data for a
2190 * sliding window of the 62 most recent tx attempts at that rate. The data 2222 * sliding window of the 62 most recent tx attempts at that rate. The data
@@ -2195,12 +2227,12 @@ struct iwl_link_qual_agg_params {
2195 * The driver uses the bit map to remove successes from the success sum, as 2227 * The driver uses the bit map to remove successes from the success sum, as
2196 * the oldest tx attempts fall out of the window. 2228 * the oldest tx attempts fall out of the window.
2197 * 2229 *
2198 * When the 4965 makes multiple tx attempts for a given frame, each attempt 2230 * When the agn device makes multiple tx attempts for a given frame, each
2199 * might be at a different rate, and have different modulation characteristics 2231 * attempt might be at a different rate, and have different modulation
2200 * (e.g. antenna, fat channel, short guard interval), as set up in the rate 2232 * characteristics (e.g. antenna, fat channel, short guard interval), as set
2201 * scaling table in the Link Quality command. The driver must determine 2233 * up in the rate scaling table in the Link Quality command. The driver must
2202 * which rate table entry was used for each tx attempt, to determine which 2234 * determine which rate table entry was used for each tx attempt, to determine
2203 * rate-specific history to update, and record only those attempts that 2235 * which rate-specific history to update, and record only those attempts that
2204 * match the modulation characteristics of the history set. 2236 * match the modulation characteristics of the history set.
2205 * 2237 *
2206 * When using block-ack (aggregation), all frames are transmitted at the same 2238 * When using block-ack (aggregation), all frames are transmitted at the same
@@ -2330,7 +2362,7 @@ struct iwl_link_quality_cmd {
2330 /* 2362 /*
2331 * Rate info; when using rate-scaling, Tx command's initial_rate_index 2363 * Rate info; when using rate-scaling, Tx command's initial_rate_index
2332 * specifies 1st Tx rate attempted, via index into this table. 2364 * specifies 1st Tx rate attempted, via index into this table.
2333 * 4965 works its way through table when retrying Tx. 2365 * agn devices work their way through table when retrying Tx.
2334 */ 2366 */
2335 struct { 2367 struct {
2336 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ 2368 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
@@ -2363,10 +2395,26 @@ struct iwl_link_quality_cmd {
2363#define BT_MAX_KILL_DEF (0x5) 2395#define BT_MAX_KILL_DEF (0x5)
2364#define BT_MAX_KILL_MAX (0xFF) 2396#define BT_MAX_KILL_MAX (0xFF)
2365 2397
2398#define BT_DURATION_LIMIT_DEF 625
2399#define BT_DURATION_LIMIT_MAX 1250
2400#define BT_DURATION_LIMIT_MIN 625
2401
2402#define BT_ON_THRESHOLD_DEF 4
2403#define BT_ON_THRESHOLD_MAX 1000
2404#define BT_ON_THRESHOLD_MIN 1
2405
2406#define BT_FRAG_THRESHOLD_DEF 0
2407#define BT_FRAG_THRESHOLD_MAX 0
2408#define BT_FRAG_THRESHOLD_MIN 0
2409
2410#define BT_AGG_THRESHOLD_DEF 0
2411#define BT_AGG_THRESHOLD_MAX 0
2412#define BT_AGG_THRESHOLD_MIN 0
2413
2366/* 2414/*
2367 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) 2415 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
2368 * 2416 *
2369 * 3945 and 4965 support hardware handshake with Bluetooth device on 2417 * 3945 and agn devices support hardware handshake with Bluetooth device on
2370 * same platform. Bluetooth device alerts wireless device when it will Tx; 2418 * same platform. Bluetooth device alerts wireless device when it will Tx;
2371 * wireless device can delay or kill its own Tx to accommodate. 2419 * wireless device can delay or kill its own Tx to accommodate.
2372 */ 2420 */
@@ -2379,6 +2427,79 @@ struct iwl_bt_cmd {
2379 __le32 kill_cts_mask; 2427 __le32 kill_cts_mask;
2380} __packed; 2428} __packed;
2381 2429
2430#define IWLAGN_BT_FLAG_CHANNEL_INHIBITION BIT(0)
2431
2432#define IWLAGN_BT_FLAG_COEX_MODE_MASK (BIT(3)|BIT(4)|BIT(5))
2433#define IWLAGN_BT_FLAG_COEX_MODE_SHIFT 3
2434#define IWLAGN_BT_FLAG_COEX_MODE_DISABLED 0
2435#define IWLAGN_BT_FLAG_COEX_MODE_LEGACY_2W 1
2436#define IWLAGN_BT_FLAG_COEX_MODE_3W 2
2437#define IWLAGN_BT_FLAG_COEX_MODE_4W 3
2438
2439#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6)
2440#define IWLAGN_BT_FLAG_NOCOEX_NOTIF BIT(7)
2441
2442#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF
2443#define IWLAGN_BT_PRIO_BOOST_MIN 0x00
2444#define IWLAGN_BT_PRIO_BOOST_DEFAULT 0xF0
2445
2446#define IWLAGN_BT_MAX_KILL_DEFAULT 5
2447
2448#define IWLAGN_BT3_T7_DEFAULT 1
2449
2450#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffffffff)
2451#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffffffff)
2452
2453#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2
2454
2455#define IWLAGN_BT3_T2_DEFAULT 0xc
2456
2457#define IWLAGN_BT_VALID_ENABLE_FLAGS cpu_to_le16(BIT(0))
2458#define IWLAGN_BT_VALID_BOOST cpu_to_le16(BIT(1))
2459#define IWLAGN_BT_VALID_MAX_KILL cpu_to_le16(BIT(2))
2460#define IWLAGN_BT_VALID_3W_TIMERS cpu_to_le16(BIT(3))
2461#define IWLAGN_BT_VALID_KILL_ACK_MASK cpu_to_le16(BIT(4))
2462#define IWLAGN_BT_VALID_KILL_CTS_MASK cpu_to_le16(BIT(5))
2463#define IWLAGN_BT_VALID_BT4_TIMES cpu_to_le16(BIT(6))
2464#define IWLAGN_BT_VALID_3W_LUT cpu_to_le16(BIT(7))
2465
2466#define IWLAGN_BT_ALL_VALID_MSK (IWLAGN_BT_VALID_ENABLE_FLAGS | \
2467 IWLAGN_BT_VALID_BOOST | \
2468 IWLAGN_BT_VALID_MAX_KILL | \
2469 IWLAGN_BT_VALID_3W_TIMERS | \
2470 IWLAGN_BT_VALID_KILL_ACK_MASK | \
2471 IWLAGN_BT_VALID_KILL_CTS_MASK | \
2472 IWLAGN_BT_VALID_BT4_TIMES | \
2473 IWLAGN_BT_VALID_3W_LUT)
2474
2475struct iwlagn_bt_cmd {
2476 u8 flags;
2477 u8 ledtime; /* unused */
2478 u8 max_kill;
2479 u8 bt3_timer_t7_value;
2480 __le32 kill_ack_mask;
2481 __le32 kill_cts_mask;
2482 u8 bt3_prio_sample_time;
2483 u8 bt3_timer_t2_value;
2484 __le16 bt4_reaction_time; /* unused */
2485 __le32 bt3_lookup_table[12];
2486 __le16 bt4_decision_time; /* unused */
2487 __le16 valid;
2488 u8 prio_boost;
2489 /*
2490 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
 2491 * when configuring the following fields
2492 */
2493 u8 tx_prio_boost; /* SW boost of WiFi tx priority */
2494 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
2495};
2496
2497#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0))
2498
2499struct iwlagn_bt_sco_cmd {
2500 __le32 flags;
2501};
2502
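Taken together, the IWLAGN_BT*/IWLAGN_BT3_* defaults and the valid bitmask describe a complete baseline command. A hedged sketch of populating it purely from the defines above (this mirrors the constants, not necessarily the driver's exact send routine):

	struct iwlagn_bt_cmd bt_cmd = {
		.flags = IWLAGN_BT_FLAG_COEX_MODE_3W << IWLAGN_BT_FLAG_COEX_MODE_SHIFT,
		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
		.kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
		.kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
		.valid = IWLAGN_BT_ALL_VALID_MSK,	/* firmware honors every field above */
		.prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
	};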
2382/****************************************************************************** 2503/******************************************************************************
2383 * (6) 2504 * (6)
2384 * Spectrum Management (802.11h) Commands, Responses, Notifications: 2505 * Spectrum Management (802.11h) Commands, Responses, Notifications:
@@ -2567,7 +2688,7 @@ struct iwl_powertable_cmd {
2567 2688
2568/* 2689/*
2569 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) 2690 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
2570 * 3945 and 4965 identical. 2691 * All devices identical.
2571 */ 2692 */
2572struct iwl_sleep_notification { 2693struct iwl_sleep_notification {
2573 u8 pm_sleep_mode; 2694 u8 pm_sleep_mode;
@@ -2578,7 +2699,7 @@ struct iwl_sleep_notification {
2578 __le32 bcon_timer; 2699 __le32 bcon_timer;
2579} __packed; 2700} __packed;
2580 2701
2581/* Sleep states. 3945 and 4965 identical. */ 2702/* Sleep states. All devices identical. */
2582enum { 2703enum {
2583 IWL_PM_NO_SLEEP = 0, 2704 IWL_PM_NO_SLEEP = 0,
2584 IWL_PM_SLP_MAC = 1, 2705 IWL_PM_SLP_MAC = 1,
@@ -2887,6 +3008,12 @@ struct iwl_scanstart_notification {
2887#define SCAN_OWNER_STATUS 0x1; 3008#define SCAN_OWNER_STATUS 0x1;
2888#define MEASURE_OWNER_STATUS 0x2; 3009#define MEASURE_OWNER_STATUS 0x2;
2889 3010
3011#define IWL_PROBE_STATUS_OK 0
3012#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
3013/* error statuses combined with TX_FAILED */
3014#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
3015#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
3016
2890#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ 3017#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
2891/* 3018/*
2892 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) 3019 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
@@ -2894,7 +3021,8 @@ struct iwl_scanstart_notification {
2894struct iwl_scanresults_notification { 3021struct iwl_scanresults_notification {
2895 u8 channel; 3022 u8 channel;
2896 u8 band; 3023 u8 band;
2897 u8 reserved[2]; 3024 u8 probe_status;
3025 u8 num_probe_not_sent; /* not enough time to send */
2898 __le32 tsf_low; 3026 __le32 tsf_low;
2899 __le32 tsf_high; 3027 __le32 tsf_high;
2900 __le32 statistics[NUMBER_OF_STATISTICS]; 3028 __le32 statistics[NUMBER_OF_STATISTICS];
@@ -2906,7 +3034,7 @@ struct iwl_scanresults_notification {
2906struct iwl_scancomplete_notification { 3034struct iwl_scancomplete_notification {
2907 u8 scanned_channels; 3035 u8 scanned_channels;
2908 u8 status; 3036 u8 status;
2909 u8 reserved; 3037 u8 bt_status; /* BT On/Off status */
2910 u8 last_channel; 3038 u8 last_channel;
2911 __le32 tsf_low; 3039 __le32 tsf_low;
2912 __le32 tsf_high; 3040 __le32 tsf_high;
@@ -2919,6 +3047,11 @@ struct iwl_scancomplete_notification {
2919 * 3047 *
2920 *****************************************************************************/ 3048 *****************************************************************************/
2921 3049
3050enum iwl_ibss_manager {
3051 IWL_NOT_IBSS_MANAGER = 0,
3052 IWL_IBSS_MANAGER = 1,
3053};
3054
2922/* 3055/*
2923 * BEACON_NOTIFICATION = 0x90 (notification only, not a command) 3056 * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
2924 */ 3057 */
@@ -3260,7 +3393,7 @@ struct statistics_general_bt {
3260 3393
3261/* 3394/*
3262 * REPLY_STATISTICS_CMD = 0x9c, 3395 * REPLY_STATISTICS_CMD = 0x9c,
3263 * 3945 and 4965 identical. 3396 * All devices identical.
3264 * 3397 *
3265 * This command triggers an immediate response containing uCode statistics. 3398 * This command triggers an immediate response containing uCode statistics.
3266 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. 3399 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
@@ -3598,7 +3731,7 @@ struct iwl_enhance_sensitivity_cmd {
3598/** 3731/**
3599 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) 3732 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
3600 * 3733 *
3601 * This command sets the relative gains of 4965's 3 radio receiver chains. 3734 * This command sets the relative gains of the agn device's 3 radio receiver chains.
3602 * 3735 *
3603 * After the first association, driver should accumulate signal and noise 3736 * After the first association, driver should accumulate signal and noise
3604 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20 3737 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
@@ -3955,6 +4088,201 @@ struct iwl_coex_event_resp {
3955 4088
3956 4089
3957/****************************************************************************** 4090/******************************************************************************
4091 * Bluetooth Coexistence commands
4092 *
4093 *****************************************************************************/
4094
4095/*
4096 * BT Status notification
4097 * REPLY_BT_COEX_PROFILE_NOTIF = 0xce
4098 */
4099enum iwl_bt_coex_profile_traffic_load {
4100 IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0,
4101 IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1,
4102 IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2,
4103 IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3,
4104/*
 4105 * There are no more values defined; even though the field below
 4106 * is a u8, the indication from the BT device only has two bits.
4107 */
4108};
4109
4110#define BT_UART_MSG_FRAME1MSGTYPE_POS (0)
4111#define BT_UART_MSG_FRAME1MSGTYPE_MSK \
4112 (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
4113#define BT_UART_MSG_FRAME1SSN_POS (3)
4114#define BT_UART_MSG_FRAME1SSN_MSK \
4115 (0x3 << BT_UART_MSG_FRAME1SSN_POS)
4116#define BT_UART_MSG_FRAME1UPDATEREQ_POS (5)
4117#define BT_UART_MSG_FRAME1UPDATEREQ_MSK \
4118 (0x1 << BT_UART_MSG_FRAME1UPDATEREQ_POS)
4119#define BT_UART_MSG_FRAME1RESERVED_POS (6)
4120#define BT_UART_MSG_FRAME1RESERVED_MSK \
4121 (0x3 << BT_UART_MSG_FRAME1RESERVED_POS)
4122
4123#define BT_UART_MSG_FRAME2OPENCONNECTIONS_POS (0)
4124#define BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK \
4125 (0x3 << BT_UART_MSG_FRAME2OPENCONNECTIONS_POS)
4126#define BT_UART_MSG_FRAME2TRAFFICLOAD_POS (2)
4127#define BT_UART_MSG_FRAME2TRAFFICLOAD_MSK \
4128 (0x3 << BT_UART_MSG_FRAME2TRAFFICLOAD_POS)
4129#define BT_UART_MSG_FRAME2CHLSEQN_POS (4)
4130#define BT_UART_MSG_FRAME2CHLSEQN_MSK \
4131 (0x1 << BT_UART_MSG_FRAME2CHLSEQN_POS)
4132#define BT_UART_MSG_FRAME2INBAND_POS (5)
4133#define BT_UART_MSG_FRAME2INBAND_MSK \
4134 (0x1 << BT_UART_MSG_FRAME2INBAND_POS)
4135#define BT_UART_MSG_FRAME2RESERVED_POS (6)
4136#define BT_UART_MSG_FRAME2RESERVED_MSK \
4137 (0x3 << BT_UART_MSG_FRAME2RESERVED_POS)
4138
4139#define BT_UART_MSG_FRAME3SCOESCO_POS (0)
4140#define BT_UART_MSG_FRAME3SCOESCO_MSK \
4141 (0x1 << BT_UART_MSG_FRAME3SCOESCO_POS)
4142#define BT_UART_MSG_FRAME3SNIFF_POS (1)
4143#define BT_UART_MSG_FRAME3SNIFF_MSK \
4144 (0x1 << BT_UART_MSG_FRAME3SNIFF_POS)
4145#define BT_UART_MSG_FRAME3A2DP_POS (2)
4146#define BT_UART_MSG_FRAME3A2DP_MSK \
4147 (0x1 << BT_UART_MSG_FRAME3A2DP_POS)
4148#define BT_UART_MSG_FRAME3ACL_POS (3)
4149#define BT_UART_MSG_FRAME3ACL_MSK \
4150 (0x1 << BT_UART_MSG_FRAME3ACL_POS)
4151#define BT_UART_MSG_FRAME3MASTER_POS (4)
4152#define BT_UART_MSG_FRAME3MASTER_MSK \
4153 (0x1 << BT_UART_MSG_FRAME3MASTER_POS)
4154#define BT_UART_MSG_FRAME3OBEX_POS (5)
4155#define BT_UART_MSG_FRAME3OBEX_MSK \
4156 (0x1 << BT_UART_MSG_FRAME3OBEX_POS)
4157#define BT_UART_MSG_FRAME3RESERVED_POS (6)
4158#define BT_UART_MSG_FRAME3RESERVED_MSK \
4159 (0x3 << BT_UART_MSG_FRAME3RESERVED_POS)
4160
4161#define BT_UART_MSG_FRAME4IDLEDURATION_POS (0)
4162#define BT_UART_MSG_FRAME4IDLEDURATION_MSK \
4163 (0x3F << BT_UART_MSG_FRAME4IDLEDURATION_POS)
4164#define BT_UART_MSG_FRAME4RESERVED_POS (6)
4165#define BT_UART_MSG_FRAME4RESERVED_MSK \
4166 (0x3 << BT_UART_MSG_FRAME4RESERVED_POS)
4167
4168#define BT_UART_MSG_FRAME5TXACTIVITY_POS (0)
4169#define BT_UART_MSG_FRAME5TXACTIVITY_MSK \
4170 (0x3 << BT_UART_MSG_FRAME5TXACTIVITY_POS)
4171#define BT_UART_MSG_FRAME5RXACTIVITY_POS (2)
4172#define BT_UART_MSG_FRAME5RXACTIVITY_MSK \
4173 (0x3 << BT_UART_MSG_FRAME5RXACTIVITY_POS)
4174#define BT_UART_MSG_FRAME5ESCORETRANSMIT_POS (4)
4175#define BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK \
4176 (0x3 << BT_UART_MSG_FRAME5ESCORETRANSMIT_POS)
4177#define BT_UART_MSG_FRAME5RESERVED_POS (6)
4178#define BT_UART_MSG_FRAME5RESERVED_MSK \
4179 (0x3 << BT_UART_MSG_FRAME5RESERVED_POS)
4180
4181#define BT_UART_MSG_FRAME6SNIFFINTERVAL_POS (0)
4182#define BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK \
4183 (0x1F << BT_UART_MSG_FRAME6SNIFFINTERVAL_POS)
4184#define BT_UART_MSG_FRAME6DISCOVERABLE_POS (5)
4185#define BT_UART_MSG_FRAME6DISCOVERABLE_MSK \
4186 (0x1 << BT_UART_MSG_FRAME6DISCOVERABLE_POS)
4187#define BT_UART_MSG_FRAME6RESERVED_POS (6)
4188#define BT_UART_MSG_FRAME6RESERVED_MSK \
4189 (0x3 << BT_UART_MSG_FRAME6RESERVED_POS)
4190
4191#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0)
4192#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \
4193 (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
4194#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS (3)
4195#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK \
4196 (0x3 << BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS)
4197#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5)
4198#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \
4199 (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
4200#define BT_UART_MSG_FRAME7RESERVED_POS (6)
4201#define BT_UART_MSG_FRAME7RESERVED_MSK \
4202 (0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
4203
4204
4205struct iwl_bt_uart_msg {
4206 u8 header;
4207 u8 frame1;
4208 u8 frame2;
4209 u8 frame3;
4210 u8 frame4;
4211 u8 frame5;
4212 u8 frame6;
4213 u8 frame7;
 4214} __packed;
4215
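Each frameN byte packs several subfields at the POS/MSK offsets above, so decoding is a mask-and-shift per field. A short sketch (field choice arbitrary; uart_msg assumed to point at a received struct iwl_bt_uart_msg):

	u8 load = (uart_msg->frame2 & BT_UART_MSG_FRAME2TRAFFICLOAD_MSK) >>
			BT_UART_MSG_FRAME2TRAFFICLOAD_POS;
	bool sco_esco = uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK;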
4216struct iwl_bt_coex_profile_notif {
4217 struct iwl_bt_uart_msg last_bt_uart_msg;
4218 u8 bt_status; /* 0 - off, 1 - on */
4219 u8 bt_traffic_load; /* 0 .. 3? */
 4220 u8 bt_ci_compliance; /* 0 - not compliant, 1 - compliant */
4221 u8 reserved;
 4222} __packed;
4223
4224#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS 0
4225#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK 0x1
4226#define IWL_BT_COEX_PRIO_TBL_PRIO_POS 1
4227#define IWL_BT_COEX_PRIO_TBL_PRIO_MASK 0x0e
4228#define IWL_BT_COEX_PRIO_TBL_RESERVED_POS 4
4229#define IWL_BT_COEX_PRIO_TBL_RESERVED_MASK 0xf0
4230#define IWL_BT_COEX_PRIO_TBL_PRIO_SHIFT 1
4231
4232/*
4233 * BT Coexistence Priority table
4234 * REPLY_BT_COEX_PRIO_TABLE = 0xcc
4235 */
4236enum bt_coex_prio_table_events {
4237 BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
4238 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
4239 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
4240 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3, /* DC calib */
4241 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
4242 BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
4243 BT_COEX_PRIO_TBL_EVT_DTIM = 6,
4244 BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
4245 BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
4246 BT_COEX_PRIO_TBL_EVT_RESERVED0 = 9,
4247 BT_COEX_PRIO_TBL_EVT_RESERVED1 = 10,
4248 BT_COEX_PRIO_TBL_EVT_RESERVED2 = 11,
4249 BT_COEX_PRIO_TBL_EVT_RESERVED3 = 12,
4250 BT_COEX_PRIO_TBL_EVT_RESERVED4 = 13,
4251 BT_COEX_PRIO_TBL_EVT_RESERVED5 = 14,
4252 BT_COEX_PRIO_TBL_EVT_RESERVED6 = 15,
4253 /* BT_COEX_PRIO_TBL_EVT_MAX should always be last */
4254 BT_COEX_PRIO_TBL_EVT_MAX,
4255};
4256
4257enum bt_coex_prio_table_priorities {
4258 BT_COEX_PRIO_TBL_DISABLED = 0,
4259 BT_COEX_PRIO_TBL_PRIO_LOW = 1,
4260 BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
4261 BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
4262 BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
4263 BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
4264 BT_COEX_PRIO_TBL_PRIO_RSRVD1 = 6,
4265 BT_COEX_PRIO_TBL_PRIO_RSRVD2 = 7,
4266 BT_COEX_PRIO_TBL_MAX,
4267};
4268
4269struct iwl_bt_coex_prio_table_cmd {
4270 u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
 4271} __packed;
4272
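A priority-table command just assigns one of the bt_coex_prio_table_priorities values to each event slot; unset slots stay BT_COEX_PRIO_TBL_DISABLED (0). An illustrative assignment (not the driver's actual table):

	struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd = {
		.prio_tbl[BT_COEX_PRIO_TBL_EVT_INIT_CALIB1] = BT_COEX_PRIO_TBL_PRIO_BYPASS,
		.prio_tbl[BT_COEX_PRIO_TBL_EVT_INIT_CALIB2] = BT_COEX_PRIO_TBL_PRIO_BYPASS,
		.prio_tbl[BT_COEX_PRIO_TBL_EVT_DTIM] = BT_COEX_PRIO_TBL_PRIO_HIGH,
		.prio_tbl[BT_COEX_PRIO_TBL_EVT_SCAN24] = BT_COEX_PRIO_TBL_PRIO_LOW,
	};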
4273#define IWL_BT_COEX_ENV_CLOSE 0
4274#define IWL_BT_COEX_ENV_OPEN 1
4275/*
4276 * BT Protection Envelope
4277 * REPLY_BT_COEX_PROT_ENV = 0xcd
4278 */
4279struct iwl_bt_coex_prot_env_cmd {
4280 u8 action; /* 0 = closed, 1 = open */
4281 u8 type; /* 0 .. 15 */
4282 u8 reserved[2];
 4283} __packed;
4284
4285/******************************************************************************
3958 * (13) 4286 * (13)
3959 * Union of all expected notifications/responses: 4287 * Union of all expected notifications/responses:
3960 * 4288 *
@@ -3993,6 +4321,7 @@ struct iwl_rx_packet {
3993 struct iwl_missed_beacon_notif missed_beacon; 4321 struct iwl_missed_beacon_notif missed_beacon;
3994 struct iwl_coex_medium_notification coex_medium_notif; 4322 struct iwl_coex_medium_notification coex_medium_notif;
3995 struct iwl_coex_event_resp coex_event; 4323 struct iwl_coex_event_resp coex_event;
4324 struct iwl_bt_coex_profile_notif bt_coex_profile_notif;
3996 __le32 status; 4325 __le32 status;
3997 u8 raw[0]; 4326 u8 raw[0];
3998 } u; 4327 } u;
@@ -4000,4 +4329,94 @@ struct iwl_rx_packet {
4000 4329
4001int iwl_agn_check_rxon_cmd(struct iwl_priv *priv); 4330int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
4002 4331
4332/*
4333 * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
4334 */
4335
4336/**
4337 * struct iwl_wipan_slot
4338 * @width: Time in TU
4339 * @type:
4340 * 0 - BSS
4341 * 1 - PAN
4342 */
4343struct iwl_wipan_slot {
4344 __le16 width;
4345 u8 type;
4346 u8 reserved;
4347} __packed;
4348
4349#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_CTS BIT(1) /* reserved */
4350#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_QUIET BIT(2) /* reserved */
4351#define IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE BIT(3) /* reserved */
4352#define IWL_WIPAN_PARAMS_FLG_FILTER_BEACON_NOTIF BIT(4)
4353#define IWL_WIPAN_PARAMS_FLG_FULL_SLOTTED_MODE BIT(5)
4354
4355/**
4356 * struct iwl_wipan_params_cmd
4357 * @flags:
4358 * bit0: reserved
4359 * bit1: CP leave channel with CTS
 4360 * bit2: CP leave channel with Quiet
4361 * bit3: slotted mode
4362 * 1 - work in slotted mode
 4363 * 0 - work in non-slotted mode
4364 * bit4: filter beacon notification
 4365 * bit5: full tx slotted mode. If this flag is set,
 4366 * uCode performs its leave-channel procedures on a context
 4367 * switch even when both contexts are on the same channel
4368 * @num_slots: 1 - 10
4369 */
4370struct iwl_wipan_params_cmd {
4371 __le16 flags;
4372 u8 reserved;
4373 u8 num_slots;
4374 struct iwl_wipan_slot slots[10];
4375} __packed;
4376
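In slotted mode the device then time-shares the radio between the BSS and PAN contexts according to this slot list. A hedged example with two equal slots (the 40 TU widths are illustrative, not a recommended split):

	struct iwl_wipan_params_cmd wipan_cmd = {
		.flags = cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE),
		.num_slots = 2,
		.slots[0] = { .width = cpu_to_le16(40), .type = 0 /* BSS */ },
		.slots[1] = { .width = cpu_to_le16(40), .type = 1 /* PAN */ },
	};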
4377/*
4378 * REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9
4379 *
4380 * TODO: Figure out what this is used for,
4381 * it can only switch between 2.4 GHz
4382 * channels!!
4383 */
4384
4385struct iwl_wipan_p2p_channel_switch_cmd {
4386 __le16 channel;
4387 __le16 reserved;
4388};
4389
4390/*
4391 * REPLY_WIPAN_NOA_NOTIFICATION = 0xbc
4392 *
4393 * This is used by the device to notify us of the
4394 * NoA schedule it determined so we can forward it
4395 * to userspace for inclusion in probe responses.
4396 *
4397 * In beacons, the NoA schedule is simply appended
4398 * to the frame we give the device.
4399 */
4400
4401struct iwl_wipan_noa_descriptor {
4402 u8 count;
4403 __le32 duration;
4404 __le32 interval;
4405 __le32 starttime;
4406} __packed;
4407
4408struct iwl_wipan_noa_attribute {
4409 u8 id;
4410 __le16 length;
4411 u8 index;
4412 u8 ct_window;
4413 struct iwl_wipan_noa_descriptor descr0, descr1;
4414 u8 reserved;
4415} __packed;
4416
4417struct iwl_wipan_noa_notification {
4418 u32 noa_active;
4419 struct iwl_wipan_noa_attribute noa_attribute;
4420} __packed;
4421
4003#endif /* __iwl_commands_h__ */ 4422#endif /* __iwl_commands_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index e23c4060a0f0..5c568933ce48 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -64,7 +64,8 @@ MODULE_LICENSE("GPL");
64 * 64 *
65 * default: bt_coex_active = true (BT_COEX_ENABLE) 65 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */ 66 */
67static bool bt_coex_active = true; 67bool bt_coex_active = true;
68EXPORT_SYMBOL_GPL(bt_coex_active);
68module_param(bt_coex_active, bool, S_IRUGO); 69module_param(bt_coex_active, bool, S_IRUGO);
69MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); 70MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
70 71
@@ -146,6 +147,10 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
146 int i; 147 int i;
147 u8 ind = ant; 148 u8 ind = ant;
148 149
150 if (priv->band == IEEE80211_BAND_2GHZ &&
151 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
152 return 0;
153
149 for (i = 0; i < RATE_ANT_NUM - 1; i++) { 154 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
150 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0; 155 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
151 if (valid & BIT(ind)) 156 if (valid & BIT(ind))
@@ -183,38 +188,33 @@ out:
183} 188}
184EXPORT_SYMBOL(iwl_alloc_all); 189EXPORT_SYMBOL(iwl_alloc_all);
185 190
186void iwl_hw_detect(struct iwl_priv *priv)
187{
188 priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
189 priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
190 pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
191}
192EXPORT_SYMBOL(iwl_hw_detect);
193
194/* 191/*
195 * QoS support 192 * QoS support
196*/ 193*/
197static void iwl_update_qos(struct iwl_priv *priv) 194static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
198{ 195{
199 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 196 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
200 return; 197 return;
201 198
202 priv->qos_data.def_qos_parm.qos_flags = 0; 199 if (!ctx->is_active)
200 return;
201
202 ctx->qos_data.def_qos_parm.qos_flags = 0;
203 203
204 if (priv->qos_data.qos_active) 204 if (ctx->qos_data.qos_active)
205 priv->qos_data.def_qos_parm.qos_flags |= 205 ctx->qos_data.def_qos_parm.qos_flags |=
206 QOS_PARAM_FLG_UPDATE_EDCA_MSK; 206 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
207 207
208 if (priv->current_ht_config.is_ht) 208 if (ctx->ht.enabled)
209 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; 209 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
210 210
211 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", 211 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
212 priv->qos_data.qos_active, 212 ctx->qos_data.qos_active,
213 priv->qos_data.def_qos_parm.qos_flags); 213 ctx->qos_data.def_qos_parm.qos_flags);
214 214
215 iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM, 215 iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
216 sizeof(struct iwl_qosparam_cmd), 216 sizeof(struct iwl_qosparam_cmd),
217 &priv->qos_data.def_qos_parm, NULL); 217 &ctx->qos_data.def_qos_parm, NULL);
218} 218}
219 219
220#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 220#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
@@ -247,7 +247,11 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
247 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; 247 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
248 248
249 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; 249 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
250 if (priv->cfg->ampdu_factor)
251 ht_info->ampdu_factor = priv->cfg->ampdu_factor;
250 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; 252 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
253 if (priv->cfg->ampdu_density)
254 ht_info->ampdu_density = priv->cfg->ampdu_density;
251 255
252 ht_info->mcs.rx_mask[0] = 0xFF; 256 ht_info->mcs.rx_mask[0] = 0xFF;
253 if (rx_chains_num >= 2) 257 if (rx_chains_num >= 2)
@@ -440,15 +444,15 @@ static bool is_single_rx_stream(struct iwl_priv *priv)
440 priv->current_ht_config.single_chain_sufficient; 444 priv->current_ht_config.single_chain_sufficient;
441} 445}
442 446
443static u8 iwl_is_channel_extension(struct iwl_priv *priv, 447static bool iwl_is_channel_extension(struct iwl_priv *priv,
444 enum ieee80211_band band, 448 enum ieee80211_band band,
445 u16 channel, u8 extension_chan_offset) 449 u16 channel, u8 extension_chan_offset)
446{ 450{
447 const struct iwl_channel_info *ch_info; 451 const struct iwl_channel_info *ch_info;
448 452
449 ch_info = iwl_get_channel_info(priv, band, channel); 453 ch_info = iwl_get_channel_info(priv, band, channel);
450 if (!is_channel_valid(ch_info)) 454 if (!is_channel_valid(ch_info))
451 return 0; 455 return false;
452 456
453 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) 457 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
454 return !(ch_info->ht40_extension_channel & 458 return !(ch_info->ht40_extension_channel &
@@ -457,38 +461,59 @@ static u8 iwl_is_channel_extension(struct iwl_priv *priv,
457 return !(ch_info->ht40_extension_channel & 461 return !(ch_info->ht40_extension_channel &
458 IEEE80211_CHAN_NO_HT40MINUS); 462 IEEE80211_CHAN_NO_HT40MINUS);
459 463
460 return 0; 464 return false;
461} 465}
462 466
463u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv, 467bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
464 struct ieee80211_sta_ht_cap *sta_ht_inf) 468 struct iwl_rxon_context *ctx,
469 struct ieee80211_sta_ht_cap *ht_cap)
465{ 470{
466 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 471 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
467 472 return false;
468 if (!ht_conf->is_ht || !ht_conf->is_40mhz)
469 return 0;
470 473
471 /* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 474 /*
475 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
472 * the bit will not set if it is pure 40MHz case 476 * the bit will not be set in the pure 40MHz case
473 */ 477 */
474 if (sta_ht_inf) { 478 if (ht_cap && !ht_cap->ht_supported)
475 if (!sta_ht_inf->ht_supported) 479 return false;
476 return 0; 480
477 }
478#ifdef CONFIG_IWLWIFI_DEBUGFS 481#ifdef CONFIG_IWLWIFI_DEBUGFS
479 if (priv->disable_ht40) 482 if (priv->disable_ht40)
480 return 0; 483 return false;
481#endif 484#endif
485
482 return iwl_is_channel_extension(priv, priv->band, 486 return iwl_is_channel_extension(priv, priv->band,
483 le16_to_cpu(priv->staging_rxon.channel), 487 le16_to_cpu(ctx->staging.channel),
484 ht_conf->extension_chan_offset); 488 ctx->ht.extension_chan_offset);
485} 489}
486EXPORT_SYMBOL(iwl_is_ht40_tx_allowed); 490EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
487 491
488static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) 492static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
489{ 493{
490 u16 new_val = 0; 494 u16 new_val;
491 u16 beacon_factor = 0; 495 u16 beacon_factor;
496
497 /*
498 * If mac80211 hasn't given us a beacon interval, program
499 * the default into the device (not checking this here
500 * would cause the adjustment below to return the maximum
501 * value, which may break PAN.)
502 */
503 if (!beacon_val)
504 return DEFAULT_BEACON_INTERVAL;
505
506 /*
507 * If the beacon interval we obtained from the peer
508 * is too large, we'll have to wake up more often
509 * (and in IBSS case, we'll beacon too much)
510 *
511 * For example, if max_beacon_val is 4096, and the
512 * requested beacon interval is 7000, we'll have to
513 * use 3500 to be able to wake up on the beacons.
514 *
515 * This could badly influence beacon detection stats.
516 */
492 517
493 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; 518 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
494 new_val = beacon_val / beacon_factor; 519 new_val = beacon_val / beacon_factor;
@@ -499,51 +524,76 @@ static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
499 return new_val; 524 return new_val;
500} 525}
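Working the comment's own example through the code above, step by step:

	/* beacon_val = 7000, max_beacon_val = 4096:
	 *   beacon_factor = (7000 + 4096) / 4096 = 2      (integer division)
	 *   new_val       = 7000 / 2              = 3500  (<= 4096, so wakeups
	 *                                                  stay beacon-aligned)
	 */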
501 526
502void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif) 527int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
503{ 528{
504 u64 tsf; 529 u64 tsf;
505 s32 interval_tm, rem; 530 s32 interval_tm, rem;
506 unsigned long flags;
507 struct ieee80211_conf *conf = NULL; 531 struct ieee80211_conf *conf = NULL;
508 u16 beacon_int; 532 u16 beacon_int;
533 struct ieee80211_vif *vif = ctx->vif;
509 534
510 conf = ieee80211_get_hw_conf(priv->hw); 535 conf = ieee80211_get_hw_conf(priv->hw);
511 536
512 spin_lock_irqsave(&priv->lock, flags); 537 lockdep_assert_held(&priv->mutex);
513 priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
514 priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
515 538
516 beacon_int = vif->bss_conf.beacon_int; 539 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
517 540
518 if (vif->type == NL80211_IFTYPE_ADHOC) { 541 ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
519 /* TODO: we need to get atim_window from upper stack 542 ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
520 * for now we set to 0 */
521 priv->rxon_timing.atim_window = 0;
522 } else {
523 priv->rxon_timing.atim_window = 0;
524 }
525 543
526 beacon_int = iwl_adjust_beacon_interval(beacon_int, 544 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
545
546 /*
547 * TODO: For IBSS we need to get atim_window from mac80211,
548 * for now just always use 0
549 */
550 ctx->timing.atim_window = 0;
551
552 if (ctx->ctxid == IWL_RXON_CTX_PAN &&
553 (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
554 iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
555 priv->contexts[IWL_RXON_CTX_BSS].vif &&
556 priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
557 ctx->timing.beacon_interval =
558 priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
559 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
560 } else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
561 iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
562 priv->contexts[IWL_RXON_CTX_PAN].vif &&
563 priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
564 (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
565 !ctx->vif->bss_conf.beacon_int)) {
566 ctx->timing.beacon_interval =
567 priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
568 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
569 } else {
570 beacon_int = iwl_adjust_beacon_interval(beacon_int,
527 priv->hw_params.max_beacon_itrvl * TIME_UNIT); 571 priv->hw_params.max_beacon_itrvl * TIME_UNIT);
528 priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int); 572 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
573 }
529 574
530 tsf = priv->timestamp; /* tsf is modified by do_div: copy it */ 575 tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
531 interval_tm = beacon_int * TIME_UNIT; 576 interval_tm = beacon_int * TIME_UNIT;
532 rem = do_div(tsf, interval_tm); 577 rem = do_div(tsf, interval_tm);
533 priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem); 578 ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
579
580 ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
534 581
535 spin_unlock_irqrestore(&priv->lock, flags);
536 IWL_DEBUG_ASSOC(priv, 582 IWL_DEBUG_ASSOC(priv,
537 "beacon interval %d beacon timer %d beacon tim %d\n", 583 "beacon interval %d beacon timer %d beacon tim %d\n",
538 le16_to_cpu(priv->rxon_timing.beacon_interval), 584 le16_to_cpu(ctx->timing.beacon_interval),
539 le32_to_cpu(priv->rxon_timing.beacon_init_val), 585 le32_to_cpu(ctx->timing.beacon_init_val),
540 le16_to_cpu(priv->rxon_timing.atim_window)); 586 le16_to_cpu(ctx->timing.atim_window));
587
588 return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
589 sizeof(ctx->timing), &ctx->timing);
541} 590}
542EXPORT_SYMBOL(iwl_setup_rxon_timing); 591EXPORT_SYMBOL(iwl_send_rxon_timing);
543 592
544void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) 593void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
594 int hw_decrypt)
545{ 595{
546 struct iwl_rxon_cmd *rxon = &priv->staging_rxon; 596 struct iwl_rxon_cmd *rxon = &ctx->staging;
547 597
548 if (hw_decrypt) 598 if (hw_decrypt)
549 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; 599 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
@@ -560,11 +610,11 @@ EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
560 * be #ifdef'd out once the driver is stable and folks aren't actively 610 * be #ifdef'd out once the driver is stable and folks aren't actively
561 * making changes 611 * making changes
562 */ 612 */
563int iwl_check_rxon_cmd(struct iwl_priv *priv) 613int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
564{ 614{
565 int error = 0; 615 int error = 0;
566 int counter = 1; 616 int counter = 1;
567 struct iwl_rxon_cmd *rxon = &priv->staging_rxon; 617 struct iwl_rxon_cmd *rxon = &ctx->staging;
568 618
569 if (rxon->flags & RXON_FLG_BAND_24G_MSK) { 619 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
570 error |= le32_to_cpu(rxon->flags & 620 error |= le32_to_cpu(rxon->flags &
@@ -636,66 +686,83 @@ EXPORT_SYMBOL(iwl_check_rxon_cmd);
  * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
  * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
  */
-int iwl_full_rxon_required(struct iwl_priv *priv)
+int iwl_full_rxon_required(struct iwl_priv *priv,
+			   struct iwl_rxon_context *ctx)
 {
+	const struct iwl_rxon_cmd *staging = &ctx->staging;
+	const struct iwl_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond)							\
+	if ((cond)) {							\
+		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
+		return 1;						\
+	}
+
+#define CHK_NEQ(c1, c2)						\
+	if ((c1) != (c2)) {					\
+		IWL_DEBUG_INFO(priv, "need full RXON - "	\
+			       #c1 " != " #c2 " - %d != %d\n",	\
+			       (c1), (c2));			\
+		return 1;					\
+	}
 
 	/* These items are only settable from the full RXON command */
-	if (!(iwl_is_associated(priv)) ||
-	    compare_ether_addr(priv->staging_rxon.bssid_addr,
-			       priv->active_rxon.bssid_addr) ||
-	    compare_ether_addr(priv->staging_rxon.node_addr,
-			       priv->active_rxon.node_addr) ||
-	    compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
-			       priv->active_rxon.wlap_bssid_addr) ||
-	    (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
-	    (priv->staging_rxon.channel != priv->active_rxon.channel) ||
-	    (priv->staging_rxon.air_propagation !=
-	     priv->active_rxon.air_propagation) ||
-	    (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
-	     priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
-	    (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
-	     priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
-	    (priv->staging_rxon.ofdm_ht_triple_stream_basic_rates !=
-	     priv->active_rxon.ofdm_ht_triple_stream_basic_rates) ||
-	    (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
-		return 1;
+	CHK(!iwl_is_associated_ctx(ctx));
+	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+	CHK(compare_ether_addr(staging->wlap_bssid_addr,
+			       active->wlap_bssid_addr));
+	CHK_NEQ(staging->dev_type, active->dev_type);
+	CHK_NEQ(staging->channel, active->channel);
+	CHK_NEQ(staging->air_propagation, active->air_propagation);
+	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+		active->ofdm_ht_single_stream_basic_rates);
+	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+		active->ofdm_ht_dual_stream_basic_rates);
+	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
+		active->ofdm_ht_triple_stream_basic_rates);
+	CHK_NEQ(staging->assoc_id, active->assoc_id);
 
 	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
 	 * be updated with the RXON_ASSOC command -- however only some
 	 * flag transitions are allowed using RXON_ASSOC */
 
 	/* Check if we are not switching bands */
-	if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
-	    (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
-		return 1;
+	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+		active->flags & RXON_FLG_BAND_24G_MSK);
 
 	/* Check if we are switching association toggle */
-	if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
-	    (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
-		return 1;
+	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+		active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
 
 	return 0;
 }
 EXPORT_SYMBOL(iwl_full_rxon_required);
 
-u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
+u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx)
 {
 	/*
 	 * Assign the lowest rate -- should really get this from
 	 * the beacon skb from mac80211.
 	 */
-	if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
+	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
 		return IWL_RATE_1M_PLCP;
 	else
 		return IWL_RATE_6M_PLCP;
 }
 EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
 
-void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
+static void _iwl_set_rxon_ht(struct iwl_priv *priv,
+			     struct iwl_ht_config *ht_conf,
+			     struct iwl_rxon_context *ctx)
 {
-	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
 
-	if (!ht_conf->is_ht) {
+	if (!ctx->ht.enabled) {
 		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
 				 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
 				 RXON_FLG_HT40_PROT_MSK |
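The CHK()/CHK_NEQ() macros introduced above replace one long || chain with one self-describing test per field, logging the stringized expression before returning. A minimal standalone sketch of the same macro pattern: plain printf() stands in for IWL_DEBUG_INFO(), and the parameter names are illustrative.

	#include <stdio.h>

	#define CHK_NEQ(c1, c2)						\
		if ((c1) != (c2)) {					\
			printf("need full RXON - " #c1 " != " #c2	\
			       " - %d != %d\n", (c1), (c2));		\
			return 1;					\
		}

	/* returns 1 (full RXON needed) on the first mismatching field */
	static int needs_full_rxon(int staging_channel, int active_channel)
	{
		CHK_NEQ(staging_channel, active_channel);
		return 0;
	}

	int main(void)
	{
		/* prints: need full RXON - staging_channel != active_channel - 1 != 6 */
		return needs_full_rxon(1, 6);
	}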
@@ -703,22 +770,22 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
 		return;
 	}
 
-	/* FIXME: if the definition of ht_protection changed, the "translation"
+	/* FIXME: if the definition of ht.protection changed, the "translation"
 	 * will be needed for rxon->flags
 	 */
-	rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);
+	rxon->flags |= cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
 
 	/* Set up channel bandwidth:
 	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
 	/* clear the HT channel mode before set the mode */
 	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
 			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
-	if (iwl_is_ht40_tx_allowed(priv, NULL)) {
+	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
 		/* pure ht40 */
-		if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+		if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
 			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
 			/* Note: control channel is opposite of extension channel */
-			switch (ht_conf->extension_chan_offset) {
+			switch (ctx->ht.extension_chan_offset) {
 			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
 				rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
 				break;
@@ -728,7 +795,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
 			}
 		} else {
 			/* Note: control channel is opposite of extension channel */
-			switch (ht_conf->extension_chan_offset) {
+			switch (ctx->ht.extension_chan_offset) {
 			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
 				rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
 				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
@@ -749,12 +816,20 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
 	}
 
 	if (priv->cfg->ops->hcmd->set_rxon_chain)
-		priv->cfg->ops->hcmd->set_rxon_chain(priv);
+		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 
 	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
 			"extension channel offset 0x%x\n",
-			le32_to_cpu(rxon->flags), ht_conf->ht_protection,
-			ht_conf->extension_chan_offset);
+			le32_to_cpu(rxon->flags), ctx->ht.protection,
+			ctx->ht.extension_chan_offset);
+}
+
+void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
+{
+	struct iwl_rxon_context *ctx;
+
+	for_each_context(priv, ctx)
+		_iwl_set_rxon_ht(priv, ht_conf, ctx);
 }
 EXPORT_SYMBOL(iwl_set_rxon_ht);
 
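iwl_set_rxon_ht() is reduced above to a loop that applies the old body, now _iwl_set_rxon_ht(), to every RXON context. A sketch of that iteration pattern, assuming for_each_context() simply walks a fixed contexts[] array (the driver's real macro is defined elsewhere in this series and may differ):

	#include <stdio.h>

	#define NUM_IWL_RXON_CTX 2

	struct rxon_context_sketch {
		int id;
	};

	struct priv_sketch {
		struct rxon_context_sketch contexts[NUM_IWL_RXON_CTX];
	};

	#define for_each_context(priv, ctx)				\
		for ((ctx) = &(priv)->contexts[0];			\
		     (ctx) < &(priv)->contexts[NUM_IWL_RXON_CTX]; (ctx)++)

	static void apply_ht_config(struct rxon_context_sketch *ctx)
	{
		printf("configure HT for context %d\n", ctx->id);
	}

	int main(void)
	{
		struct priv_sketch priv = { .contexts = { { 0 }, { 1 } } };
		struct rxon_context_sketch *ctx;

		for_each_context(&priv, ctx)	/* same shape as the wrapper above */
			apply_ht_config(ctx);
		return 0;
	}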
@@ -775,6 +850,14 @@ EXPORT_SYMBOL(iwl_set_rxon_ht);
  */
 static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
 {
+	if (priv->cfg->advanced_bt_coexist && (priv->bt_full_concurrent ||
+	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
+		/*
+		 * only use chain 'A' in bt high traffic load or
+		 * full concurrency mode
+		 */
+		return IWL_NUM_RX_CHAINS_SINGLE;
+	}
 	/* # of Rx chains to use when expecting MIMO. */
 	if (is_single_rx_stream(priv))
 		return IWL_NUM_RX_CHAINS_SINGLE;
@@ -819,7 +902,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
  * Selects how many and which Rx receivers/antennas/chains to use.
  * This should not be used for scan command ... it puts data in wrong place.
  */
-void iwl_set_rxon_chain(struct iwl_priv *priv)
+void iwl_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 {
 	bool is_single = is_single_rx_stream(priv);
 	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
@@ -831,11 +914,20 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
 	 * Before first association, we assume all antennas are connected.
 	 * Just after first association, iwl_chain_noise_calibration()
 	 *    checks which antennas actually *are* connected. */
 	if (priv->chain_noise_data.active_chains)
 		active_chains = priv->chain_noise_data.active_chains;
 	else
 		active_chains = priv->hw_params.valid_rx_ant;
 
+	if (priv->cfg->advanced_bt_coexist && (priv->bt_full_concurrent ||
+	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
+		/*
+		 * only use chain 'A' in bt high traffic load or
+		 * full concurrency mode
+		 */
+		active_chains = first_antenna(active_chains);
+	}
+
 	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
 
 	/* How many receivers should we use? */
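Both BT-coexistence blocks added in this area force the receiver down to a single chain when Bluetooth traffic load is high or full concurrency is active. A sketch of the bitmask clamp, under the assumption that first_antenna() keeps only the lowest set bit of the valid-antenna mask (the driver's helper may be defined differently):

	#include <stdio.h>

	/* illustrative antenna bitmap, one bit per chain A/B/C */
	#define ANT_A	0x1
	#define ANT_B	0x2
	#define ANT_C	0x4

	/* assumption: keep only the lowest set bit, i.e. the first valid chain */
	static unsigned first_antenna(unsigned mask)
	{
		return mask & -mask;
	}

	int main(void)
	{
		unsigned active_chains = ANT_A | ANT_B | ANT_C;
		int bt_high_traffic = 1;

		if (bt_high_traffic)
			active_chains = first_antenna(active_chains);
		printf("active chains bitmap: 0x%x\n", active_chains); /* 0x1 */
		return 0;
	}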
@@ -856,15 +948,15 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
 	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
 	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
 
-	priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
+	ctx->staging.rx_chain = cpu_to_le16(rx_chain);
 
 	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
-		priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
 	else
-		priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
 
 	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
-			priv->staging_rxon.rx_chain,
+			ctx->staging.rx_chain,
 			active_rx_cnt, idle_rx_cnt);
 
 	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
@@ -872,39 +964,41 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
 }
 EXPORT_SYMBOL(iwl_set_rxon_chain);
 
-/* Return valid channel */
+/* Return valid, unused, channel for a passive scan to reset the RF */
 u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 				 enum ieee80211_band band)
 {
 	const struct iwl_channel_info *ch_info;
 	int i;
 	u8 channel = 0;
+	u8 min, max;
+	struct iwl_rxon_context *ctx;
 
-	/* only scan single channel, good enough to reset the RF */
-	/* pick the first valid not in-use channel */
 	if (band == IEEE80211_BAND_5GHZ) {
-		for (i = 14; i < priv->channel_count; i++) {
-			if (priv->channel_info[i].channel !=
-			    le16_to_cpu(priv->staging_rxon.channel)) {
-				channel = priv->channel_info[i].channel;
-				ch_info = iwl_get_channel_info(priv,
-						band, channel);
-				if (is_channel_valid(ch_info))
-					break;
-			}
-		}
+		min = 14;
+		max = priv->channel_count;
 	} else {
-		for (i = 0; i < 14; i++) {
-			if (priv->channel_info[i].channel !=
-			    le16_to_cpu(priv->staging_rxon.channel)) {
-				channel =
-					priv->channel_info[i].channel;
-				ch_info = iwl_get_channel_info(priv,
-						band, channel);
-				if (is_channel_valid(ch_info))
-					break;
-			}
+		min = 0;
+		max = 14;
+	}
+
+	for (i = min; i < max; i++) {
+		bool busy = false;
+
+		for_each_context(priv, ctx) {
+			busy = priv->channel_info[i].channel ==
+				le16_to_cpu(ctx->staging.channel);
+			if (busy)
+				break;
 		}
+
+		if (busy)
+			continue;
+
+		channel = priv->channel_info[i].channel;
+		ch_info = iwl_get_channel_info(priv, band, channel);
+		if (is_channel_valid(ch_info))
+			break;
 	}
 
 	return channel;
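The rewritten helper above collapses the two per-band loops into one [min, max) walk and now skips a channel if any context has it staged, not just the single global RXON. A self-contained sketch of the busy-channel check (channel numbers are made up):

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_CTX 2

	int main(void)
	{
		int staged[NUM_CTX] = { 1, 6 };	/* channels in use, one per context */
		int channels[] = { 1, 6, 11 };
		int i, c, picked = 0;

		for (i = 0; i < 3; i++) {
			bool busy = false;

			for (c = 0; c < NUM_CTX; c++) {
				busy = channels[i] == staged[c];
				if (busy)
					break;
			}
			if (busy)
				continue;
			picked = channels[i];
			break;
		}
		printf("first unused channel: %d\n", picked);	/* 11 */
		return 0;
	}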
@@ -912,35 +1006,27 @@ u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 EXPORT_SYMBOL(iwl_get_single_channel_number);
 
 /**
- * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
- * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
- * @channel: Any channel valid for the requested phymode
+ * iwl_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
 
- * In addition to setting the staging RXON, priv->phymode is also set.
- *
  * NOTE: Does not commit to the hardware; it sets appropriate bit fields
- * in the staging RXON flag structure based on the phymode
+ * in the staging RXON flag structure based on the ch->band
  */
-int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
+int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+			 struct iwl_rxon_context *ctx)
 {
 	enum ieee80211_band band = ch->band;
-	u16 channel = ieee80211_frequency_to_channel(ch->center_freq);
-
-	if (!iwl_get_channel_info(priv, band, channel)) {
-		IWL_DEBUG_INFO(priv, "Could not set channel to %d [%d]\n",
-			       channel, band);
-		return -EINVAL;
-	}
+	u16 channel = ch->hw_value;
 
-	if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
+	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
 	    (priv->band == band))
 		return 0;
 
-	priv->staging_rxon.channel = cpu_to_le16(channel);
+	ctx->staging.channel = cpu_to_le16(channel);
 	if (band == IEEE80211_BAND_5GHZ)
-		priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
+		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
 	else
-		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
+		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
 
 	priv->band = band;
 
@@ -951,24 +1037,25 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
 EXPORT_SYMBOL(iwl_set_rxon_channel);
 
 void iwl_set_flags_for_band(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
 			    enum ieee80211_band band,
 			    struct ieee80211_vif *vif)
 {
 	if (band == IEEE80211_BAND_5GHZ) {
-		priv->staging_rxon.flags &=
+		ctx->staging.flags &=
 		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
 		      | RXON_FLG_CCK_MSK);
-		priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
+		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
 	} else {
 		/* Copied from iwl_post_associate() */
 		if (vif && vif->bss_conf.use_short_slot)
-			priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
+			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
 		else
-			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
 
-		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
-		priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
-		priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
+		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
 	}
 }
 EXPORT_SYMBOL(iwl_set_flags_for_band);
@@ -977,35 +1064,34 @@ EXPORT_SYMBOL(iwl_set_flags_for_band);
  * initialize rxon structure with default values from eeprom
  */
 void iwl_connection_init_rx_config(struct iwl_priv *priv,
-				   struct ieee80211_vif *vif)
+				   struct iwl_rxon_context *ctx)
 {
 	const struct iwl_channel_info *ch_info;
-	enum nl80211_iftype type = NL80211_IFTYPE_STATION;
-
-	if (vif)
-		type = vif->type;
 
-	memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
+	memset(&ctx->staging, 0, sizeof(ctx->staging));
 
-	switch (type) {
+	if (!ctx->vif) {
+		ctx->staging.dev_type = ctx->unused_devtype;
+	} else switch (ctx->vif->type) {
 	case NL80211_IFTYPE_AP:
-		priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
+		ctx->staging.dev_type = ctx->ap_devtype;
 		break;
 
 	case NL80211_IFTYPE_STATION:
-		priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
-		priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+		ctx->staging.dev_type = ctx->station_devtype;
+		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
 		break;
 
 	case NL80211_IFTYPE_ADHOC:
-		priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
-		priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
-		priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
+		ctx->staging.dev_type = ctx->ibss_devtype;
+		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
 						  RXON_FILTER_ACCEPT_GRP_MSK;
 		break;
 
 	default:
-		IWL_ERR(priv, "Unsupported interface type %d\n", type);
+		IWL_ERR(priv, "Unsupported interface type %d\n",
+			ctx->vif->type);
 		break;
 	}
 
@@ -1013,37 +1099,36 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
 	/* TODO: Figure out when short_preamble would be set and cache from
 	 * that */
 	if (!hw_to_local(priv->hw)->short_preamble)
-		priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
 	else
-		priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
 #endif
 
 	ch_info = iwl_get_channel_info(priv, priv->band,
-				       le16_to_cpu(priv->active_rxon.channel));
+				       le16_to_cpu(ctx->active.channel));
 
 	if (!ch_info)
 		ch_info = &priv->channel_info[0];
 
-	priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
+	ctx->staging.channel = cpu_to_le16(ch_info->channel);
 	priv->band = ch_info->band;
 
-	iwl_set_flags_for_band(priv, priv->band, vif);
+	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
 
-	priv->staging_rxon.ofdm_basic_rates =
+	ctx->staging.ofdm_basic_rates =
 	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-	priv->staging_rxon.cck_basic_rates =
+	ctx->staging.cck_basic_rates =
 	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
 
 	/* clear both MIX and PURE40 mode flag */
-	priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
-					RXON_FLG_CHANNEL_MODE_PURE_40);
+	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
+				RXON_FLG_CHANNEL_MODE_PURE_40);
+	if (ctx->vif)
+		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
 
-	if (vif)
-		memcpy(priv->staging_rxon.node_addr, vif->addr, ETH_ALEN);
-
-	priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
-	priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
-	priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
+	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
 }
 EXPORT_SYMBOL(iwl_connection_init_rx_config);
 
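dev_type above now comes from per-context fields (ap_devtype, station_devtype, ibss_devtype, unused_devtype) instead of fixed RXON_DEV_TYPE_* constants, so different contexts can report different firmware device types for the same interface type. A sketch of that indirection, with purely illustrative values:

	#include <stdio.h>

	enum iftype { IF_AP, IF_STATION, IF_ADHOC };

	struct ctx_sketch {
		int ap_devtype;
		int station_devtype;
		int ibss_devtype;
	};

	static int devtype_for(const struct ctx_sketch *ctx, enum iftype t)
	{
		switch (t) {
		case IF_AP:      return ctx->ap_devtype;
		case IF_STATION: return ctx->station_devtype;
		case IF_ADHOC:   return ctx->ibss_devtype;
		}
		return -1;
	}

	int main(void)
	{
		/* same interface type, different firmware devtype per context */
		struct ctx_sketch bss = { 1, 3, 4 }, pan = { 8, 9, -1 };

		printf("station on BSS ctx -> %d, on PAN ctx -> %d\n",
		       devtype_for(&bss, IF_STATION), devtype_for(&pan, IF_STATION));
		return 0;
	}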
@@ -1051,6 +1136,7 @@ void iwl_set_rate(struct iwl_priv *priv)
 {
 	const struct ieee80211_supported_band *hw = NULL;
 	struct ieee80211_rate *rate;
+	struct iwl_rxon_context *ctx;
 	int i;
 
 	hw = iwl_get_hw_mode(priv, priv->band);
@@ -1069,21 +1155,29 @@ void iwl_set_rate(struct iwl_priv *priv)
 
 	IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
 
-	priv->staging_rxon.cck_basic_rates =
-	    (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
+	for_each_context(priv, ctx) {
+		ctx->staging.cck_basic_rates =
+		   (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
 
-	priv->staging_rxon.ofdm_basic_rates =
-	   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+		ctx->staging.ofdm_basic_rates =
+		   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+	}
 }
 EXPORT_SYMBOL(iwl_set_rate);
 
 void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
 {
+	/*
+	 * MULTI-FIXME
+	 * See iwl_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
 	if (priv->switch_rxon.switch_in_progress) {
-		ieee80211_chswitch_done(priv->vif, is_success);
+		ieee80211_chswitch_done(ctx->vif, is_success);
 		mutex_lock(&priv->mutex);
 		priv->switch_rxon.switch_in_progress = false;
 		mutex_unlock(&priv->mutex);
@@ -1094,14 +1188,19 @@ EXPORT_SYMBOL(iwl_chswitch_done);
 void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
 	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
+	/*
+	 * MULTI-FIXME
+	 * See iwl_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
 
 	if (priv->switch_rxon.switch_in_progress) {
 		if (!le32_to_cpu(csa->status) &&
 		    (csa->channel == priv->switch_rxon.channel)) {
 			rxon->channel = csa->channel;
-			priv->staging_rxon.channel = csa->channel;
+			ctx->staging.channel = csa->channel;
 			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
 				      le16_to_cpu(csa->channel));
 			iwl_chswitch_done(priv, true);
@@ -1115,9 +1214,10 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 EXPORT_SYMBOL(iwl_rx_csa);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
-void iwl_print_rx_config_cmd(struct iwl_priv *priv)
+void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+			     struct iwl_rxon_context *ctx)
 {
-	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
 
 	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
 	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
@@ -1157,7 +1257,8 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
 	priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
 #ifdef CONFIG_IWLWIFI_DEBUG
 	if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
-		iwl_print_rx_config_cmd(priv);
+		iwl_print_rx_config_cmd(priv,
+					&priv->contexts[IWL_RXON_CTX_BSS]);
 #endif
 
 	wake_up_interruptible(&priv->wait_command_queue);
@@ -1328,25 +1429,6 @@ out:
 EXPORT_SYMBOL(iwl_apm_init);
 
 
-int iwl_set_hw_params(struct iwl_priv *priv)
-{
-	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
-	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
-	if (priv->cfg->mod_params->amsdu_size_8K)
-		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
-	else
-		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
-
-	priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
-
-	if (priv->cfg->mod_params->disable_11n)
-		priv->cfg->sku &= ~IWL_SKU_N;
-
-	/* Device-specific setup */
-	return priv->cfg->ops->lib->set_hw_params(priv);
-}
-EXPORT_SYMBOL(iwl_set_hw_params);
-
 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
 {
 	int ret = 0;
@@ -1496,76 +1578,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
 }
 EXPORT_SYMBOL(iwl_send_statistics_request);
 
-void iwl_rf_kill_ct_config(struct iwl_priv *priv)
-{
-	struct iwl_ct_kill_config cmd;
-	struct iwl_ct_kill_throttling_config adv_cmd;
-	unsigned long flags;
-	int ret = 0;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
-		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
-	spin_unlock_irqrestore(&priv->lock, flags);
-	priv->thermal_throttle.ct_kill_toggle = false;
-
-	if (priv->cfg->support_ct_kill_exit) {
-		adv_cmd.critical_temperature_enter =
-			cpu_to_le32(priv->hw_params.ct_kill_threshold);
-		adv_cmd.critical_temperature_exit =
-			cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
-
-		ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
-				       sizeof(adv_cmd), &adv_cmd);
-		if (ret)
-			IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
-		else
-			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
-				       "succeeded, "
-				       "critical temperature enter is %d,"
-				       "exit is %d\n",
-				       priv->hw_params.ct_kill_threshold,
-				       priv->hw_params.ct_kill_exit_threshold);
-	} else {
-		cmd.critical_temperature_R =
-			cpu_to_le32(priv->hw_params.ct_kill_threshold);
-
-		ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
-				       sizeof(cmd), &cmd);
-		if (ret)
-			IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
-		else
-			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
-				       "succeeded, "
-				       "critical temperature is %d\n",
-				       priv->hw_params.ct_kill_threshold);
-	}
-}
-EXPORT_SYMBOL(iwl_rf_kill_ct_config);
-
-
-/*
- * CARD_STATE_CMD
- *
- * Use: Sets the device's internal card state to enable, disable, or halt
- *
- * When in the 'enable' state the card operates as normal.
- * When in the 'disable' state, the card enters into a low power mode.
- * When in the 'halt' state, the card is shut down and must be fully
- * restarted to come back on.
- */
-int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
-{
-	struct iwl_host_cmd cmd = {
-		.id = REPLY_CARD_STATE_CMD,
-		.len = sizeof(u32),
-		.data = &flags,
-		.flags = meta_flag,
-	};
-
-	return iwl_send_cmd(priv, &cmd);
-}
-
 void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
 			   struct iwl_rx_mem_buffer *rxb)
 {
@@ -1614,6 +1626,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
 		    const struct ieee80211_tx_queue_params *params)
 {
 	struct iwl_priv *priv = hw->priv;
+	struct iwl_rxon_context *ctx;
 	unsigned long flags;
 	int q;
 
@@ -1633,13 +1646,21 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
-	priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
-	priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
-	priv->qos_data.def_qos_parm.ac[q].edca_txop =
-			cpu_to_le16((params->txop * 32));
+	/*
+	 * MULTI-FIXME
+	 * This may need to be done per interface in nl80211/cfg80211/mac80211.
+	 */
+	for_each_context(priv, ctx) {
+		ctx->qos_data.def_qos_parm.ac[q].cw_min =
+			cpu_to_le16(params->cw_min);
+		ctx->qos_data.def_qos_parm.ac[q].cw_max =
+			cpu_to_le16(params->cw_max);
+		ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+		ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+			cpu_to_le16((params->txop * 32));
 
-	priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+		ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
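The loop above copies one set of mac80211 EDCA parameters into every context. The `params->txop * 32` is a unit conversion: mac80211 reports TXOP in units of 32 microseconds, and the command field stores plain microseconds. A worked example of just that arithmetic:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t txop_32us = 94;		/* e.g. a video-AC TXOP limit from mac80211 */
		uint16_t edca_txop_us = txop_32us * 32;	/* value written into the firmware field */

		printf("txop = %u * 32us = %u us\n", txop_32us, edca_txop_us); /* 3008 us */
		return 0;
	}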
@@ -1648,21 +1669,30 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
 }
 EXPORT_SYMBOL(iwl_mac_conf_tx);
 
+int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+	struct iwl_priv *priv = hw->priv;
+
+	return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
+
 static void iwl_ht_conf(struct iwl_priv *priv,
 			struct ieee80211_vif *vif)
 {
 	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
 	struct ieee80211_sta *sta;
 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 
 	IWL_DEBUG_MAC80211(priv, "enter:\n");
 
-	if (!ht_conf->is_ht)
+	if (!ctx->ht.enabled)
 		return;
 
-	ht_conf->ht_protection =
+	ctx->ht.protection =
 		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
-	ht_conf->non_GF_STA_present =
+	ctx->ht.non_gf_sta_present =
 		!!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
 
 	ht_conf->single_chain_sufficient = false;
@@ -1706,18 +1736,20 @@ static void iwl_ht_conf(struct iwl_priv *priv,
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-static inline void iwl_set_no_assoc(struct iwl_priv *priv)
+static inline void iwl_set_no_assoc(struct iwl_priv *priv,
+				    struct ieee80211_vif *vif)
 {
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
 	iwl_led_disassociate(priv);
 	/*
 	 * inform the ucode that there is no longer an
 	 * association and that no more packets should be
 	 * sent
 	 */
-	priv->staging_rxon.filter_flags &=
-		~RXON_FILTER_ASSOC_MSK;
-	priv->staging_rxon.assoc_id = 0;
-	iwlcore_commit_rxon(priv);
+	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	ctx->staging.assoc_id = 0;
+	iwlcore_commit_rxon(priv, ctx);
 }
 
 static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -1728,6 +1760,14 @@ static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 
+	lockdep_assert_held(&priv->mutex);
+
+	if (!priv->beacon_ctx) {
+		IWL_ERR(priv, "update beacon but no beacon context!\n");
+		dev_kfree_skb(skb);
+		return -EINVAL;
+	}
+
 	if (!iwl_is_ready_rf(priv)) {
 		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
 		return -EIO;
@@ -1746,7 +1786,7 @@ static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	priv->cfg->ops->lib->post_associate(priv, priv->vif);
+	priv->cfg->ops->lib->post_associate(priv, priv->beacon_ctx->vif);
 
 	return 0;
 }
@@ -1757,6 +1797,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 			  u32 changes)
 {
 	struct iwl_priv *priv = hw->priv;
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 	int ret;
 
 	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
@@ -1770,19 +1811,30 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 		unsigned long flags;
 
 		spin_lock_irqsave(&priv->lock, flags);
-		priv->qos_data.qos_active = bss_conf->qos;
-		iwl_update_qos(priv);
+		ctx->qos_data.qos_active = bss_conf->qos;
+		iwl_update_qos(priv, ctx);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
+	if (changes & BSS_CHANGED_BEACON_ENABLED) {
+		/*
+		 * the add_interface code must make sure we only ever
+		 * have a single interface that could be beaconing at
+		 * any time.
+		 */
+		if (vif->bss_conf.enable_beacon)
+			priv->beacon_ctx = ctx;
+		else
+			priv->beacon_ctx = NULL;
+	}
+
 	if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
 		dev_kfree_skb(priv->ibss_beacon);
 		priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
 	}
 
-	if (changes & BSS_CHANGED_BEACON_INT) {
-		/* TODO: in AP mode, do something to make this take effect */
-	}
+	if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
+		iwl_send_rxon_timing(priv, ctx);
 
 	if (changes & BSS_CHANGED_BSSID) {
 		IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
@@ -1801,13 +1853,13 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 
 		/* mac80211 only sets assoc when in STATION mode */
 		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
-			memcpy(priv->staging_rxon.bssid_addr,
+			memcpy(ctx->staging.bssid_addr,
 			       bss_conf->bssid, ETH_ALEN);
 
 			/* currently needed in a few places */
 			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
 		} else {
-			priv->staging_rxon.filter_flags &=
+			ctx->staging.filter_flags &=
 				~RXON_FILTER_ASSOC_MSK;
 		}
 
@@ -1830,21 +1882,21 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
 				   bss_conf->use_short_preamble);
 		if (bss_conf->use_short_preamble)
-			priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
 		else
-			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
 	}
 
 	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
 		IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
 		if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
-			priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
+			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
 		else
-			priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
 		if (bss_conf->use_cts_prot)
-			priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
+			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
 		else
-			priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
+			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
 	}
 
 	if (changes & BSS_CHANGED_BASIC_RATES) {
@@ -1854,12 +1906,12 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 		 * like this here:
 		 *
 		   if (A-band)
-			priv->staging_rxon.ofdm_basic_rates =
+			ctx->staging.ofdm_basic_rates =
 				bss_conf->basic_rates;
 		   else
-			priv->staging_rxon.ofdm_basic_rates =
+			ctx->staging.ofdm_basic_rates =
 				bss_conf->basic_rates >> 4;
-			priv->staging_rxon.cck_basic_rates =
+			ctx->staging.cck_basic_rates =
 				bss_conf->basic_rates & 0xF;
 		 */
 	}
@@ -1868,7 +1920,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 		iwl_ht_conf(priv, vif);
 
 		if (priv->cfg->ops->hcmd->set_rxon_chain)
-			priv->cfg->ops->hcmd->set_rxon_chain(priv);
+			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 	}
 
 	if (changes & BSS_CHANGED_ASSOC) {
@@ -1881,29 +1933,29 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 			if (!iwl_is_rfkill(priv))
 				priv->cfg->ops->lib->post_associate(priv, vif);
 		} else
-			iwl_set_no_assoc(priv);
+			iwl_set_no_assoc(priv, vif);
 	}
 
-	if (changes && iwl_is_associated(priv) && bss_conf->aid) {
+	if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
 		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
 				   changes);
-		ret = iwl_send_rxon_assoc(priv);
+		ret = iwl_send_rxon_assoc(priv, ctx);
 		if (!ret) {
 			/* Sync active_rxon with latest change. */
-			memcpy((void *)&priv->active_rxon,
-			       &priv->staging_rxon,
+			memcpy((void *)&ctx->active,
+			       &ctx->staging,
 			       sizeof(struct iwl_rxon_cmd));
 		}
 	}
 
 	if (changes & BSS_CHANGED_BEACON_ENABLED) {
 		if (vif->bss_conf.enable_beacon) {
-			memcpy(priv->staging_rxon.bssid_addr,
+			memcpy(ctx->staging.bssid_addr,
 			       bss_conf->bssid, ETH_ALEN);
 			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
 			iwlcore_config_ap(priv, vif);
 		} else
-			iwl_set_no_assoc(priv);
+			iwl_set_no_assoc(priv, vif);
 	}
 
 	if (changes & BSS_CHANGED_IBSS) {
@@ -1915,6 +1967,12 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 					bss_conf->bssid);
 	}
 
+	if (changes & BSS_CHANGED_IDLE &&
+	    priv->cfg->ops->hcmd->set_pan_params) {
+		if (priv->cfg->ops->hcmd->set_pan_params(priv))
+			IWL_ERR(priv, "failed to update PAN params\n");
+	}
+
 	mutex_unlock(&priv->mutex);
 
 	IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1923,17 +1981,21 @@ EXPORT_SYMBOL(iwl_bss_info_changed);
 
 static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
 {
-	iwl_connection_init_rx_config(priv, vif);
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+	iwl_connection_init_rx_config(priv, ctx);
 
 	if (priv->cfg->ops->hcmd->set_rxon_chain)
-		priv->cfg->ops->hcmd->set_rxon_chain(priv);
+		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 
-	return iwlcore_commit_rxon(priv);
+	return iwlcore_commit_rxon(priv, ctx);
 }
 
 int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
 	struct iwl_priv *priv = hw->priv;
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_rxon_context *tmp, *ctx = NULL;
 	int err = 0;
 
 	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
@@ -1946,23 +2008,65 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 		goto out;
 	}
 
-	if (priv->vif) {
-		IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
+	for_each_context(priv, tmp) {
+		u32 possible_modes =
+			tmp->interface_modes | tmp->exclusive_interface_modes;
+
+		if (tmp->vif) {
+			/* check if this busy context is exclusive */
+			if (tmp->exclusive_interface_modes &
+			    BIT(tmp->vif->type)) {
+				err = -EINVAL;
+				goto out;
+			}
+			continue;
+		}
+
+		if (!(possible_modes & BIT(vif->type)))
+			continue;
+
+		/* have maybe usable context w/o interface */
+		ctx = tmp;
+		break;
+	}
+
+	if (!ctx) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
 
-	priv->vif = vif;
+	vif_priv->ctx = ctx;
+	ctx->vif = vif;
+	/*
+	 * This variable will be correct only when there's just
+	 * a single context, but all code using it is for hardware
+	 * that supports only one context.
+	 */
 	priv->iw_mode = vif->type;
 
+	ctx->is_active = true;
+
 	err = iwl_set_mode(priv, vif);
-	if (err)
+	if (err) {
+		if (!ctx->always_active)
+			ctx->is_active = false;
 		goto out_err;
+	}
+
+	if (priv->cfg->advanced_bt_coexist &&
+	    vif->type == NL80211_IFTYPE_ADHOC) {
+		/*
+		 * pretend to have high BT traffic as long as we
+		 * are operating in IBSS mode, as this will cause
+		 * the rate scaling etc. to behave as intended.
+		 */
+		priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
+	}
 
 	goto out;
 
  out_err:
-	priv->vif = NULL;
+	ctx->vif = NULL;
 	priv->iw_mode = NL80211_IFTYPE_STATION;
  out:
 	mutex_unlock(&priv->mutex);
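Context selection in the hunk above is bitmask matching: each context advertises the interface types it can host as BIT(type) masks, a busy exclusive context rejects the new interface outright, and the first free context whose mask covers the requested type is chosen. A compact sketch of the same walk (type numbers and masks are illustrative):

	#include <stdio.h>

	#define BIT(n) (1U << (n))

	struct ctx_sketch {
		unsigned modes;		/* interface_modes | exclusive_interface_modes */
		int busy;
	};

	int main(void)
	{
		enum { IF_STATION = 2, IF_AP = 3 };
		struct ctx_sketch ctxs[2] = {
			{ BIT(IF_STATION) | BIT(IF_AP), 1 },	/* already taken */
			{ BIT(IF_STATION), 0 },
		};
		int i, picked = -1, want = IF_STATION;

		for (i = 0; i < 2; i++) {
			if (ctxs[i].busy)
				continue;
			if (!(ctxs[i].modes & BIT(want)))
				continue;
			picked = i;
			break;
		}
		printf("context for new interface: %d\n", picked);	/* 1 */
		return 0;
	}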
@@ -1976,30 +2080,36 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
 			      struct ieee80211_vif *vif)
 {
 	struct iwl_priv *priv = hw->priv;
-	bool scan_completed = false;
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
 
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 
 	mutex_lock(&priv->mutex);
 
-	if (iwl_is_ready_rf(priv)) {
-		iwl_scan_cancel_timeout(priv, 100);
-		priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-		iwlcore_commit_rxon(priv);
-	}
-	if (priv->vif == vif) {
-		priv->vif = NULL;
-		if (priv->scan_vif == vif) {
-			scan_completed = true;
-			priv->scan_vif = NULL;
-			priv->scan_request = NULL;
-		}
-		memset(priv->bssid, 0, ETH_ALEN);
+	WARN_ON(ctx->vif != vif);
+	ctx->vif = NULL;
+
+	if (priv->scan_vif == vif) {
+		iwl_scan_cancel_timeout(priv, 200);
+		iwl_force_scan_end(priv);
 	}
-	mutex_unlock(&priv->mutex);
+	iwl_set_mode(priv, vif);
+
+	if (!ctx->always_active)
+		ctx->is_active = false;
+
+	/*
+	 * When removing the IBSS interface, overwrite the
+	 * BT traffic load with the stored one from the last
+	 * notification, if any. If this is a device that
+	 * doesn't implement this, this has no effect since
+	 * both values are the same and zero.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC)
+		priv->bt_traffic_load = priv->notif_bt_traffic_load;
 
-	if (scan_completed)
-		ieee80211_scan_completed(priv->hw, true);
+	memset(priv->bssid, 0, ETH_ALEN);
+	mutex_unlock(&priv->mutex);
 
 	IWL_DEBUG_MAC80211(priv, "leave\n");
 
@@ -2014,7 +2124,9 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 	struct iwl_priv *priv = hw->priv;
 	const struct iwl_channel_info *ch_info;
 	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_channel *channel = conf->channel;
 	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+	struct iwl_rxon_context *ctx;
 	unsigned long flags = 0;
 	int ret = 0;
 	u16 ch;
@@ -2023,7 +2135,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 	mutex_lock(&priv->mutex);
 
 	IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
-					conf->channel->hw_value, changed);
+					channel->hw_value, changed);
 
 	if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
 		     test_bit(STATUS_SCANNING, &priv->status))) {
@@ -2044,7 +2156,8 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 		 * configured.
 		 */
 		if (priv->cfg->ops->hcmd->set_rxon_chain)
-			priv->cfg->ops->hcmd->set_rxon_chain(priv);
+			for_each_context(priv, ctx)
+				priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
 	}
 
 	/* during scanning mac80211 will delay channel setting until
@@ -2054,8 +2167,8 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 	if (scan_active)
 		goto set_ch_out;
 
-	ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
-	ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
+	ch = channel->hw_value;
+	ch_info = iwl_get_channel_info(priv, channel->band, ch);
 	if (!is_channel_valid(ch_info)) {
 		IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
 		ret = -EINVAL;
@@ -2064,42 +2177,49 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	/* Configure HT40 channels */
-	ht_conf->is_ht = conf_is_ht(conf);
-	if (ht_conf->is_ht) {
-		if (conf_is_ht40_minus(conf)) {
-			ht_conf->extension_chan_offset =
-				IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-			ht_conf->is_40mhz = true;
-		} else if (conf_is_ht40_plus(conf)) {
-			ht_conf->extension_chan_offset =
-				IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-			ht_conf->is_40mhz = true;
-		} else {
-			ht_conf->extension_chan_offset =
-				IEEE80211_HT_PARAM_CHA_SEC_NONE;
-			ht_conf->is_40mhz = false;
-		}
-	} else
-		ht_conf->is_40mhz = false;
-	/* Default to no protection. Protection mode will later be set
-	 * from BSS config in iwl_ht_conf */
-	ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+	for_each_context(priv, ctx) {
+		/* Configure HT40 channels */
+		ctx->ht.enabled = conf_is_ht(conf);
+		if (ctx->ht.enabled) {
+			if (conf_is_ht40_minus(conf)) {
+				ctx->ht.extension_chan_offset =
+					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+				ctx->ht.is_40mhz = true;
+			} else if (conf_is_ht40_plus(conf)) {
+				ctx->ht.extension_chan_offset =
+					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+				ctx->ht.is_40mhz = true;
+			} else {
+				ctx->ht.extension_chan_offset =
+					IEEE80211_HT_PARAM_CHA_SEC_NONE;
+				ctx->ht.is_40mhz = false;
+			}
+		} else
+			ctx->ht.is_40mhz = false;
 
-	/* if we are switching from ht to 2.4 clear flags
-	 * from any ht related info since 2.4 does not
-	 * support ht */
-	if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
-		priv->staging_rxon.flags = 0;
+		/*
+		 * Default to no protection. Protection mode will
+		 * later be set from BSS config in iwl_ht_conf
+		 */
+		ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+		/* if we are switching from ht to 2.4 clear flags
+		 * from any ht related info since 2.4 does not
+		 * support ht */
+		if ((le16_to_cpu(ctx->staging.channel) != ch))
+			ctx->staging.flags = 0;
 
-	iwl_set_rxon_channel(priv, conf->channel);
-	iwl_set_rxon_ht(priv, ht_conf);
+		iwl_set_rxon_channel(priv, channel, ctx);
+		iwl_set_rxon_ht(priv, ht_conf);
+
+		iwl_set_flags_for_band(priv, ctx, channel->band,
+				       ctx->vif);
+	}
 
-	iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	if (priv->cfg->ops->lib->update_bcast_station)
-		ret = priv->cfg->ops->lib->update_bcast_station(priv);
+	if (priv->cfg->ops->lib->update_bcast_stations)
+		ret = priv->cfg->ops->lib->update_bcast_stations(priv);
 
  set_ch_out:
 	/* The list of supported rates and rate mask can be different
@@ -2130,12 +2250,13 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
 	if (scan_active)
 		goto out;
 
-	if (memcmp(&priv->active_rxon,
-		   &priv->staging_rxon, sizeof(priv->staging_rxon)))
-		iwlcore_commit_rxon(priv);
-	else
-		IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration.\n");
-
+	for_each_context(priv, ctx) {
+		if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+			iwlcore_commit_rxon(priv, ctx);
+		else
+			IWL_DEBUG_INFO(priv,
+				"Not re-sending same RXON configuration.\n");
+	}
 
 out:
 	IWL_DEBUG_MAC80211(priv, "leave\n");
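The commit path above keeps the staging/active double-buffer scheme, now once per context: RXON is re-sent only when the staging copy differs from what the firmware last accepted. A sketch of that compare-then-commit pattern with stand-in types (the commit() helper is illustrative, not the driver's iwlcore_commit_rxon()):

	#include <stdio.h>
	#include <string.h>

	struct rxon_sketch {
		unsigned short channel;
		unsigned flags;
	};

	/* send staging to the device, then mirror it into active */
	static void commit(struct rxon_sketch *active, const struct rxon_sketch *staging)
	{
		printf("sending RXON for channel %u\n", staging->channel);
		memcpy(active, staging, sizeof(*active));
	}

	int main(void)
	{
		struct rxon_sketch active, staging;

		memset(&active, 0, sizeof(active));
		memset(&staging, 0, sizeof(staging));
		active.channel = 1;
		staging.channel = 6;

		if (memcmp(&active, &staging, sizeof(staging)))
			commit(&active, &staging);
		else
			printf("Not re-sending same RXON configuration.\n");
		return 0;
	}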
@@ -2148,6 +2269,8 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
 {
 	struct iwl_priv *priv = hw->priv;
 	unsigned long flags;
+	/* IBSS can only be the IWL_RXON_CTX_BSS context */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 
 	mutex_lock(&priv->mutex);
 	IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -2168,6 +2291,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
+	iwl_scan_cancel_timeout(priv, 100);
 	if (!iwl_is_ready_rf(priv)) {
 		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
 		mutex_unlock(&priv->mutex);
@@ -2177,9 +2301,8 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
 	/* we are restarting association process
 	 * clear RXON_FILTER_ASSOC_MSK bit
 	 */
-	iwl_scan_cancel_timeout(priv, 100);
-	priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-	iwlcore_commit_rxon(priv);
+	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	iwlcore_commit_rxon(priv, ctx);
 
 	iwl_set_rate(priv);
 
@@ -2588,7 +2711,7 @@ static void iwl_force_rf_reset(struct iwl_priv *priv)
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
-	if (!iwl_is_associated(priv)) {
+	if (!iwl_is_any_associated(priv)) {
 		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
 		return;
 	}
@@ -2719,10 +2842,14 @@ static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
 				"queue %d, not read %d time\n",
 				q->id,
 				q->repeat_same_read_ptr);
-		mod_timer(&priv->monitor_recover, jiffies +
-			msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS));
+		if (!priv->cfg->advanced_bt_coexist) {
+			mod_timer(&priv->monitor_recover,
+				jiffies + msecs_to_jiffies(
+					IWL_ONE_HUNDRED_MSECS));
+			return 1;
+		}
 	}
-	return 1;
+	return 0;
 	} else {
 		q->last_read_ptr = q->read_ptr;
 		q->repeat_same_read_ptr = 0;
@@ -2740,25 +2867,27 @@ void iwl_bg_monitor_recover(unsigned long data)
 		return;
 
 	/* monitor and check for stuck cmd queue */
-	if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM))
+	if (iwl_check_stuck_queue(priv, priv->cmd_queue))
 		return;
 
 	/* monitor and check for other stuck queues */
-	if (iwl_is_associated(priv)) {
+	if (iwl_is_any_associated(priv)) {
 		for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
 			/* skip as we already checked the command queue */
-			if (cnt == IWL_CMD_QUEUE_NUM)
+			if (cnt == priv->cmd_queue)
 				continue;
 			if (iwl_check_stuck_queue(priv, cnt))
 				return;
 		}
 	}
-	/*
-	 * Reschedule the timer to occur in
-	 * priv->cfg->monitor_recover_period
-	 */
-	mod_timer(&priv->monitor_recover,
-		  jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
+	if (priv->cfg->monitor_recover_period) {
+		/*
+		 * Reschedule the timer to occur in
+		 * priv->cfg->monitor_recover_period
+		 */
+		mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies(
+			priv->cfg->monitor_recover_period));
+	}
 }
 EXPORT_SYMBOL(iwl_bg_monitor_recover);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 5e6ee3da6bbf..f0302bfe85f5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -88,11 +88,13 @@ struct iwl_cmd;
 #define IWL_CMD(x) case x: return #x
 
 struct iwl_hcmd_ops {
-	int (*rxon_assoc)(struct iwl_priv *priv);
-	int (*commit_rxon)(struct iwl_priv *priv);
-	void (*set_rxon_chain)(struct iwl_priv *priv);
+	int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+	int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+	void (*set_rxon_chain)(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx);
 	int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
 	void (*send_bt_config)(struct iwl_priv *priv);
+	int (*set_pan_params)(struct iwl_priv *priv);
 };
 
 struct iwl_hcmd_utils_ops {
@@ -109,7 +111,7 @@ struct iwl_hcmd_utils_ops {
 			   __le16 fc, __le32 *tx_flags);
 	int  (*calc_rssi)(struct iwl_priv *priv,
 			  struct iwl_rx_phy_res *rx_resp);
-	void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
+	int  (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
 };
 
 struct iwl_apm_ops {
@@ -128,6 +130,8 @@ struct iwl_debugfs_ops {
 			     size_t count, loff_t *ppos);
 	ssize_t (*bt_stats_read)(struct file *file, char __user *user_buf,
 				 size_t count, loff_t *ppos);
+	ssize_t (*reply_tx_error)(struct file *file, char __user *user_buf,
+				  size_t count, loff_t *ppos);
 };
 
 struct iwl_temp_ops {
@@ -136,6 +140,12 @@ struct iwl_temp_ops {
 	void (*set_calib_version)(struct iwl_priv *priv);
 };
 
+struct iwl_tt_ops {
+	bool (*lower_power_detection)(struct iwl_priv *priv);
+	u8 (*tt_power_mode)(struct iwl_priv *priv);
+	bool (*ct_kill_check)(struct iwl_priv *priv);
+};
+
 struct iwl_lib_ops {
 	/* set hw dependent parameters */
 	int (*set_hw_params)(struct iwl_priv *priv);
@@ -199,7 +209,7 @@ struct iwl_lib_ops {
 	/* station management */
 	int (*manage_ibss_station)(struct iwl_priv *priv,
 				   struct ieee80211_vif *vif, bool add);
-	int (*update_bcast_station)(struct iwl_priv *priv);
+	int (*update_bcast_stations)(struct iwl_priv *priv);
 	/* recover from tx queue stall */
 	void (*recover_from_tx_stall)(unsigned long data);
 	/* check for plcp health */
@@ -212,6 +222,9 @@ struct iwl_lib_ops {
 	void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control);
 
 	struct iwl_debugfs_ops debugfs_ops;
+
+	/* thermal throttling */
+	struct iwl_tt_ops tt_ops;
 };
 
 struct iwl_led_ops {
@@ -269,6 +282,14 @@ struct iwl_mod_params {
  * @chain_noise_calib_by_driver: driver has the capability to perform
  *	chain noise calibration operation
  * @scan_antennas: available antenna for scan operation
+ * @advanced_bt_coexist: support advanced bt coexist
+ * @bt_init_traffic_load: specify initial bt traffic load
+ * @bt_prio_boost: default bt priority boost value
+ * @need_dc_calib: need to perform init dc calibration
+ * @bt_statistics: use BT version of statistics notification
+ * @agg_time_limit: maximum number of uSec in aggregation
+ * @ampdu_factor: Maximum A-MPDU length factor
+ * @ampdu_density: Minimum A-MPDU spacing
  *
  * We enable the driver to be backward compatible wrt API version. The
  * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -337,8 +358,14 @@ struct iwl_cfg {
 	const bool chain_noise_calib_by_driver;
 	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
 	u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
+	bool advanced_bt_coexist;
+	u8 bt_init_traffic_load;
+	u8 bt_prio_boost;
 	const bool need_dc_calib;
 	const bool bt_statistics;
+	u16 agg_time_limit;
+	u8 ampdu_factor;
+	u8 ampdu_density;
 };
 
 /***************************
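For orientation, a hypothetical iwl_cfg initializer showing where the new fields slot in; every value below is illustrative only and not taken from any real device entry (the AMPDU constants come from the iwl-dev.h hunks later in this patch):

	static struct iwl_cfg example_bt_cfg = {
		/* ... existing fields ... */
		.advanced_bt_coexist = true,
		.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
		.bt_prio_boost = 0,			/* illustrative */
		.need_dc_calib = true,
		.bt_statistics = true,
		.agg_time_limit = 4000,			/* uSec, illustrative */
		.ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_64K,
		.ampdu_density = CFG_HT_MPDU_DENSITY_4USEC,
	};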
@@ -347,38 +374,41 @@ struct iwl_cfg {
 
 struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
 		struct ieee80211_ops *hw_ops);
-void iwl_hw_detect(struct iwl_priv *priv);
 void iwl_activate_qos(struct iwl_priv *priv);
 int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
 		    const struct ieee80211_tx_queue_params *params);
-void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
-int iwl_check_rxon_cmd(struct iwl_priv *priv);
-int iwl_full_rxon_required(struct iwl_priv *priv);
-void iwl_set_rxon_chain(struct iwl_priv *priv);
-int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
+int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
+void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			   int hw_decrypt);
+int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+void iwl_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+			 struct iwl_rxon_context *ctx);
 void iwl_set_flags_for_band(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
 			    enum ieee80211_band band,
 			    struct ieee80211_vif *vif);
 u8 iwl_get_single_channel_number(struct iwl_priv *priv,
 				 enum ieee80211_band band);
 void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
-u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
-			  struct ieee80211_sta_ht_cap *sta_ht_inf);
+bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    struct ieee80211_sta_ht_cap *ht_cap);
 void iwl_connection_init_rx_config(struct iwl_priv *priv,
-				   struct ieee80211_vif *vif);
+				   struct iwl_rxon_context *ctx);
 void iwl_set_rate(struct iwl_priv *priv);
 int iwl_set_decrypted_flag(struct iwl_priv *priv,
 			   struct ieee80211_hdr *hdr,
 			   u32 decrypt_res,
 			   struct ieee80211_rx_status *stats);
 void iwl_irq_handle_error(struct iwl_priv *priv);
-int iwl_set_hw_params(struct iwl_priv *priv);
 void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
 void iwl_bss_info_changed(struct ieee80211_hw *hw,
 			  struct ieee80211_vif *vif,
 			  struct ieee80211_bss_conf *bss_conf,
 			  u32 changes);
-int iwl_commit_rxon(struct iwl_priv *priv);
+int iwl_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
 int iwl_mac_add_interface(struct ieee80211_hw *hw,
 			  struct ieee80211_vif *vif);
 void iwl_mac_remove_interface(struct ieee80211_hw *hw,
@@ -496,7 +526,8 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
 
 int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
 
-u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
+u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx);
 
 u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
 
@@ -524,10 +555,10 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
 void iwl_init_scan_params(struct iwl_priv *priv);
 int iwl_scan_cancel(struct iwl_priv *priv);
 int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+void iwl_force_scan_end(struct iwl_priv *priv);
 int iwl_mac_hw_scan(struct ieee80211_hw *hw,
 		    struct ieee80211_vif *vif,
 		    struct cfg80211_scan_request *req);
-void iwl_bg_start_internal_scan(struct work_struct *work);
 void iwl_internal_short_hw_scan(struct iwl_priv *priv);
 int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
 u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
@@ -539,10 +570,8 @@ u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
 u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
 			       enum ieee80211_band band,
 			       struct ieee80211_vif *vif);
-void iwl_bg_scan_check(struct work_struct *data);
-void iwl_bg_abort_scan(struct work_struct *work);
-void iwl_bg_scan_completed(struct work_struct *work);
 void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
+void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
 
 /* For faster active scanning, scan will move to the next channel if fewer than
  * PLCP_QUIET_THRESH packets are heard on this channel within
@@ -580,8 +609,6 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
 
 int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
 
-int iwl_send_card_state(struct iwl_priv *priv, u32 flags,
-			u8 meta_flag);
 
 /*****************************************************
  *  PCI						    *
@@ -616,9 +643,11 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv,
 void iwl_dump_csr(struct iwl_priv *priv);
 int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
 #ifdef CONFIG_IWLWIFI_DEBUG
-void iwl_print_rx_config_cmd(struct iwl_priv *priv);
+void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+			     struct iwl_rxon_context *ctx);
 #else
-static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv)
+static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+					   struct iwl_rxon_context *ctx)
 {
 }
 #endif
@@ -695,23 +724,24 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
 	return iwl_is_ready(priv);
 }
 
-extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
 extern void iwl_send_bt_config(struct iwl_priv *priv);
 extern int iwl_send_statistics_request(struct iwl_priv *priv,
 				       u8 flags, bool clear);
-extern int iwl_send_lq_cmd(struct iwl_priv *priv,
-		struct iwl_link_quality_cmd *lq, u8 flags, bool init);
+extern int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			   struct iwl_link_quality_cmd *lq, u8 flags, bool init);
 void iwl_apm_stop(struct iwl_priv *priv);
 int iwl_apm_init(struct iwl_priv *priv);
 
-void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif);
-static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
+int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+static inline int iwl_send_rxon_assoc(struct iwl_priv *priv,
+				      struct iwl_rxon_context *ctx)
 {
-	return priv->cfg->ops->hcmd->rxon_assoc(priv);
+	return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
 }
-static inline int iwlcore_commit_rxon(struct iwl_priv *priv)
+static inline int iwlcore_commit_rxon(struct iwl_priv *priv,
+				      struct iwl_rxon_context *ctx)
 {
-	return priv->cfg->ops->hcmd->commit_rxon(priv);
+	return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
 }
 static inline void iwlcore_config_ap(struct iwl_priv *priv,
 				     struct ieee80211_vif *vif)
@@ -723,4 +753,8 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
 {
 	return priv->hw->wiphy->bands[band];
 }
+
+extern bool bt_coex_active;
+extern bool bt_siso_mode;
+
 #endif /* __iwl_core_h__ */
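The new set_pan_params hook in iwl_hcmd_ops is optional: hardware without PAN support leaves it NULL, so callers must guard the indirect call, mirroring the reply_tx_error guard in the debugfs code below. A sketch of the expected dispatch pattern (example_update_pan is an illustrative name):

	static int example_update_pan(struct iwl_priv *priv)
	{
		/* PAN-capable devices fill in the hook; others leave it NULL */
		if (priv->cfg->ops->hcmd->set_pan_params)
			return priv->cfg->ops->hcmd->set_pan_params(priv);
		return 0;
	}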
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index e96a1bb12783..265ad01a443f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -467,8 +467,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
 	for (i = 0; i < supp_band->n_channels; i++)
 		pos += scnprintf(buf + pos, bufsz - pos,
 				"%d: %ddBm: BSS%s%s, %s.\n",
-				ieee80211_frequency_to_channel(
-				channels[i].center_freq),
+				channels[i].hw_value,
 				channels[i].max_power,
 				channels[i].flags & IEEE80211_CHAN_RADAR ?
 				" (IEEE 802.11h required)" : "",
@@ -491,8 +490,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
 	for (i = 0; i < supp_band->n_channels; i++)
 		pos += scnprintf(buf + pos, bufsz - pos,
 				"%d: %ddBm: BSS%s%s, %s.\n",
-				ieee80211_frequency_to_channel(
-				channels[i].center_freq),
+				channels[i].hw_value,
 				channels[i].max_power,
 				channels[i].flags & IEEE80211_CHAN_RADAR ?
 				" (IEEE 802.11h required)" : "",
@@ -577,10 +575,10 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
 			priv->isr_stats.hw);
 	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
 			priv->isr_stats.sw);
-	if (priv->isr_stats.sw > 0) {
+	if (priv->isr_stats.sw || priv->isr_stats.hw) {
 		pos += scnprintf(buf + pos, bufsz - pos,
 				"\tLast Restarting Code: 0x%X\n",
-				priv->isr_stats.sw_err);
+				priv->isr_stats.err_code);
 	}
 #ifdef CONFIG_IWLWIFI_DEBUG
 	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
@@ -645,19 +643,25 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
 				size_t count, loff_t *ppos)
 {
 	struct iwl_priv *priv = file->private_data;
+	struct iwl_rxon_context *ctx;
 	int pos = 0, i;
-	char buf[256];
+	char buf[256 * NUM_IWL_RXON_CTX];
 	const size_t bufsz = sizeof(buf);
 
-	for (i = 0; i < AC_NUM; i++) {
-		pos += scnprintf(buf + pos, bufsz - pos,
-			"\tcw_min\tcw_max\taifsn\ttxop\n");
-		pos += scnprintf(buf + pos, bufsz - pos,
+	for_each_context(priv, ctx) {
+		pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
+				 ctx->ctxid);
+		for (i = 0; i < AC_NUM; i++) {
+			pos += scnprintf(buf + pos, bufsz - pos,
+				"\tcw_min\tcw_max\taifsn\ttxop\n");
+			pos += scnprintf(buf + pos, bufsz - pos,
 				"AC[%d]\t%u\t%u\t%u\t%u\n", i,
-				priv->qos_data.def_qos_parm.ac[i].cw_min,
-				priv->qos_data.def_qos_parm.ac[i].cw_max,
-				priv->qos_data.def_qos_parm.ac[i].aifsn,
-				priv->qos_data.def_qos_parm.ac[i].edca_txop);
+				ctx->qos_data.def_qos_parm.ac[i].cw_min,
+				ctx->qos_data.def_qos_parm.ac[i].cw_max,
+				ctx->qos_data.def_qos_parm.ac[i].aifsn,
+				ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+		}
+		pos += scnprintf(buf + pos, bufsz - pos, "\n");
 	}
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
@@ -732,7 +736,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
 		return -EFAULT;
 	if (sscanf(buf, "%d", &ht40) != 1)
 		return -EFAULT;
-	if (!iwl_is_associated(priv))
+	if (!iwl_is_any_associated(priv))
 		priv->disable_ht40 = ht40 ? true : false;
 	else {
 		IWL_ERR(priv, "Sta associated with AP - "
@@ -1321,7 +1325,8 @@ static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
 	int len = 0;
 	char buf[20];
 
-	len = sprintf(buf, "0x%04X\n", le32_to_cpu(priv->active_rxon.flags));
+	len = sprintf(buf, "0x%04X\n",
+		le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -1334,7 +1339,7 @@ static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
 	char buf[20];
 
 	len = sprintf(buf, "0x%04X\n",
-		le32_to_cpu(priv->active_rxon.filter_flags));
+		le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
@@ -1529,6 +1534,126 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
 						user_buf, count, ppos);
 }
 
+static ssize_t iwl_dbgfs_monitor_period_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos) {
+
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+	int period;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &period) != 1)
+		return -EINVAL;
+	if (period < 0 || period > IWL_MAX_MONITORING_PERIOD)
+		priv->cfg->monitor_recover_period = IWL_DEF_MONITORING_PERIOD;
+	else
+		priv->cfg->monitor_recover_period = period;
+
+	if (priv->cfg->monitor_recover_period)
+		mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies(
+			  priv->cfg->monitor_recover_period));
+	else
+		del_timer_sync(&priv->monitor_recover);
+	return count;
+}
+
+static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos) {
+
+	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+	int pos = 0;
+	char buf[200];
+	const size_t bufsz = sizeof(buf);
+	ssize_t ret;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n",
+		priv->bt_full_concurrent ? "full concurrency" : "3-wire");
+	pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, "
+			 "last traffic notif: %d\n",
+		priv->bt_status ? "On" : "Off", priv->notif_bt_traffic_load);
+	pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
+			 "sco_active: %d, kill_ack_mask: %x, "
+			 "kill_cts_mask: %x\n",
+		priv->bt_ch_announce, priv->bt_sco_active,
+		priv->kill_ack_mask, priv->kill_cts_mask);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
+	switch (priv->bt_traffic_load) {
+	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+		pos += scnprintf(buf + pos, bufsz - pos, "Continuous\n");
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+		pos += scnprintf(buf + pos, bufsz - pos, "High\n");
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+		pos += scnprintf(buf + pos, bufsz - pos, "Low\n");
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+	default:
+		pos += scnprintf(buf + pos, bufsz - pos, "None\n");
+		break;
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+
+	int pos = 0;
+	char buf[40];
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "use %s for aggregation\n",
+		(priv->cfg->use_rts_for_aggregation) ? "rts/cts" :
+		"cts-to-self");
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos) {
+
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+	int rts;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &rts) != 1)
+		return -EINVAL;
+	if (rts)
+		priv->cfg->use_rts_for_aggregation = true;
+	else
+		priv->cfg->use_rts_for_aggregation = false;
+	return count;
+}
+
+static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+
+	if (priv->cfg->ops->lib->debugfs_ops.reply_tx_error)
+		return priv->cfg->ops->lib->debugfs_ops.reply_tx_error(
+			file, user_buf, count, ppos);
+	else
+		return -ENODATA;
+}
 DEBUGFS_READ_FILE_OPS(rx_statistics);
 DEBUGFS_READ_FILE_OPS(tx_statistics);
 DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1552,6 +1677,10 @@ DEBUGFS_READ_FILE_OPS(rxon_flags);
 DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
 DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
 DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
+DEBUGFS_WRITE_FILE_OPS(monitor_period);
+DEBUGFS_READ_FILE_OPS(bt_traffic);
+DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
+DEBUGFS_READ_FILE_OPS(reply_tx_error);
 
 /*
  * Create the debugfs files and directories
@@ -1612,6 +1741,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
 	DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
 	if (priv->cfg->ops->lib->dev_txfifo_flush)
 		DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR);
+	DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR);
 
 	if (priv->cfg->sensitivity_calib_by_driver)
 		DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
@@ -1621,8 +1751,12 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
 		DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
 	if (priv->cfg->bt_statistics)
 		DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
+	DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
 	DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
 	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
+	DEBUGFS_ADD_FILE(monitor_period, dir_debug, S_IWUSR);
+	if (priv->cfg->advanced_bt_coexist)
+		DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
 	if (priv->cfg->sensitivity_calib_by_driver)
 		DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
 				 &priv->disable_sens_cal);
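The monitor_period file added above takes a decimal value in milliseconds: 0 stops the recovery watchdog, out-of-range values (negative or above IWL_MAX_MONITORING_PERIOD, 60000) fall back to IWL_DEF_MONITORING_PERIOD (1000), and anything else re-arms the timer with the new period. A sketch of driving it from userspace; the path is an assumption that depends on the debugfs mount point and phy index of the running system:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical path; adjust for the local debugfs layout */
		FILE *f = fopen("/sys/kernel/debug/ieee80211/phy0/iwlwifi"
				"/debug/monitor_period", "w");
		if (!f)
			return 1;
		fprintf(f, "%d\n", 0);	/* 0 disables the watchdog */
		return fclose(f) ? 1 : 0;
	}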
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 2e97cd2fa98a..74d25bcbfcb2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -47,6 +47,7 @@
47#include "iwl-led.h" 47#include "iwl-led.h"
48#include "iwl-power.h" 48#include "iwl-power.h"
49#include "iwl-agn-rs.h" 49#include "iwl-agn-rs.h"
50#include "iwl-agn-tt.h"
50 51
51struct iwl_tx_queue; 52struct iwl_tx_queue;
52 53
@@ -143,6 +144,7 @@ struct iwl_queue {
143/* One for each TFD */ 144/* One for each TFD */
144struct iwl_tx_info { 145struct iwl_tx_info {
145 struct sk_buff *skb; 146 struct sk_buff *skb;
147 struct iwl_rxon_context *ctx;
146}; 148};
147 149
148/** 150/**
 	struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
 };
 
-#define IWL_TX_FIFO_BK		0
+#define IWL_TX_FIFO_BK		0	/* shared */
 #define IWL_TX_FIFO_BE		1
-#define IWL_TX_FIFO_VI		2
+#define IWL_TX_FIFO_VI		2	/* shared */
 #define IWL_TX_FIFO_VO		3
+#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
+#define IWL_TX_FIFO_BE_IPAN	4
+#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
+#define IWL_TX_FIFO_VO_IPAN	5
 #define IWL_TX_FIFO_UNUSED	-1
 
 /* Minimum number of queues. MAX_NUM is defined in hw specific files.
@@ -264,11 +270,17 @@ struct iwl_channel_info {
264#define IWL_MIN_NUM_QUEUES 10 270#define IWL_MIN_NUM_QUEUES 10
265 271
266/* 272/*
267 * Queue #4 is the command queue for 3945/4965/5x00/1000/6x00, 273 * Command queue depends on iPAN support.
268 * the driver maps it into the appropriate device FIFO for the
269 * uCode.
270 */ 274 */
271#define IWL_CMD_QUEUE_NUM 4 275#define IWL_DEFAULT_CMD_QUEUE_NUM 4
276#define IWL_IPAN_CMD_QUEUE_NUM 9
277
278/*
279 * This queue number is required for proper operation
280 * because the ucode will stop/start the scheduler as
281 * required.
282 */
283#define IWL_IPAN_MCAST_QUEUE 8
272 284
273/* Power management (not Tx power) structures */ 285/* Power management (not Tx power) structures */
274 286
@@ -420,7 +432,7 @@ struct iwl_tid_data {
420}; 432};
421 433
422struct iwl_hw_key { 434struct iwl_hw_key {
423 enum ieee80211_key_alg alg; 435 u32 cipher;
424 int keylen; 436 int keylen;
425 u8 keyidx; 437 u8 keyidx;
426 u8 key[32]; 438 u8 key[32];
@@ -434,7 +446,13 @@ union iwl_ht_rate_supp {
434 }; 446 };
435}; 447};
436 448
437#define CFG_HT_RX_AMPDU_FACTOR_DEF (0x3) 449#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
450#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
451#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
452#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
453#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
454#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
455#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
438 456
439/* 457/*
440 * Maximal MPDU density for TX aggregation 458 * Maximal MPDU density for TX aggregation
@@ -443,19 +461,17 @@ union iwl_ht_rate_supp {
443 * 6 - 8us density 461 * 6 - 8us density
444 * 7 - 16us density 462 * 7 - 16us density
445 */ 463 */
464#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
446#define CFG_HT_MPDU_DENSITY_4USEC (0x5) 465#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
466#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
467#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
447#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC 468#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
469#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
470#define CFG_HT_MPDU_DENSITY_MIN (0x1)
448 471
449struct iwl_ht_config { 472struct iwl_ht_config {
450 /* self configuration data */
451 bool is_ht;
452 bool is_40mhz;
453 bool single_chain_sufficient; 473 bool single_chain_sufficient;
454 enum ieee80211_smps_mode smps; /* current smps mode */ 474 enum ieee80211_smps_mode smps; /* current smps mode */
455 /* BSS related data */
456 u8 extension_chan_offset;
457 u8 ht_protection;
458 u8 non_GF_STA_present;
459}; 475};
460 476
461/* QoS structures */ 477/* QoS structures */
 struct iwl_station_entry {
 	struct iwl_addsta_cmd sta;
 	struct iwl_tid_data tid[MAX_TID_COUNT];
-	u8 used;
+	u8 used, ctxid;
 	struct iwl_hw_key keyinfo;
 	struct iwl_link_quality_cmd *lq;
 };
 
 struct iwl_station_priv_common {
+	struct iwl_rxon_context *ctx;
 	u8 sta_id;
 };
 
@@ -507,6 +524,7 @@ struct iwl_station_priv {
507 * space for us to put data into. 524 * space for us to put data into.
508 */ 525 */
509struct iwl_vif_priv { 526struct iwl_vif_priv {
527 struct iwl_rxon_context *ctx;
510 u8 ibss_bssid_sta_id; 528 u8 ibss_bssid_sta_id;
511}; 529};
512 530
@@ -564,6 +582,7 @@ enum iwl_ucode_tlv_type {
564 IWL_UCODE_TLV_INIT_DATA = 4, 582 IWL_UCODE_TLV_INIT_DATA = 4,
565 IWL_UCODE_TLV_BOOT = 5, 583 IWL_UCODE_TLV_BOOT = 5,
566 IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */ 584 IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
585 IWL_UCODE_TLV_PAN = 7,
567 IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8, 586 IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
568 IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9, 587 IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
569 IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10, 588 IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
@@ -658,7 +677,6 @@ struct iwl_sensitivity_ranges {
658 * @rx_page_order: Rx buffer page order 677 * @rx_page_order: Rx buffer page order
659 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR 678 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
660 * @max_stations: 679 * @max_stations:
661 * @bcast_sta_id:
662 * @ht40_channel: is 40MHz width possible in band 2.4 680 * @ht40_channel: is 40MHz width possible in band 2.4
663 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ) 681 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
664 * @sw_crypto: 0 for hw, 1 for sw 682 * @sw_crypto: 0 for hw, 1 for sw
@@ -682,7 +700,6 @@ struct iwl_hw_params {
 	u32 rx_page_order;
 	u32 rx_wrt_ptr_reg;
 	u8  max_stations;
-	u8  bcast_sta_id;
 	u8  ht40_channel;
 	u8  max_beacon_itrvl;	/* in 1024 ms */
 	u32 max_inst_size;
@@ -928,7 +945,7 @@ enum iwl_pa_type {
 struct isr_statistics {
 	u32 hw;
 	u32 sw;
-	u32 sw_err;
+	u32 err_code;
 	u32 sch;
 	u32 alive;
 	u32 rfkill;
@@ -940,6 +957,50 @@ struct isr_statistics {
 	u32 unhandled;
 };
 
+/* reply_tx_statistics (for _agn devices) */
+struct reply_tx_error_statistics {
+	u32 pp_delay;
+	u32 pp_few_bytes;
+	u32 pp_bt_prio;
+	u32 pp_quiet_period;
+	u32 pp_calc_ttak;
+	u32 int_crossed_retry;
+	u32 short_limit;
+	u32 long_limit;
+	u32 fifo_underrun;
+	u32 drain_flow;
+	u32 rfkill_flush;
+	u32 life_expire;
+	u32 dest_ps;
+	u32 host_abort;
+	u32 bt_retry;
+	u32 sta_invalid;
+	u32 frag_drop;
+	u32 tid_disable;
+	u32 fifo_flush;
+	u32 insuff_cf_poll;
+	u32 fail_hw_drop;
+	u32 sta_color_mismatch;
+	u32 unknown;
+};
+
+/* reply_agg_tx_statistics (for _agn devices) */
+struct reply_agg_tx_error_statistics {
+	u32 underrun;
+	u32 bt_prio;
+	u32 few_bytes;
+	u32 abort;
+	u32 last_sent_ttl;
+	u32 last_sent_try;
+	u32 last_sent_bt_kill;
+	u32 scd_query;
+	u32 bad_crc32;
+	u32 response;
+	u32 dump_tx;
+	u32 delay_tx;
+	u32 unknown;
+};
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 /* management statistics */
 enum iwl_mgmt_stats {
@@ -1052,7 +1113,10 @@ struct iwl_event_log {
 #define IWL_DEF_MONITORING_PERIOD	(1000)
 #define IWL_LONG_MONITORING_PERIOD	(5000)
 #define IWL_ONE_HUNDRED_MSECS   (100)
-#define IWL_SIXTY_SECS          (60000)
+#define IWL_MAX_MONITORING_PERIOD	(60000)
+
+/* BT Antenna Coupling Threshold (dB) */
+#define IWL_BT_ANTENNA_COUPLING_THRESHOLD	(35)
 
 enum iwl_reset {
 	IWL_RF_RESET = 0,
@@ -1082,6 +1146,64 @@ struct iwl_force_reset {
  */
 #define IWLAGN_EXT_BEACON_TIME_POS	22
 
+enum iwl_rxon_context_id {
+	IWL_RXON_CTX_BSS,
+	IWL_RXON_CTX_PAN,
+
+	NUM_IWL_RXON_CTX
+};
+
+struct iwl_rxon_context {
+	struct ieee80211_vif *vif;
+
+	const u8 *ac_to_fifo;
+	const u8 *ac_to_queue;
+	u8 mcast_queue;
+
+	/*
+	 * We could use the vif to indicate active, but we
+	 * also need it to be active during disabling when
+	 * we already removed the vif for type setting.
+	 */
+	bool always_active, is_active;
+
+	enum iwl_rxon_context_id ctxid;
+
+	u32 interface_modes, exclusive_interface_modes;
+	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+	/*
+	 * We declare this const so it can only be
+	 * changed via explicit cast within the
+	 * routines that actually update the physical
+	 * hardware.
+	 */
+	const struct iwl_rxon_cmd active;
+	struct iwl_rxon_cmd staging;
+
+	struct iwl_rxon_time_cmd timing;
+
+	struct iwl_qos_info qos_data;
+
+	u8 bcast_sta_id, ap_sta_id;
+
+	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+	u8 qos_cmd;
+	u8 wep_key_cmd;
+
+	struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
+	u8 key_mapping_keys;
+
+	__le32 station_flags;
+
+	struct {
+		bool non_gf_sta_present;
+		u8 protection;
+		bool enabled, is_40mhz;
+		u8 extension_chan_offset;
+	} ht;
+};
+
 struct iwl_priv {
 
 	/* ieee device used by generic ieee processing code */
@@ -1110,6 +1232,9 @@ struct iwl_priv {
 	u32 ucode_beacon_time;
 	int missed_beacon_threshold;
 
+	/* track IBSS manager (last beacon) status */
+	u32 ibss_manager;
+
 	/* storing the jiffies when the plcp error rate is received */
 	unsigned long plcp_jiffies;
 
@@ -1155,6 +1280,15 @@ struct iwl_priv {
 	u32 hw_wa_rev;
 	u8   rev_id;
 
+	/* microcode/device supports multiple contexts */
+	u8 valid_contexts;
+
+	/* command queue number */
+	u8 cmd_queue;
+
+	/* max number of station keys */
+	u8 sta_key_max_num;
+
 	/* EEPROM MAC addresses */
 	struct mac_address addresses[2];
 
@@ -1172,15 +1306,7 @@ struct iwl_priv {
 	u8 ucode_write_complete;	/* the image write is complete */
 	char firmware_name[25];
 
-
-	struct iwl_rxon_time_cmd rxon_timing;
-
-	/* We declare this const so it can only be
-	 * changed via explicit cast within the
-	 * routines that actually update the physical
-	 * hardware */
-	const struct iwl_rxon_cmd active_rxon;
-	struct iwl_rxon_cmd staging_rxon;
+	struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
 
 	struct iwl_switch_rxon switch_rxon;
 
@@ -1242,8 +1368,6 @@ struct iwl_priv {
 	spinlock_t sta_lock;
 	int num_stations;
 	struct iwl_station_entry stations[IWL_STATION_COUNT];
-	struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; /* protected by mutex */
-	u8 key_mapping_key;
 	unsigned long ucode_key_table;
 
 	/* queue refcounts */
@@ -1268,7 +1392,6 @@ struct iwl_priv {
 
 	/* Last Rx'd beacon timestamp */
 	u64 timestamp;
-	struct ieee80211_vif *vif;
 
 	union {
 #if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
@@ -1336,6 +1459,9 @@ struct iwl_priv {
 
 	struct iwl_notif_statistics statistics;
 	struct iwl_bt_notif_statistics statistics_bt;
+	/* counts reply_tx error */
+	struct reply_tx_error_statistics reply_tx_stats;
+	struct reply_agg_tx_error_statistics reply_agg_tx_stats;
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	struct iwl_notif_statistics accum_statistics;
 	struct iwl_notif_statistics delta_statistics;
@@ -1348,12 +1474,27 @@ struct iwl_priv {
 #endif
 	};
 
+	/* bt coex */
+	u8 bt_status;
+	u8 bt_traffic_load, notif_bt_traffic_load;
+	bool bt_ch_announce;
+	bool bt_sco_active;
+	bool bt_full_concurrent;
+	bool bt_ant_couple_ok;
+	__le32 kill_ack_mask;
+	__le32 kill_cts_mask;
+	__le16 bt_valid;
+	u16 bt_on_thresh;
+	u16 bt_duration;
+	u16 dynamic_frag_thresh;
+	u16 dynamic_agg_thresh;
+	u8 bt_ci_compliance;
+	struct work_struct bt_traffic_change_work;
+
 	struct iwl_hw_params hw_params;
 
 	u32 inta_mask;
 
-	struct iwl_qos_info qos_data;
-
 	struct workqueue_struct *workqueue;
 
 	struct work_struct restart;
@@ -1361,11 +1502,15 @@ struct iwl_priv {
 	struct work_struct rx_replenish;
 	struct work_struct abort_scan;
 	struct work_struct beacon_update;
+	struct iwl_rxon_context *beacon_ctx;
+
 	struct work_struct tt_work;
 	struct work_struct ct_enter;
 	struct work_struct ct_exit;
 	struct work_struct start_internal_scan;
 	struct work_struct tx_flush;
+	struct work_struct bt_full_concurrency;
+	struct work_struct bt_runtime_config;
 
 	struct tasklet_struct irq_tasklet;
 
@@ -1453,10 +1598,34 @@ static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
 	return NULL;
 }
 
+static inline struct iwl_rxon_context *
+iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+	return vif_priv->ctx;
+}
+
+#define for_each_context(priv, ctx)				\
+	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
+	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
+		if (priv->valid_contexts & BIT(ctx->ctxid))
+
+static inline int iwl_is_associated(struct iwl_priv *priv,
+				    enum iwl_rxon_context_id ctxid)
+{
+	return (priv->contexts[ctxid].active.filter_flags &
+			RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_is_any_associated(struct iwl_priv *priv)
+{
+	return iwl_is_associated(priv, IWL_RXON_CTX_BSS);
+}
 
-static inline int iwl_is_associated(struct iwl_priv *priv)
+static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
 {
-	return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+	return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
 }
 
1462static inline int is_channel_valid(const struct iwl_channel_info *ch_info) 1631static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
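for_each_context() above expands to a for loop whose body is guarded by an if on priv->valid_contexts, so only contexts the uCode actually advertised are visited (and, like any trailing-if iteration macro, it needs care around dangling else). A minimal usage sketch, with example_clear_assoc as an illustrative name:

	static void example_clear_assoc(struct iwl_priv *priv)
	{
		struct iwl_rxon_context *ctx;

		for_each_context(priv, ctx) {
			/* per-context replacement for the old global check */
			if (iwl_is_associated_ctx(ctx))
				ctx->staging.filter_flags &=
					~RXON_FILTER_ASSOC_MSK;
		}
	}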
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 258d059ef41f..c373b53babea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -97,6 +97,17 @@ const char *get_cmd_string(u8 cmd)
 	IWL_CMD(REPLY_TX_POWER_DBM_CMD);
 	IWL_CMD(TEMPERATURE_NOTIFICATION);
 	IWL_CMD(TX_ANT_CONFIGURATION_CMD);
+	IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
+	IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
+	IWL_CMD(REPLY_BT_COEX_PROT_ENV);
+	IWL_CMD(REPLY_WIPAN_PARAMS);
+	IWL_CMD(REPLY_WIPAN_RXON);
+	IWL_CMD(REPLY_WIPAN_RXON_TIMING);
+	IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
+	IWL_CMD(REPLY_WIPAN_QOS_PARAM);
+	IWL_CMD(REPLY_WIPAN_WEPKEY);
+	IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
+	IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
 	default:
 		return "UNKNOWN";
 
@@ -229,7 +240,7 @@ cancel:
 	 * in later, it will possibly set an invalid
 	 * address (cmd->meta.source).
 	 */
-	priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_idx].flags &=
+	priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
 							~CMD_WANT_SKB;
 	}
 fail:
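get_cmd_string() simply maps opcodes to their names through the IWL_CMD() stringify macro, so the new WIPAN and BT-coex entries above make those commands show up by name in debug logs instead of "UNKNOWN". A sketch of the typical call site, assuming a received packet whose header field pkt->hdr.cmd carries the opcode:

	IWL_DEBUG_HC(priv, "Handling %s (0x%02x)\n",
		     get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);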
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index cda6a94d6cc9..63c0ab46261f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -192,47 +192,6 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
 	IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
 }
 
-/* default Thermal Throttling transaction table
- * Current state   |         Throttling Down               |  Throttling Up
- *=============================================================================
- *  Condition Nxt State  Condition Nxt State Condition Nxt State
- *-----------------------------------------------------------------------------
- *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
- *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
- *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
- *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
- *=============================================================================
- */
-static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
-	{IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
-	{IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
-	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
-};
-static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
-	{IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
-	{IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
-	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
-};
-static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
-	{IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
-	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
-	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
-};
-static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
-	{IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
-	{IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
-	{IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
-};
-
-/* Advance Thermal Throttling default restriction table */
-static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = {
-	{IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true },
-	{IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true },
-	{IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false },
-	{IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false }
-};
-
-
 static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
 				    struct iwl_powertable_cmd *cmd)
 {
@@ -308,7 +267,6 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
 int iwl_power_update_mode(struct iwl_priv *priv, bool force)
 {
 	int ret = 0;
-	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
 	bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
 	bool update_chains;
 	struct iwl_powertable_cmd cmd;
@@ -325,9 +283,13 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
 	else if (priv->cfg->supports_idle &&
 		 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
 		iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20);
-	else if (tt->state >= IWL_TI_1)
-		iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper);
-	else if (!enabled)
+	else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
+		 priv->cfg->ops->lib->tt_ops.tt_power_mode &&
+		 priv->cfg->ops->lib->tt_ops.lower_power_detection(priv)) {
+		/* in thermal throttling low power state */
+		iwl_static_sleep_cmd(priv, &cmd,
+		    priv->cfg->ops->lib->tt_ops.tt_power_mode(priv), dtimper);
+	} else if (!enabled)
 		iwl_power_sleep_cam_cmd(priv, &cmd);
 	else if (priv->power_data.debug_sleep_level_override >= 0)
 		iwl_static_sleep_cmd(priv, &cmd,
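With the table-driven thermal throttling moved out of iwl-power.c, core code now reaches it only through the optional tt_ops vector, checked hook-by-hook as in the hunk above. A sketch of how an _agn device would wire it up; the handler names are illustrative, matching the shape of the hooks rather than any specific file in this series:

	static struct iwl_lib_ops example_lib_ops = {
		/* ... other lib ops ... */
		.tt_ops = {
			.lower_power_detection = iwl_tt_is_low_power_state,
			.tt_power_mode = iwl_tt_current_power_mode,
			.ct_kill_check = iwl_check_for_ct_kill,
		},
	};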
@@ -367,592 +329,6 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
367} 329}
368EXPORT_SYMBOL(iwl_power_update_mode); 330EXPORT_SYMBOL(iwl_power_update_mode);
369 331
370bool iwl_ht_enabled(struct iwl_priv *priv)
371{
372 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
373 struct iwl_tt_restriction *restriction;
374
375 if (!priv->thermal_throttle.advanced_tt)
376 return true;
377 restriction = tt->restriction + tt->state;
378 return restriction->is_ht;
379}
380EXPORT_SYMBOL(iwl_ht_enabled);
381
382bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
383{
384 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
385 bool within_margin = false;
386
387 if (priv->cfg->temperature_kelvin)
388 temp = KELVIN_TO_CELSIUS(priv->temperature);
389
390 if (!priv->thermal_throttle.advanced_tt)
391 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
392 CT_KILL_THRESHOLD_LEGACY) ? true : false;
393 else
394 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
395 CT_KILL_THRESHOLD) ? true : false;
396 return within_margin;
397}
398
399enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
400{
401 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
402 struct iwl_tt_restriction *restriction;
403
404 if (!priv->thermal_throttle.advanced_tt)
405 return IWL_ANT_OK_MULTI;
406 restriction = tt->restriction + tt->state;
407 return restriction->tx_stream;
408}
409EXPORT_SYMBOL(iwl_tx_ant_restriction);
410
411enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
412{
413 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
414 struct iwl_tt_restriction *restriction;
415
416 if (!priv->thermal_throttle.advanced_tt)
417 return IWL_ANT_OK_MULTI;
418 restriction = tt->restriction + tt->state;
419 return restriction->rx_stream;
420}
421
422#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
423#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */
424
425/*
426 * toggle the bit to wake up uCode and check the temperature
427 * if the temperature is below CT, uCode will stay awake and send card
428 * state notification with CT_KILL bit clear to inform Thermal Throttling
429 * Management to change state. Otherwise, uCode will go back to sleep
430 * without doing anything, driver should continue the 5 seconds timer
431 * to wake up uCode for temperature check until temperature drop below CT
432 */
433static void iwl_tt_check_exit_ct_kill(unsigned long data)
434{
435 struct iwl_priv *priv = (struct iwl_priv *)data;
436 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
437 unsigned long flags;
438
439 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
440 return;
441
442 if (tt->state == IWL_TI_CT_KILL) {
443 if (priv->thermal_throttle.ct_kill_toggle) {
444 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
445 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
446 priv->thermal_throttle.ct_kill_toggle = false;
447 } else {
448 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
449 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
450 priv->thermal_throttle.ct_kill_toggle = true;
451 }
452 iwl_read32(priv, CSR_UCODE_DRV_GP1);
453 spin_lock_irqsave(&priv->reg_lock, flags);
454 if (!iwl_grab_nic_access(priv))
455 iwl_release_nic_access(priv);
456 spin_unlock_irqrestore(&priv->reg_lock, flags);
457
458 /* Reschedule the ct_kill timer to occur in
459 * CT_KILL_EXIT_DURATION seconds to ensure we get a
460 * thermal update */
461 IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n");
462 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
463 CT_KILL_EXIT_DURATION * HZ);
464 }
465}
466
467static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
468 bool stop)
469{
470 if (stop) {
471 IWL_DEBUG_POWER(priv, "Stop all queues\n");
472 if (priv->mac80211_registered)
473 ieee80211_stop_queues(priv->hw);
474 IWL_DEBUG_POWER(priv,
475 "Schedule 5 seconds CT_KILL Timer\n");
476 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
477 CT_KILL_EXIT_DURATION * HZ);
478 } else {
479 IWL_DEBUG_POWER(priv, "Wake all queues\n");
480 if (priv->mac80211_registered)
481 ieee80211_wake_queues(priv->hw);
482 }
483}
484
485static void iwl_tt_ready_for_ct_kill(unsigned long data)
486{
487 struct iwl_priv *priv = (struct iwl_priv *)data;
488 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
489
490 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
491 return;
492
493 /* temperature timer expired, ready to go into CT_KILL state */
494 if (tt->state != IWL_TI_CT_KILL) {
495 IWL_DEBUG_POWER(priv, "entering CT_KILL state when temperature timer expired\n");
496 tt->state = IWL_TI_CT_KILL;
497 set_bit(STATUS_CT_KILL, &priv->status);
498 iwl_perform_ct_kill_task(priv, true);
499 }
500}
501
502static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
503{
504 IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n");
505 /* make request to retrieve statistics information */
506 iwl_send_statistics_request(priv, CMD_SYNC, false);
507 /* Reschedule the ct_kill wait timer */
508 mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
509 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
510}
511
512#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY)
513#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100)
514#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90)
515
516/*
517 * Legacy thermal throttling
518 * 1) Avoid NIC destruction due to high temperatures
519 * Chip will identify dangerously high temperatures that can
520 * harm the device and will power down
521 * 2) Avoid the NIC power down due to high temperature
522 * Throttle early enough to lower the power consumption before
523 * drastic steps are needed
524 */
525static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
526{
527 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
528 enum iwl_tt_state old_state;
529
530#ifdef CONFIG_IWLWIFI_DEBUG
531 if ((tt->tt_previous_temp) &&
532 (temp > tt->tt_previous_temp) &&
533 ((temp - tt->tt_previous_temp) >
534 IWL_TT_INCREASE_MARGIN)) {
535 IWL_DEBUG_POWER(priv,
536 "Temperature increase %d degree Celsius\n",
537 (temp - tt->tt_previous_temp));
538 }
539#endif
540 old_state = tt->state;
541 /* in Celsius */
542 if (temp >= IWL_MINIMAL_POWER_THRESHOLD)
543 tt->state = IWL_TI_CT_KILL;
544 else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2)
545 tt->state = IWL_TI_2;
546 else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1)
547 tt->state = IWL_TI_1;
548 else
549 tt->state = IWL_TI_0;
550
551#ifdef CONFIG_IWLWIFI_DEBUG
552 tt->tt_previous_temp = temp;
553#endif
554 /* stop ct_kill_waiting_tm timer */
555 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
556 if (tt->state != old_state) {
557 switch (tt->state) {
558 case IWL_TI_0:
559 /*
560 * When the system is ready to go back to IWL_TI_0
561 * we only have to call iwl_power_update_mode() to
562 * do so.
563 */
564 break;
565 case IWL_TI_1:
566 tt->tt_power_mode = IWL_POWER_INDEX_3;
567 break;
568 case IWL_TI_2:
569 tt->tt_power_mode = IWL_POWER_INDEX_4;
570 break;
571 default:
572 tt->tt_power_mode = IWL_POWER_INDEX_5;
573 break;
574 }
575 mutex_lock(&priv->mutex);
576 if (old_state == IWL_TI_CT_KILL)
577 clear_bit(STATUS_CT_KILL, &priv->status);
578 if (tt->state != IWL_TI_CT_KILL &&
579 iwl_power_update_mode(priv, true)) {
580 /* TT state not updated
581 * try again during next temperature read
582 */
583 if (old_state == IWL_TI_CT_KILL)
584 set_bit(STATUS_CT_KILL, &priv->status);
585 tt->state = old_state;
586 IWL_ERR(priv, "Cannot update power mode, "
587 "TT state not updated\n");
588 } else {
589 if (tt->state == IWL_TI_CT_KILL) {
590 if (force) {
591 set_bit(STATUS_CT_KILL, &priv->status);
592 iwl_perform_ct_kill_task(priv, true);
593 } else {
594 iwl_prepare_ct_kill_task(priv);
595 tt->state = old_state;
596 }
597 } else if (old_state == IWL_TI_CT_KILL &&
598 tt->state != IWL_TI_CT_KILL)
599 iwl_perform_ct_kill_task(priv, false);
600 IWL_DEBUG_POWER(priv, "Temperature state changed %u\n",
601 tt->state);
602 IWL_DEBUG_POWER(priv, "Power Index change to %u\n",
603 tt->tt_power_mode);
604 }
605 mutex_unlock(&priv->mutex);
606 }
607}
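
The threshold cascade above reduces to a simple temperature-to-state mapping. A condensed, standalone sketch of it (thresholds in degrees Celsius; CT_KILL_THRESHOLD_LEGACY is assumed to be 114 here, matching the advanced state table further below):

enum tt_state { TI_0, TI_1, TI_2, TI_CT_KILL };

/* Standalone sketch of the legacy mapping implemented in
 * iwl_legacy_tt_handler(); the 114 value is an assumption. */
static enum tt_state legacy_state_for_temp(int temp)
{
	if (temp >= 114)	/* IWL_MINIMAL_POWER_THRESHOLD */
		return TI_CT_KILL;
	if (temp >= 100)	/* IWL_REDUCED_PERFORMANCE_THRESHOLD_2 */
		return TI_2;
	if (temp >= 90)		/* IWL_REDUCED_PERFORMANCE_THRESHOLD_1 */
		return TI_1;
	return TI_0;
}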
608
609/*
610 * Advanced thermal throttling
611 * 1) Avoid NIC destruction due to high temperatures
612 * Chip will identify dangerously high temperatures that can
613 * harm the device and will power down
614 * 2) Avoid NIC power-down due to high temperature
615 * Throttle early enough to lower the power consumption before
616 * drastic steps are needed
617 * Actions include relaxing the power down sleep thresholds and
618 * decreasing the number of TX streams
619 * 3) Avoid throughput performance impact as much as possible
620 *
621 *=============================================================================
622 * Condition Nxt State Condition Nxt State Condition Nxt State
623 *-----------------------------------------------------------------------------
624 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
625 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
626 * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1
627 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
628 *=============================================================================
629 */
630static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
631{
632 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
633 int i;
634 bool changed = false;
635 enum iwl_tt_state old_state;
636 struct iwl_tt_trans *transaction;
637
638 old_state = tt->state;
639 for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) {
640 /* Based on the current TT state,
641 * find the corresponding transition table;
642 * each table has (IWL_TI_STATE_MAX - 1) entries.
643 * tt->transaction + (old_state * (IWL_TI_STATE_MAX - 1))
644 * advances to the correct table.
645 * Then, based on the current temperature,
646 * find the next state to transition to by going
647 * through all (IWL_TI_STATE_MAX - 1) entries
648 * in the current table to see if a transition is needed.
649 */
650 transaction = tt->transaction +
651 ((old_state * (IWL_TI_STATE_MAX - 1)) + i);
652 if (temp >= transaction->tt_low &&
653 temp <= transaction->tt_high) {
654#ifdef CONFIG_IWLWIFI_DEBUG
655 if ((tt->tt_previous_temp) &&
656 (temp > tt->tt_previous_temp) &&
657 ((temp - tt->tt_previous_temp) >
658 IWL_TT_INCREASE_MARGIN)) {
659 IWL_DEBUG_POWER(priv,
660 "Temperature increase %d "
661 "degree Celsius\n",
662 (temp - tt->tt_previous_temp));
663 }
664 tt->tt_previous_temp = temp;
665#endif
666 if (old_state !=
667 transaction->next_state) {
668 changed = true;
669 tt->state =
670 transaction->next_state;
671 }
672 break;
673 }
674 }
675 /* stop ct_kill_waiting_tm timer */
676 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
677 if (changed) {
678 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
679
680 if (tt->state >= IWL_TI_1) {
681 /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
682 tt->tt_power_mode = IWL_POWER_INDEX_5;
683 if (!iwl_ht_enabled(priv))
684 /* disable HT */
685 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
686 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
687 RXON_FLG_HT40_PROT_MSK |
688 RXON_FLG_HT_PROT_MSK);
689 else {
690 /* check HT capability and set
691 * according to the system HT capability
692 * in case it was disabled before */
693 iwl_set_rxon_ht(priv, &priv->current_ht_config);
694 }
695
696 } else {
697 /*
698 * restore system power setting -- it will be
699 * recalculated automatically.
700 */
701
702 /* check HT capability and set
703 * according to the system HT capability
704 * in case it was disabled before */
705 iwl_set_rxon_ht(priv, &priv->current_ht_config);
706 }
707 mutex_lock(&priv->mutex);
708 if (old_state == IWL_TI_CT_KILL)
709 clear_bit(STATUS_CT_KILL, &priv->status);
710 if (tt->state != IWL_TI_CT_KILL &&
711 iwl_power_update_mode(priv, true)) {
712 /* TT state not updated
713 * try again during next temperature read
714 */
715 IWL_ERR(priv, "Cannot update power mode, "
716 "TT state not updated\n");
717 if (old_state == IWL_TI_CT_KILL)
718 set_bit(STATUS_CT_KILL, &priv->status);
719 tt->state = old_state;
720 } else {
721 IWL_DEBUG_POWER(priv,
722 "Thermal Throttling to new state: %u\n",
723 tt->state);
724 if (old_state != IWL_TI_CT_KILL &&
725 tt->state == IWL_TI_CT_KILL) {
726 if (force) {
727 IWL_DEBUG_POWER(priv,
728 "Enter IWL_TI_CT_KILL\n");
729 set_bit(STATUS_CT_KILL, &priv->status);
730 iwl_perform_ct_kill_task(priv, true);
731 } else {
732 iwl_prepare_ct_kill_task(priv);
733 tt->state = old_state;
734 }
735 } else if (old_state == IWL_TI_CT_KILL &&
736 tt->state != IWL_TI_CT_KILL) {
737 IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n");
738 iwl_perform_ct_kill_task(priv, false);
739 }
740 }
741 mutex_unlock(&priv->mutex);
742 }
743}
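
The lookup above walks a flat array holding IWL_TI_STATE_MAX rows of (IWL_TI_STATE_MAX - 1) transition entries each. A standalone sketch of that indexing, with illustrative threshold values taken from the state table above (the exact low/high bounds in the driver's tables may differ):

enum tt_state { TI_0, TI_1, TI_2, TI_CT_KILL, TI_STATE_MAX };

struct tt_trans {
	enum tt_state next_state;
	int tt_low;
	int tt_high;
};

/* TI_STATE_MAX rows of (TI_STATE_MAX - 1) entries each, flattened;
 * values are illustrative only. */
static const struct tt_trans table[TI_STATE_MAX * (TI_STATE_MAX - 1)] = {
	/* from TI_0 */
	{ TI_0, -273, 104 }, { TI_1, 105, 113 }, { TI_CT_KILL, 114, 999 },
	/* from TI_1 */
	{ TI_0, -273,  95 }, { TI_2, 110, 113 }, { TI_CT_KILL, 114, 999 },
	/* from TI_2 */
	{ TI_1, -273, 100 }, { TI_2, 101, 113 }, { TI_CT_KILL, 114, 999 },
	/* from TI_CT_KILL */
	{ TI_0, -273,  95 }, { TI_CT_KILL, 96, 999 }, { TI_CT_KILL, 96, 999 },
};

static enum tt_state next_state(enum tt_state old, int temp)
{
	int i;

	for (i = 0; i < TI_STATE_MAX - 1; i++) {
		const struct tt_trans *t =
			&table[old * (TI_STATE_MAX - 1) + i];

		if (temp >= t->tt_low && temp <= t->tt_high)
			return t->next_state;
	}
	return old;	/* hysteresis gap: keep the current state */
}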
744
745/* Card State Notification indicated the critical temperature was reached.
746 * If PSP is not enabled, no thermal throttling is performed;
747 * just set the GP1 bit to acknowledge the event.
748 * Otherwise, go into the IWL_TI_CT_KILL state.
749 * Since the Card State Notification does not provide any temperature
750 * reading, for legacy mode
751 * just pass the CT_KILL temperature to iwl_legacy_tt_handler();
752 * for advanced mode,
753 * pass CT_KILL_THRESHOLD+1 to make sure we move into IWL_TI_CT_KILL.
754 */
755static void iwl_bg_ct_enter(struct work_struct *work)
756{
757 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
758 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
759
760 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
761 return;
762
763 if (!iwl_is_ready(priv))
764 return;
765
766 if (tt->state != IWL_TI_CT_KILL) {
767 IWL_ERR(priv, "Device reached critical temperature "
768 "- ucode going to sleep!\n");
769 if (!priv->thermal_throttle.advanced_tt)
770 iwl_legacy_tt_handler(priv,
771 IWL_MINIMAL_POWER_THRESHOLD,
772 true);
773 else
774 iwl_advance_tt_handler(priv,
775 CT_KILL_THRESHOLD + 1, true);
776 }
777}
778
779/* Card State Notification indicated the device is out of the critical
780 * temperature range. Since the Card State Notification does not provide
781 * a temperature reading, pass the IWL_REDUCED_PERFORMANCE_THRESHOLD_2
782 * temperature to iwl_legacy_tt_handler() to get out of IWL_CT_KILL state.
783 */
784static void iwl_bg_ct_exit(struct work_struct *work)
785{
786 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
787 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
788
789 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
790 return;
791
792 if (!iwl_is_ready(priv))
793 return;
794
795 /* stop ct_kill_exit_tm timer */
796 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
797
798 if (tt->state == IWL_TI_CT_KILL) {
799 IWL_ERR(priv,
800 "Device temperature below critical"
801 "- ucode awake!\n");
802 /*
803 * exit from CT_KILL state
804 * reset the current temperature reading
805 */
806 priv->temperature = 0;
807 if (!priv->thermal_throttle.advanced_tt)
808 iwl_legacy_tt_handler(priv,
809 IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
810 true);
811 else
812 iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
813 true);
814 }
815}
816
817void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
818{
819 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
820 return;
821
822 IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n");
823 queue_work(priv->workqueue, &priv->ct_enter);
824}
825EXPORT_SYMBOL(iwl_tt_enter_ct_kill);
826
827void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
828{
829 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
830 return;
831
832 IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n");
833 queue_work(priv->workqueue, &priv->ct_exit);
834}
835EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
836
837static void iwl_bg_tt_work(struct work_struct *work)
838{
839 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
840 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
841
842 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
843 return;
844
845 if (priv->cfg->temperature_kelvin)
846 temp = KELVIN_TO_CELSIUS(priv->temperature);
847
848 if (!priv->thermal_throttle.advanced_tt)
849 iwl_legacy_tt_handler(priv, temp, false);
850 else
851 iwl_advance_tt_handler(priv, temp, false);
852}
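
A note on units: some devices report temperature in Kelvin, which the work handler above normalizes before dispatching to the legacy or advanced handler. A minimal sketch of that normalization (the driver defines its own KELVIN_TO_CELSIUS macro in its headers; the plain 273 offset here is an assumption for illustration):

/* Sketch only: normalize a raw reading to Celsius before handing it
 * to the handlers; the real macro lives in the driver's headers. */
#define SKETCH_KELVIN_TO_CELSIUS(x)	((x) - 273)

static int normalize_temp(int raw, int reports_kelvin)
{
	return reports_kelvin ? SKETCH_KELVIN_TO_CELSIUS(raw) : raw;
}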
853
854void iwl_tt_handler(struct iwl_priv *priv)
855{
856 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
857 return;
858
859 IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n");
860 queue_work(priv->workqueue, &priv->tt_work);
861}
862EXPORT_SYMBOL(iwl_tt_handler);
863
864/* Thermal throttling initialization
865 * For advanced thermal throttling:
866 * Initialize Thermal Index and temperature threshold table
867 * Initialize thermal throttling restriction table
868 */
869void iwl_tt_initialize(struct iwl_priv *priv)
870{
871 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
872 int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
873 struct iwl_tt_trans *transaction;
874
875 IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");
876
877 memset(tt, 0, sizeof(struct iwl_tt_mgmt));
878
879 tt->state = IWL_TI_0;
880 init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
881 priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
882 priv->thermal_throttle.ct_kill_exit_tm.function =
883 iwl_tt_check_exit_ct_kill;
884 init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
885 priv->thermal_throttle.ct_kill_waiting_tm.data = (unsigned long)priv;
886 priv->thermal_throttle.ct_kill_waiting_tm.function =
887 iwl_tt_ready_for_ct_kill;
888 /* setup deferred ct kill work */
889 INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
890 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
891 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
892
893 if (priv->cfg->adv_thermal_throttle) {
894 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
895 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
896 IWL_TI_STATE_MAX, GFP_KERNEL);
897 tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) *
898 IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1),
899 GFP_KERNEL);
900 if (!tt->restriction || !tt->transaction) {
901 IWL_ERR(priv, "Fallback to Legacy Throttling\n");
902 priv->thermal_throttle.advanced_tt = false;
903 kfree(tt->restriction);
904 tt->restriction = NULL;
905 kfree(tt->transaction);
906 tt->transaction = NULL;
907 } else {
908 transaction = tt->transaction +
909 (IWL_TI_0 * (IWL_TI_STATE_MAX - 1));
910 memcpy(transaction, &tt_range_0[0], size);
911 transaction = tt->transaction +
912 (IWL_TI_1 * (IWL_TI_STATE_MAX - 1));
913 memcpy(transaction, &tt_range_1[0], size);
914 transaction = tt->transaction +
915 (IWL_TI_2 * (IWL_TI_STATE_MAX - 1));
916 memcpy(transaction, &tt_range_2[0], size);
917 transaction = tt->transaction +
918 (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1));
919 memcpy(transaction, &tt_range_3[0], size);
920 size = sizeof(struct iwl_tt_restriction) *
921 IWL_TI_STATE_MAX;
922 memcpy(tt->restriction,
923 &restriction_range[0], size);
924 priv->thermal_throttle.advanced_tt = true;
925 }
926 } else {
927 IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
928 priv->thermal_throttle.advanced_tt = false;
929 }
930}
931EXPORT_SYMBOL(iwl_tt_initialize);
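
The initialization above uses a paired-allocation-with-fallback idiom: if either table cannot be allocated, both are freed and the driver reverts to legacy throttling. A generic sketch of the idiom (names hypothetical):

#include <linux/types.h>
#include <linux/slab.h>

struct tables {
	void *restriction;
	void *transaction;
	bool advanced;
};

/* Generic sketch of the fallback used in iwl_tt_initialize();
 * kfree(NULL) is a no-op, so both frees are safe even when only
 * one allocation succeeded. */
static void tables_init(struct tables *t, size_t r_sz, size_t x_sz)
{
	t->restriction = kzalloc(r_sz, GFP_KERNEL);
	t->transaction = kzalloc(x_sz, GFP_KERNEL);
	if (!t->restriction || !t->transaction) {
		kfree(t->restriction);
		t->restriction = NULL;
		kfree(t->transaction);
		t->transaction = NULL;
		t->advanced = false;	/* fall back to legacy mode */
		return;
	}
	t->advanced = true;
}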
932
933/* clean up thermal throttling management-related memory and timers */
934void iwl_tt_exit(struct iwl_priv *priv)
935{
936 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
937
938 /* stop ct_kill_exit_tm timer if activated */
939 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
940 /* stop ct_kill_waiting_tm timer if activated */
941 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
942 cancel_work_sync(&priv->tt_work);
943 cancel_work_sync(&priv->ct_enter);
944 cancel_work_sync(&priv->ct_exit);
945
946 if (priv->thermal_throttle.advanced_tt) {
947 /* free advanced thermal throttling memory */
948 kfree(tt->restriction);
949 tt->restriction = NULL;
950 kfree(tt->transaction);
951 tt->transaction = NULL;
952 }
953}
954EXPORT_SYMBOL(iwl_tt_exit);
955
956/* initialize to default */ 332/* initialize to default */
957void iwl_power_initialize(struct iwl_priv *priv) 333void iwl_power_initialize(struct iwl_priv *priv)
958{ 334{
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 5db91c10dcc8..df81565a7cc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -30,90 +30,6 @@
30 30
31#include "iwl-commands.h" 31#include "iwl-commands.h"
32 32
33#define IWL_ABSOLUTE_ZERO 0
34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
35#define IWL_TT_INCREASE_MARGIN 5
36#define IWL_TT_CT_KILL_MARGIN 3
37
38enum iwl_antenna_ok {
39 IWL_ANT_OK_NONE,
40 IWL_ANT_OK_SINGLE,
41 IWL_ANT_OK_MULTI,
42};
43
44/* Thermal Throttling State Machine states */
45enum iwl_tt_state {
46 IWL_TI_0, /* normal temperature, system power state */
47 IWL_TI_1, /* high temperature detect, low power state */
48 IWL_TI_2, /* higher temperature detected, lower power state */
49 IWL_TI_CT_KILL, /* critical temperature detected, lowest power state */
50 IWL_TI_STATE_MAX
51};
52
53/**
54 * struct iwl_tt_restriction - Thermal Throttling restriction table
55 * @tx_stream: number of tx stream allowed
56 * @is_ht: ht enable/disable
57 * @rx_stream: number of rx stream allowed
58 *
59 * This table is used by advanced thermal throttling management
60 * based on the current thermal throttling state, and determines
61 * the number of tx/rx streams and the status of HT operation.
62 */
63struct iwl_tt_restriction {
64 enum iwl_antenna_ok tx_stream;
65 enum iwl_antenna_ok rx_stream;
66 bool is_ht;
67};
68
69/**
70 * struct iwl_tt_trans - Thermal Throttling transaction table
71 * @next_state: next thermal throttling mode
72 * @tt_low: low temperature threshold to change state
73 * @tt_high: high temperature threshold to change state
74 *
75 * This is used by the advanced thermal throttling algorithm
76 * to determine the next thermal state to go to, based on the
77 * current temperature.
78 */
79struct iwl_tt_trans {
80 enum iwl_tt_state next_state;
81 u32 tt_low;
82 u32 tt_high;
83};
84
85/**
86 * struct iwl_tt_mgmt - Thermal Throttling Management structure
87 * @advanced_tt: advanced thermal throttling required
88 * @state: current Thermal Throttling state
89 * @tt_power_mode: Thermal Throttling power mode index
90 * used to set the power level when the thermal
91 * throttling state != IWL_TI_0; should be set to a
92 * different power mode based on the current tt state
93 * @tt_previous_temp: last measured temperature
94 * @restriction: ptr to restriction tbl, used by advanced
95 * thermal throttling to determine how many tx/rx streams
96 * should be used in a tt state, and whether HT can be enabled
97 * @transaction: ptr to the advanced transition table, used by advanced
98 * thermal throttling for state transitions
99 * @ct_kill_toggle: used to toggle the CSR bit when checking uCode temperature
100 * @ct_kill_exit_tm: timer to exit thermal kill
101 * @ct_kill_waiting_tm: timer to wait for a thermal update before CT kill
102 */
103struct iwl_tt_mgmt {
104 enum iwl_tt_state state;
105 bool advanced_tt;
106 u8 tt_power_mode;
107 bool ct_kill_toggle;
108#ifdef CONFIG_IWLWIFI_DEBUG
109 s32 tt_previous_temp;
110#endif
111 struct iwl_tt_restriction *restriction;
112 struct iwl_tt_trans *transaction;
113 struct timer_list ct_kill_exit_tm;
114 struct timer_list ct_kill_waiting_tm;
115};
116
117enum iwl_power_level { 33enum iwl_power_level {
118 IWL_POWER_INDEX_1, 34 IWL_POWER_INDEX_1,
119 IWL_POWER_INDEX_2, 35 IWL_POWER_INDEX_2,
@@ -130,15 +46,6 @@ struct iwl_power_mgr {
130}; 46};
131 47
132int iwl_power_update_mode(struct iwl_priv *priv, bool force); 48int iwl_power_update_mode(struct iwl_priv *priv, bool force);
133bool iwl_ht_enabled(struct iwl_priv *priv);
134bool iwl_within_ct_kill_margin(struct iwl_priv *priv);
135enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
136enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
137void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
138void iwl_tt_exit_ct_kill(struct iwl_priv *priv);
139void iwl_tt_handler(struct iwl_priv *priv);
140void iwl_tt_initialize(struct iwl_priv *priv);
141void iwl_tt_exit(struct iwl_priv *priv);
142void iwl_power_initialize(struct iwl_priv *priv); 49void iwl_power_initialize(struct iwl_priv *priv);
143 50
144extern bool no_sleep_autoadjust; 51extern bool no_sleep_autoadjust;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index b1f101caf19d..5469655646ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -306,7 +306,7 @@
306 * at a time, until receiving ACK from receiving station, or reaching 306 * at a time, until receiving ACK from receiving station, or reaching
307 * retry limit and giving up. 307 * retry limit and giving up.
308 * 308 *
309 * The command queue (#4) must use this mode! 309 * The command queue (#4/#9) must use this mode!
310 * This mode does not require use of the Byte Count table in host DRAM. 310 * This mode does not require use of the Byte Count table in host DRAM.
311 * 311 *
312 * Driver controls scheduler operation via 3 means: 312 * Driver controls scheduler operation via 3 means:
@@ -322,7 +322,7 @@
322 * (1024 bytes for each queue). 322 * (1024 bytes for each queue).
323 * 323 *
324 * After receiving "Alive" response from uCode, driver must initialize 324 * After receiving "Alive" response from uCode, driver must initialize
325 * the scheduler (especially for queue #4, the command queue, otherwise 325 * the scheduler (especially for queue #4/#9, the command queue, otherwise
326 * the driver can't issue commands!): 326 * the driver can't issue commands!):
327 */ 327 */
328 328
@@ -555,8 +555,9 @@
555#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ 555#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
556 ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc) 556 ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
557 557
558#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\ 558#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv) \
559 (~(1<<IWL_CMD_QUEUE_NUM))) 559 (((1<<(priv)->hw_params.max_txq_num) - 1) &\
560 (~(1<<(priv)->cmd_queue)))
560 561
561#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00) 562#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00)
562 563
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 79773e353baa..10be197b0f22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -228,7 +228,7 @@ void iwl_recover_from_statistics(struct iwl_priv *priv,
228{ 228{
229 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 229 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
230 return; 230 return;
231 if (iwl_is_associated(priv)) { 231 if (iwl_is_any_associated(priv)) {
232 if (priv->cfg->ops->lib->check_ack_health) { 232 if (priv->cfg->ops->lib->check_ack_health) {
233 if (!priv->cfg->ops->lib->check_ack_health( 233 if (!priv->cfg->ops->lib->check_ack_health(
234 priv, pkt)) { 234 priv, pkt)) {
@@ -266,7 +266,12 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
266{ 266{
267 u16 fc = le16_to_cpu(hdr->frame_control); 267 u16 fc = le16_to_cpu(hdr->frame_control);
268 268
269 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) 269 /*
270 * All contexts have the same setting here due to it being
271 * a module parameter, so OK to check any context.
272 */
273 if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
274 RXON_FILTER_DIS_DECRYPT_MSK)
270 return 0; 275 return 0;
271 276
272 if (!(fc & IEEE80211_FCTL_PROTECTED)) 277 if (!(fc & IEEE80211_FCTL_PROTECTED))
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index a4b3663a262f..c54c20023e7c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -54,100 +54,134 @@
54#define IWL_PASSIVE_DWELL_BASE (100) 54#define IWL_PASSIVE_DWELL_BASE (100)
55#define IWL_CHANNEL_TUNE_TIME 5 55#define IWL_CHANNEL_TUNE_TIME 5
56 56
57static int iwl_send_scan_abort(struct iwl_priv *priv)
58{
59 int ret;
60 struct iwl_rx_packet *pkt;
61 struct iwl_host_cmd cmd = {
62 .id = REPLY_SCAN_ABORT_CMD,
63 .flags = CMD_WANT_SKB,
64 };
57 65
66 /* Exit instantly with an error when the device is not ready
67 * to receive the scan abort command or is not currently
68 * performing a hardware scan */
69 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
71 !test_bit(STATUS_SCAN_HW, &priv->status) ||
72 test_bit(STATUS_FW_ERROR, &priv->status) ||
73 test_bit(STATUS_EXIT_PENDING, &priv->status))
74 return -EIO;
58 75
59/** 76 ret = iwl_send_cmd_sync(priv, &cmd);
60 * iwl_scan_cancel - Cancel any currently executing HW scan 77 if (ret)
61 * 78 return ret;
62 * NOTE: priv->mutex is not required before calling this function 79
63 */ 80 pkt = (struct iwl_rx_packet *)cmd.reply_page;
64int iwl_scan_cancel(struct iwl_priv *priv) 81 if (pkt->u.status != CAN_ABORT_STATUS) {
82 /* The scan abort will return 1 for success or
83 * 2 for "failure". A failure condition can be
84 * due to simply not being in an active scan, which
85 * can occur if we send the scan abort before
86 * the microcode has notified us that a scan has
87 * completed. */
88 IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
89 ret = -EIO;
90 }
91
92 iwl_free_pages(priv, cmd.reply_page);
93 return ret;
94}
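
The abort command above uses the driver's synchronous host-command path with CMD_WANT_SKB so the caller can inspect the firmware's reply before releasing it. A sketch of the general round trip, using the same iwlwifi helpers shown here but with a hypothetical command ID and status:

/* Sketch of the CMD_WANT_SKB pattern used by iwl_send_scan_abort():
 * send synchronously, inspect the reply, then release the reply page.
 * MY_CMD and MY_OK_STATUS are hypothetical placeholders. */
static int send_and_check(struct iwl_priv *priv)
{
	struct iwl_host_cmd cmd = {
		.id = MY_CMD,		/* hypothetical command ID */
		.flags = CMD_WANT_SKB,
	};
	struct iwl_rx_packet *pkt;
	int ret;

	ret = iwl_send_cmd_sync(priv, &cmd);
	if (ret)
		return ret;

	pkt = (struct iwl_rx_packet *)cmd.reply_page;
	if (pkt->u.status != MY_OK_STATUS)	/* hypothetical status */
		ret = -EIO;

	iwl_free_pages(priv, cmd.reply_page);
	return ret;
}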
95
96static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
65{ 97{
66 if (!test_bit(STATUS_SCAN_HW, &priv->status)) { 98 /* check if scan was requested from mac80211 */
67 clear_bit(STATUS_SCANNING, &priv->status); 99 if (priv->scan_request) {
68 return 0; 100 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
101 ieee80211_scan_completed(priv->hw, aborted);
69 } 102 }
70 103
71 if (test_bit(STATUS_SCANNING, &priv->status)) { 104 priv->is_internal_short_scan = false;
72 if (!test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) { 105 priv->scan_vif = NULL;
73 IWL_DEBUG_SCAN(priv, "Queuing scan abort.\n"); 106 priv->scan_request = NULL;
74 queue_work(priv->workqueue, &priv->abort_scan); 107}
75 108
76 } else 109void iwl_force_scan_end(struct iwl_priv *priv)
77 IWL_DEBUG_SCAN(priv, "Scan abort already in progress.\n"); 110{
111 lockdep_assert_held(&priv->mutex);
78 112
79 return test_bit(STATUS_SCANNING, &priv->status); 113 if (!test_bit(STATUS_SCANNING, &priv->status)) {
114 IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
115 return;
80 } 116 }
81 117
118 IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
119 clear_bit(STATUS_SCANNING, &priv->status);
120 clear_bit(STATUS_SCAN_HW, &priv->status);
121 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
122 iwl_complete_scan(priv, true);
123}
124EXPORT_SYMBOL(iwl_force_scan_end);
125
126static void iwl_do_scan_abort(struct iwl_priv *priv)
127{
128 int ret;
129
130 lockdep_assert_held(&priv->mutex);
131
132 if (!test_bit(STATUS_SCANNING, &priv->status)) {
133 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
134 return;
135 }
136
137 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
138 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
139 return;
140 }
141
142 ret = iwl_send_scan_abort(priv);
143 if (ret) {
144 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
145 iwl_force_scan_end(priv);
146 } else
147 IWL_DEBUG_SCAN(priv, "Sucessfully send scan abort\n");
148}
149
150/**
151 * iwl_scan_cancel - Cancel any currently executing HW scan
152 */
153int iwl_scan_cancel(struct iwl_priv *priv)
154{
155 IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
156 queue_work(priv->workqueue, &priv->abort_scan);
82 return 0; 157 return 0;
83} 158}
84EXPORT_SYMBOL(iwl_scan_cancel); 159EXPORT_SYMBOL(iwl_scan_cancel);
160
85/** 161/**
86 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan 162 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
87 * @ms: amount of time to wait (in milliseconds) for scan to abort 163 * @ms: amount of time to wait (in milliseconds) for scan to abort
88 * 164 *
89 * NOTE: priv->mutex must be held before calling this function
90 */ 165 */
91int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) 166int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
92{ 167{
93 unsigned long now = jiffies; 168 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
94 int ret;
95
96 ret = iwl_scan_cancel(priv);
97 if (ret && ms) {
98 mutex_unlock(&priv->mutex);
99 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
100 test_bit(STATUS_SCANNING, &priv->status))
101 msleep(1);
102 mutex_lock(&priv->mutex);
103
104 return test_bit(STATUS_SCANNING, &priv->status);
105 }
106 169
107 return ret; 170 lockdep_assert_held(&priv->mutex);
108}
109EXPORT_SYMBOL(iwl_scan_cancel_timeout);
110 171
111static int iwl_send_scan_abort(struct iwl_priv *priv) 172 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
112{
113 int ret = 0;
114 struct iwl_rx_packet *pkt;
115 struct iwl_host_cmd cmd = {
116 .id = REPLY_SCAN_ABORT_CMD,
117 .flags = CMD_WANT_SKB,
118 };
119 173
120 /* If there isn't a scan actively going on in the hardware 174 iwl_do_scan_abort(priv);
121 * then we are in between scan bands and not actually
122 * actively scanning, so don't send the abort command */
123 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
124 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
125 return 0;
126 }
127 175
128 ret = iwl_send_cmd_sync(priv, &cmd); 176 while (time_before_eq(jiffies, timeout)) {
129 if (ret) { 177 if (!test_bit(STATUS_SCAN_HW, &priv->status))
130 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 178 break;
131 return ret; 179 msleep(20);
132 } 180 }
133 181
134 pkt = (struct iwl_rx_packet *)cmd.reply_page; 182 return test_bit(STATUS_SCAN_HW, &priv->status);
135 if (pkt->u.status != CAN_ABORT_STATUS) {
136 /* The scan abort will return 1 for success or
137 * 2 for "failure". A failure condition can be
138 * due to simply not being in an active scan which
139 * can occur if we send the scan abort before we
140 * the microcode has notified us that a scan is
141 * completed. */
142 IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", pkt->u.status);
143 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
144 clear_bit(STATUS_SCAN_HW, &priv->status);
145 }
146
147 iwl_free_pages(priv, cmd.reply_page);
148
149 return ret;
150} 183}
184EXPORT_SYMBOL(iwl_scan_cancel_timeout);
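
The rewritten cancel path above uses the standard jiffies poll-until-timeout idiom. A generic, self-contained sketch of it (mirroring the driver's loop, names hypothetical):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

/* Wait up to @ms milliseconds for @done() to report true, sleeping
 * between polls, exactly as iwl_scan_cancel_timeout() waits for
 * STATUS_SCAN_HW to clear. */
static bool wait_for_condition(bool (*done)(void *), void *arg,
			       unsigned long ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);

	while (time_before_eq(jiffies, timeout)) {
		if (done(arg))
			return true;
		msleep(20);
	}
	return done(arg);	/* final check after the deadline */
}

Polling with msleep(20) keeps the wait coarse but cheap, and the final check after the deadline means a completion that lands right at the timeout is still observed.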
151 185
152/* Service response to REPLY_SCAN_CMD (0x80) */ 186/* Service response to REPLY_SCAN_CMD (0x80) */
153static void iwl_rx_reply_scan(struct iwl_priv *priv, 187static void iwl_rx_reply_scan(struct iwl_priv *priv,
@@ -158,7 +192,7 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
158 struct iwl_scanreq_notification *notif = 192 struct iwl_scanreq_notification *notif =
159 (struct iwl_scanreq_notification *)pkt->u.raw; 193 (struct iwl_scanreq_notification *)pkt->u.raw;
160 194
161 IWL_DEBUG_RX(priv, "Scan request status = 0x%x\n", notif->status); 195 IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
162#endif 196#endif
163} 197}
164 198
@@ -206,7 +240,6 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
206static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, 240static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
207 struct iwl_rx_mem_buffer *rxb) 241 struct iwl_rx_mem_buffer *rxb)
208{ 242{
209#ifdef CONFIG_IWLWIFI_DEBUG
210 struct iwl_rx_packet *pkt = rxb_addr(rxb); 243 struct iwl_rx_packet *pkt = rxb_addr(rxb);
211 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 244 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
212 245
@@ -214,29 +247,37 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
214 scan_notif->scanned_channels, 247 scan_notif->scanned_channels,
215 scan_notif->tsf_low, 248 scan_notif->tsf_low,
216 scan_notif->tsf_high, scan_notif->status); 249 scan_notif->tsf_high, scan_notif->status);
217#endif
218 250
219 /* The HW is no longer scanning */ 251 /* The HW is no longer scanning */
220 clear_bit(STATUS_SCAN_HW, &priv->status); 252 clear_bit(STATUS_SCAN_HW, &priv->status);
221 253
222 IWL_DEBUG_INFO(priv, "Scan on %sGHz took %dms\n", 254 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
223 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", 255 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
224 jiffies_to_msecs(elapsed_jiffies 256 jiffies_to_msecs(elapsed_jiffies
225 (priv->scan_start, jiffies))); 257 (priv->scan_start, jiffies)));
226 258
227 /*
228 * If a request to abort was given, or the scan did not succeed
229 * then we reset the scan state machine and terminate,
230 * re-queuing another scan if one has been requested
231 */
232 if (test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status))
233 IWL_DEBUG_INFO(priv, "Aborted scan completed.\n");
234
235 IWL_DEBUG_INFO(priv, "Setting scan to off\n");
236
237 clear_bit(STATUS_SCANNING, &priv->status);
238
239 queue_work(priv->workqueue, &priv->scan_completed); 259 queue_work(priv->workqueue, &priv->scan_completed);
260
261 if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
262 priv->cfg->advanced_bt_coexist &&
263 priv->bt_status != scan_notif->bt_status) {
264 if (scan_notif->bt_status) {
265 /* BT on */
266 if (!priv->bt_ch_announce)
267 priv->bt_traffic_load =
268 IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
269 /*
270 * otherwise, no traffic load information provided
271 * no changes made
272 */
273 } else {
274 /* BT off */
275 priv->bt_traffic_load =
276 IWL_BT_COEX_TRAFFIC_LOAD_NONE;
277 }
278 priv->bt_status = scan_notif->bt_status;
279 queue_work(priv->workqueue, &priv->bt_traffic_change_work);
280 }
240} 281}
241 282
242void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) 283void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
@@ -268,18 +309,28 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
268 enum ieee80211_band band, 309 enum ieee80211_band band,
269 struct ieee80211_vif *vif) 310 struct ieee80211_vif *vif)
270{ 311{
312 struct iwl_rxon_context *ctx;
271 u16 passive = (band == IEEE80211_BAND_2GHZ) ? 313 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
272 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : 314 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
273 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; 315 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
274 316
275 if (iwl_is_associated(priv)) { 317 if (iwl_is_any_associated(priv)) {
276 /* If we're associated, we clamp the maximum passive 318 /*
277 * dwell time to be 98% of the beacon interval (minus 319 * If we're associated, we clamp the maximum passive
278 * 2 * channel tune time) */ 320 * dwell time to be 98% of the smallest beacon interval
279 passive = vif ? vif->bss_conf.beacon_int : 0; 321 * (minus 2 * channel tune time)
280 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive) 322 */
281 passive = IWL_PASSIVE_DWELL_BASE; 323 for_each_context(priv, ctx) {
282 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; 324 u16 value;
325
326 if (!iwl_is_associated_ctx(ctx))
327 continue;
328 value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
329 if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
330 value = IWL_PASSIVE_DWELL_BASE;
331 value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
332 passive = min(value, passive);
333 }
283 } 334 }
284 335
285 return passive; 336 return passive;
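
For concreteness, the per-context clamp above works out as follows: with IWL_PASSIVE_DWELL_BASE = 100 and IWL_CHANNEL_TUNE_TIME = 5 as defined earlier in this file, a 100-unit beacon interval yields 100 * 98 / 100 - 2 * 5 = 88, and the smallest such value across contexts wins. A standalone sketch of one iteration (treating the beacon interval in the same units as the dwell time, as the code above does):

/* Sketch of one iteration of the dwell clamp in
 * iwl_get_passive_dwell_time(); constants inlined from the
 * defines at the top of this file. */
static unsigned int clamp_dwell(unsigned int beacon_int)
{
	unsigned int value = beacon_int;

	if (value > 100 || !value)	/* IWL_PASSIVE_DWELL_BASE */
		value = 100;
	return (value * 98) / 100 - 2 * 5;	/* IWL_CHANNEL_TUNE_TIME */
}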
@@ -296,19 +347,53 @@ void iwl_init_scan_params(struct iwl_priv *priv)
296} 347}
297EXPORT_SYMBOL(iwl_init_scan_params); 348EXPORT_SYMBOL(iwl_init_scan_params);
298 349
299static int iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif) 350static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
351 struct ieee80211_vif *vif,
352 bool internal,
353 enum ieee80211_band band)
300{ 354{
355 int ret;
356
301 lockdep_assert_held(&priv->mutex); 357 lockdep_assert_held(&priv->mutex);
302 358
303 IWL_DEBUG_INFO(priv, "Starting scan...\n"); 359 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
360 return -EOPNOTSUPP;
361
362 cancel_delayed_work(&priv->scan_check);
363
364 if (!iwl_is_ready_rf(priv)) {
365 IWL_WARN(priv, "Request scan called when driver not ready.\n");
366 return -EIO;
367 }
368
369 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
370 IWL_DEBUG_SCAN(priv,
371 "Multiple concurrent scan requests in parallel.\n");
372 return -EBUSY;
373 }
374
375 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
376 IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
377 return -EBUSY;
378 }
379
380 IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
381 internal ? "internal short " : "");
382
304 set_bit(STATUS_SCANNING, &priv->status); 383 set_bit(STATUS_SCANNING, &priv->status);
305 priv->is_internal_short_scan = false; 384 priv->is_internal_short_scan = internal;
306 priv->scan_start = jiffies; 385 priv->scan_start = jiffies;
386 priv->scan_band = band;
307 387
308 if (WARN_ON(!priv->cfg->ops->utils->request_scan)) 388 ret = priv->cfg->ops->utils->request_scan(priv, vif);
309 return -EOPNOTSUPP; 389 if (ret) {
390 clear_bit(STATUS_SCANNING, &priv->status);
391 priv->is_internal_short_scan = false;
392 return ret;
393 }
310 394
311 priv->cfg->ops->utils->request_scan(priv, vif); 395 queue_delayed_work(priv->workqueue, &priv->scan_check,
396 IWL_SCAN_CHECK_WATCHDOG);
312 397
313 return 0; 398 return 0;
314} 399}
@@ -327,12 +412,6 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
327 412
328 mutex_lock(&priv->mutex); 413 mutex_lock(&priv->mutex);
329 414
330 if (!iwl_is_ready_rf(priv)) {
331 ret = -EIO;
332 IWL_DEBUG_MAC80211(priv, "leave - not ready or exit pending\n");
333 goto out_unlock;
334 }
335
336 if (test_bit(STATUS_SCANNING, &priv->status) && 415 if (test_bit(STATUS_SCANNING, &priv->status) &&
337 !priv->is_internal_short_scan) { 416 !priv->is_internal_short_scan) {
338 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 417 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
@@ -340,14 +419,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
340 goto out_unlock; 419 goto out_unlock;
341 } 420 }
342 421
343 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
344 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
345 ret = -EAGAIN;
346 goto out_unlock;
347 }
348
349 /* mac80211 will only ask for one band at a time */ 422 /* mac80211 will only ask for one band at a time */
350 priv->scan_band = req->channels[0]->band;
351 priv->scan_request = req; 423 priv->scan_request = req;
352 priv->scan_vif = vif; 424 priv->scan_vif = vif;
353 425
@@ -355,10 +427,12 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
355 * If an internal scan is in progress, just set 427 * If an internal scan is in progress, just set
356 * up the scan_request as per above. 428 * up the scan_request as per above.
357 */ 429 */
358 if (priv->is_internal_short_scan) 430 if (priv->is_internal_short_scan) {
431 IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
359 ret = 0; 432 ret = 0;
360 else 433 } else
361 ret = iwl_scan_initiate(priv, vif); 434 ret = iwl_scan_initiate(priv, vif, false,
435 req->channels[0]->band);
362 436
363 IWL_DEBUG_MAC80211(priv, "leave\n"); 437 IWL_DEBUG_MAC80211(priv, "leave\n");
364 438
@@ -378,11 +452,13 @@ void iwl_internal_short_hw_scan(struct iwl_priv *priv)
378 queue_work(priv->workqueue, &priv->start_internal_scan); 452 queue_work(priv->workqueue, &priv->start_internal_scan);
379} 453}
380 454
381void iwl_bg_start_internal_scan(struct work_struct *work) 455static void iwl_bg_start_internal_scan(struct work_struct *work)
382{ 456{
383 struct iwl_priv *priv = 457 struct iwl_priv *priv =
384 container_of(work, struct iwl_priv, start_internal_scan); 458 container_of(work, struct iwl_priv, start_internal_scan);
385 459
460 IWL_DEBUG_SCAN(priv, "Start internal scan\n");
461
386 mutex_lock(&priv->mutex); 462 mutex_lock(&priv->mutex);
387 463
388 if (priv->is_internal_short_scan == true) { 464 if (priv->is_internal_short_scan == true) {
@@ -390,56 +466,31 @@ void iwl_bg_start_internal_scan(struct work_struct *work)
390 goto unlock; 466 goto unlock;
391 } 467 }
392 468
393 if (!iwl_is_ready_rf(priv)) {
394 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
395 goto unlock;
396 }
397
398 if (test_bit(STATUS_SCANNING, &priv->status)) { 469 if (test_bit(STATUS_SCANNING, &priv->status)) {
399 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 470 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
400 goto unlock; 471 goto unlock;
401 } 472 }
402 473
403 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 474 if (iwl_scan_initiate(priv, NULL, true, priv->band))
404 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); 475 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
405 goto unlock;
406 }
407
408 priv->scan_band = priv->band;
409
410 IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
411 set_bit(STATUS_SCANNING, &priv->status);
412 priv->is_internal_short_scan = true;
413
414 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
415 goto unlock;
416
417 priv->cfg->ops->utils->request_scan(priv, NULL);
418 unlock: 476 unlock:
419 mutex_unlock(&priv->mutex); 477 mutex_unlock(&priv->mutex);
420} 478}
421EXPORT_SYMBOL(iwl_bg_start_internal_scan);
422 479
423void iwl_bg_scan_check(struct work_struct *data) 480static void iwl_bg_scan_check(struct work_struct *data)
424{ 481{
425 struct iwl_priv *priv = 482 struct iwl_priv *priv =
426 container_of(data, struct iwl_priv, scan_check.work); 483 container_of(data, struct iwl_priv, scan_check.work);
427 484
428 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 485 IWL_DEBUG_SCAN(priv, "Scan check work\n");
429 return;
430 486
487 /* Since we got here, the firmware did not finish the scan and
488 * is most likely in bad shape, so we don't bother to send an
489 * abort command; just force scan complete to mac80211 */
431 mutex_lock(&priv->mutex); 490 mutex_lock(&priv->mutex);
432 if (test_bit(STATUS_SCANNING, &priv->status) && 491 iwl_force_scan_end(priv);
433 !test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
434 IWL_DEBUG_SCAN(priv, "Scan completion watchdog (%dms)\n",
435 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
436
437 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
438 iwl_send_scan_abort(priv);
439 }
440 mutex_unlock(&priv->mutex); 492 mutex_unlock(&priv->mutex);
441} 493}
442EXPORT_SYMBOL(iwl_bg_scan_check);
443 494
444/** 495/**
445 * iwl_fill_probe_req - fill in all required fields and IE for probe request 496 * iwl_fill_probe_req - fill in all required fields and IE for probe request
@@ -489,48 +540,69 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
489} 540}
490EXPORT_SYMBOL(iwl_fill_probe_req); 541EXPORT_SYMBOL(iwl_fill_probe_req);
491 542
492void iwl_bg_abort_scan(struct work_struct *work) 543static void iwl_bg_abort_scan(struct work_struct *work)
493{ 544{
494 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); 545 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
495 546
496 if (!test_bit(STATUS_READY, &priv->status) || 547 IWL_DEBUG_SCAN(priv, "Abort scan work\n");
497 !test_bit(STATUS_GEO_CONFIGURED, &priv->status))
498 return;
499
500 cancel_delayed_work(&priv->scan_check);
501 548
549 /* We keep the scan_check work queued in case the firmware fails
550 * to report back the scan completed notification */
502 mutex_lock(&priv->mutex); 551 mutex_lock(&priv->mutex);
503 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) 552 iwl_scan_cancel_timeout(priv, 200);
504 iwl_send_scan_abort(priv);
505 mutex_unlock(&priv->mutex); 553 mutex_unlock(&priv->mutex);
506} 554}
507EXPORT_SYMBOL(iwl_bg_abort_scan);
508 555
509void iwl_bg_scan_completed(struct work_struct *work) 556static void iwl_bg_scan_completed(struct work_struct *work)
510{ 557{
511 struct iwl_priv *priv = 558 struct iwl_priv *priv =
512 container_of(work, struct iwl_priv, scan_completed); 559 container_of(work, struct iwl_priv, scan_completed);
513 bool internal = false; 560 bool aborted;
561 struct iwl_rxon_context *ctx;
514 562
515 IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); 563 IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
564 priv->is_internal_short_scan ? "internal short " : "");
516 565
517 cancel_delayed_work(&priv->scan_check); 566 cancel_delayed_work(&priv->scan_check);
518 567
519 mutex_lock(&priv->mutex); 568 mutex_lock(&priv->mutex);
520 if (priv->is_internal_short_scan) { 569
521 priv->is_internal_short_scan = false; 570 aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
522 IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); 571 if (aborted)
523 internal = true; 572 IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
524 } else { 573
525 priv->scan_request = NULL; 574 if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
526 priv->scan_vif = NULL; 575 IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
576 goto out_settings;
527 } 577 }
528 578
529 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 579 if (priv->is_internal_short_scan && !aborted) {
580 int err;
581
582 /* Check if mac80211 requested scan during our internal scan */
583 if (priv->scan_request == NULL)
584 goto out_complete;
585
586 /* If so request a new scan */
587 err = iwl_scan_initiate(priv, priv->scan_vif, false,
588 priv->scan_request->channels[0]->band);
589 if (err) {
590 IWL_DEBUG_SCAN(priv,
591 "failed to initiate pending scan: %d\n", err);
592 aborted = true;
593 goto out_complete;
594 }
595
530 goto out; 596 goto out;
597 }
531 598
532 if (internal && priv->scan_request) 599out_complete:
533 iwl_scan_initiate(priv, priv->scan_vif); 600 iwl_complete_scan(priv, aborted);
601
602out_settings:
603 /* Can we still talk to the firmware? */
604 if (!iwl_is_ready_rf(priv))
605 goto out;
534 606
535 /* Since setting the TXPOWER may have been deferred while 607 /* Since setting the TXPOWER may have been deferred while
536 * performing the scan, fire one off */ 608 * performing the scan, fire one off */
@@ -540,22 +612,15 @@ void iwl_bg_scan_completed(struct work_struct *work)
540 * Since setting the RXON may have been deferred while 612 * Since setting the RXON may have been deferred while
541 * performing the scan, fire one off if needed 613 * performing the scan, fire one off if needed
542 */ 614 */
543 if (memcmp(&priv->active_rxon, 615 for_each_context(priv, ctx)
544 &priv->staging_rxon, sizeof(priv->staging_rxon))) 616 iwlcore_commit_rxon(priv, ctx);
545 iwlcore_commit_rxon(priv); 617
618 if (priv->cfg->ops->hcmd->set_pan_params)
619 priv->cfg->ops->hcmd->set_pan_params(priv);
546 620
547 out: 621 out:
548 mutex_unlock(&priv->mutex); 622 mutex_unlock(&priv->mutex);
549
550 /*
551 * Do not hold mutex here since this will cause mac80211 to call
552 * into driver again into functions that will attempt to take
553 * mutex.
554 */
555 if (!internal)
556 ieee80211_scan_completed(priv->hw, false);
557} 623}
558EXPORT_SYMBOL(iwl_bg_scan_completed);
559 624
560void iwl_setup_scan_deferred_work(struct iwl_priv *priv) 625void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
561{ 626{
@@ -566,3 +631,16 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
566} 631}
567EXPORT_SYMBOL(iwl_setup_scan_deferred_work); 632EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
568 633
634void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
635{
636 cancel_work_sync(&priv->start_internal_scan);
637 cancel_work_sync(&priv->abort_scan);
638 cancel_work_sync(&priv->scan_completed);
639
640 if (cancel_delayed_work_sync(&priv->scan_check)) {
641 mutex_lock(&priv->mutex);
642 iwl_force_scan_end(priv);
643 mutex_unlock(&priv->mutex);
644 }
645}
646EXPORT_SYMBOL(iwl_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 7e0829be5e78..6edd0341dfe2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -172,12 +172,14 @@ int iwl_send_add_sta(struct iwl_priv *priv,
172EXPORT_SYMBOL(iwl_send_add_sta); 172EXPORT_SYMBOL(iwl_send_add_sta);
173 173
174static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, 174static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
175 struct ieee80211_sta_ht_cap *sta_ht_inf) 175 struct ieee80211_sta *sta,
176 struct iwl_rxon_context *ctx)
176{ 177{
178 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
177 __le32 sta_flags; 179 __le32 sta_flags;
178 u8 mimo_ps_mode; 180 u8 mimo_ps_mode;
179 181
180 if (!sta_ht_inf || !sta_ht_inf->ht_supported) 182 if (!sta || !sta_ht_inf->ht_supported)
181 goto done; 183 goto done;
182 184
183 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; 185 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
@@ -211,7 +213,7 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
211 sta_flags |= cpu_to_le32( 213 sta_flags |= cpu_to_le32(
212 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); 214 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
213 215
214 if (iwl_is_ht40_tx_allowed(priv, sta_ht_inf)) 216 if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
215 sta_flags |= STA_FLG_HT40_EN_MSK; 217 sta_flags |= STA_FLG_HT40_EN_MSK;
216 else 218 else
217 sta_flags &= ~STA_FLG_HT40_EN_MSK; 219 sta_flags &= ~STA_FLG_HT40_EN_MSK;
@@ -226,9 +228,9 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
226 * 228 *
227 * should be called with sta_lock held 229 * should be called with sta_lock held
228 */ 230 */
229static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr, 231static u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
230 bool is_ap, 232 const u8 *addr, bool is_ap,
231 struct ieee80211_sta_ht_cap *ht_info) 233 struct ieee80211_sta *sta)
232{ 234{
233 struct iwl_station_entry *station; 235 struct iwl_station_entry *station;
234 int i; 236 int i;
@@ -236,9 +238,9 @@ static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
236 u16 rate; 238 u16 rate;
237 239
238 if (is_ap) 240 if (is_ap)
239 sta_id = IWL_AP_ID; 241 sta_id = ctx->ap_sta_id;
240 else if (is_broadcast_ether_addr(addr)) 242 else if (is_broadcast_ether_addr(addr))
241 sta_id = priv->hw_params.bcast_sta_id; 243 sta_id = ctx->bcast_sta_id;
242 else 244 else
243 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) { 245 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
244 if (!compare_ether_addr(priv->stations[i].sta.sta.addr, 246 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
@@ -289,14 +291,22 @@ static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
289 memcpy(station->sta.sta.addr, addr, ETH_ALEN); 291 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
290 station->sta.mode = 0; 292 station->sta.mode = 0;
291 station->sta.sta.sta_id = sta_id; 293 station->sta.sta.sta_id = sta_id;
292 station->sta.station_flags = 0; 294 station->sta.station_flags = ctx->station_flags;
295 station->ctxid = ctx->ctxid;
296
297 if (sta) {
298 struct iwl_station_priv_common *sta_priv;
299
300 sta_priv = (void *)sta->drv_priv;
301 sta_priv->ctx = ctx;
302 }
293 303
294 /* 304 /*
295 * OK to call unconditionally, since local stations (IBSS BSSID 305 * OK to call unconditionally, since local stations (IBSS BSSID
296 * STA and broadcast STA) pass in a NULL ht_info, and mac80211 306 * STA and broadcast STA) pass in a NULL sta, and mac80211
297 * doesn't allow HT IBSS. 307 * doesn't allow HT IBSS.
298 */ 308 */
299 iwl_set_ht_add_station(priv, sta_id, ht_info); 309 iwl_set_ht_add_station(priv, sta_id, sta, ctx);
300 310
301 /* 3945 only */ 311 /* 3945 only */
302 rate = (priv->band == IEEE80211_BAND_5GHZ) ? 312 rate = (priv->band == IEEE80211_BAND_5GHZ) ?
@@ -313,10 +323,9 @@ static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
313/** 323/**
314 * iwl_add_station_common - 324 * iwl_add_station_common -
315 */ 325 */
316int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr, 326int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
317 bool is_ap, 327 const u8 *addr, bool is_ap,
318 struct ieee80211_sta_ht_cap *ht_info, 328 struct ieee80211_sta *sta, u8 *sta_id_r)
319 u8 *sta_id_r)
320{ 329{
321 unsigned long flags_spin; 330 unsigned long flags_spin;
322 int ret = 0; 331 int ret = 0;
@@ -325,7 +334,7 @@ int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
325 334
326 *sta_id_r = 0; 335 *sta_id_r = 0;
327 spin_lock_irqsave(&priv->sta_lock, flags_spin); 336 spin_lock_irqsave(&priv->sta_lock, flags_spin);
328 sta_id = iwl_prep_station(priv, addr, is_ap, ht_info); 337 sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
329 if (sta_id == IWL_INVALID_STATION) { 338 if (sta_id == IWL_INVALID_STATION) {
330 IWL_ERR(priv, "Unable to prepare station %pM for addition\n", 339 IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
331 addr); 340 addr);
@@ -377,7 +386,8 @@ static struct iwl_link_quality_cmd *iwl_sta_alloc_lq(struct iwl_priv *priv,
377{ 386{
378 int i, r; 387 int i, r;
379 struct iwl_link_quality_cmd *link_cmd; 388 struct iwl_link_quality_cmd *link_cmd;
380 u32 rate_flags; 389 u32 rate_flags = 0;
390 __le32 rate_n_flags;
381 391
382 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL); 392 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
383 if (!link_cmd) { 393 if (!link_cmd) {
@@ -391,18 +401,14 @@ static struct iwl_link_quality_cmd *iwl_sta_alloc_lq(struct iwl_priv *priv,
391 else 401 else
392 r = IWL_RATE_1M_INDEX; 402 r = IWL_RATE_1M_INDEX;
393 403
394 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 404 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
395 rate_flags = 0; 405 rate_flags |= RATE_MCS_CCK_MSK;
396 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
397 rate_flags |= RATE_MCS_CCK_MSK;
398 406
399 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) << 407 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
400 RATE_MCS_ANT_POS; 408 RATE_MCS_ANT_POS;
401 409 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
402 link_cmd->rs_table[i].rate_n_flags = 410 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
403 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); 411 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
404 r = iwl_get_prev_ieee_rate(r);
405 }
406 412
407 link_cmd->general_params.single_stream_ant_msk = 413 link_cmd->general_params.single_stream_ant_msk =
408 first_antenna(priv->hw_params.valid_tx_ant); 414 first_antenna(priv->hw_params.valid_tx_ant);
@@ -431,8 +437,8 @@ static struct iwl_link_quality_cmd *iwl_sta_alloc_lq(struct iwl_priv *priv,
431 * 437 *
432 * Function sleeps. 438 * Function sleeps.
433 */ 439 */
434int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs, 440int iwl_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
435 u8 *sta_id_r) 441 const u8 *addr, bool init_rs, u8 *sta_id_r)
436{ 442{
437 int ret; 443 int ret;
438 u8 sta_id; 444 u8 sta_id;
@@ -442,7 +448,7 @@ int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
442 if (sta_id_r) 448 if (sta_id_r)
443 *sta_id_r = IWL_INVALID_STATION; 449 *sta_id_r = IWL_INVALID_STATION;
444 450
445 ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id); 451 ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
446 if (ret) { 452 if (ret) {
447 IWL_ERR(priv, "Unable to add station %pM\n", addr); 453 IWL_ERR(priv, "Unable to add station %pM\n", addr);
448 return ret; 454 return ret;
@@ -464,7 +470,7 @@ int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
464 return -ENOMEM; 470 return -ENOMEM;
465 } 471 }
466 472
467 ret = iwl_send_lq_cmd(priv, link_cmd, CMD_SYNC, true); 473 ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
468 if (ret) 474 if (ret)
469 IWL_ERR(priv, "Link quality command failed (%d)\n", ret); 475 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
470 476
@@ -616,7 +622,8 @@ EXPORT_SYMBOL_GPL(iwl_remove_station);
616 * other than explicit station management would cause this in 622 * other than explicit station management would cause this in
617 * the ucode, e.g. unassociated RXON. 623 * the ucode, e.g. unassociated RXON.
618 */ 624 */
619void iwl_clear_ucode_stations(struct iwl_priv *priv) 625void iwl_clear_ucode_stations(struct iwl_priv *priv,
626 struct iwl_rxon_context *ctx)
620{ 627{
621 int i; 628 int i;
622 unsigned long flags_spin; 629 unsigned long flags_spin;
@@ -626,6 +633,9 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv)
626 633
627 spin_lock_irqsave(&priv->sta_lock, flags_spin); 634 spin_lock_irqsave(&priv->sta_lock, flags_spin);
628 for (i = 0; i < priv->hw_params.max_stations; i++) { 635 for (i = 0; i < priv->hw_params.max_stations; i++) {
636 if (ctx && ctx->ctxid != priv->stations[i].ctxid)
637 continue;
638
629 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { 639 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
630 IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i); 640 IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
631 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; 641 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
@@ -647,7 +657,7 @@ EXPORT_SYMBOL(iwl_clear_ucode_stations);
647 * 657 *
648 * Function sleeps. 658 * Function sleeps.
649 */ 659 */
650void iwl_restore_stations(struct iwl_priv *priv) 660void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
651{ 661{
652 struct iwl_addsta_cmd sta_cmd; 662 struct iwl_addsta_cmd sta_cmd;
653 struct iwl_link_quality_cmd lq; 663 struct iwl_link_quality_cmd lq;
@@ -665,6 +675,8 @@ void iwl_restore_stations(struct iwl_priv *priv)
665 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n"); 675 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
666 spin_lock_irqsave(&priv->sta_lock, flags_spin); 676 spin_lock_irqsave(&priv->sta_lock, flags_spin);
667 for (i = 0; i < priv->hw_params.max_stations; i++) { 677 for (i = 0; i < priv->hw_params.max_stations; i++) {
678 if (ctx->ctxid != priv->stations[i].ctxid)
679 continue;
668 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) && 680 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
669 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) { 681 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
670 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n", 682 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
@@ -700,7 +712,7 @@ void iwl_restore_stations(struct iwl_priv *priv)
700 * current LQ command 712 * current LQ command
701 */ 713 */
702 if (send_lq) 714 if (send_lq)
703 iwl_send_lq_cmd(priv, &lq, CMD_SYNC, true); 715 iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
704 spin_lock_irqsave(&priv->sta_lock, flags_spin); 716 spin_lock_irqsave(&priv->sta_lock, flags_spin);
705 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; 717 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
706 } 718 }
@@ -718,7 +730,7 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
718{ 730{
719 int i; 731 int i;
720 732
721 for (i = 0; i < STA_KEY_MAX_NUM; i++) 733 for (i = 0; i < priv->sta_key_max_num; i++)
722 if (!test_and_set_bit(i, &priv->ucode_key_table)) 734 if (!test_and_set_bit(i, &priv->ucode_key_table))
723 return i; 735 return i;
724 736
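Replacing the compile-time STA_KEY_MAX_NUM bound with priv->sta_key_max_num lets each device report its own hardware key-table size while the allocator stays a simple find-first-zero bitmap scan. A stand-alone approximation (the kernel version uses the atomic test_and_set_bit(), so the test and the set happen as one step):

#include <stdio.h>

/* Return the first free index below max, marking it used; -1 if full. */
static int get_free_key_index(unsigned long *table, int max)
{
        for (int i = 0; i < max; i++) {
                if (!(*table & (1UL << i))) {
                        *table |= 1UL << i;
                        return i;
                }
        }
        return -1;
}

int main(void)
{
        unsigned long table = 0;

        printf("%d %d\n", get_free_key_index(&table, 8),
                          get_free_key_index(&table, 8));  /* prints: 0 1 */
        return 0;
}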
@@ -726,7 +738,9 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
726} 738}
727EXPORT_SYMBOL(iwl_get_free_ucode_key_index); 739EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
728 740
729static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) 741static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
742 struct iwl_rxon_context *ctx,
743 bool send_if_empty)
730{ 744{
731 int i, not_empty = 0; 745 int i, not_empty = 0;
732 u8 buff[sizeof(struct iwl_wep_cmd) + 746 u8 buff[sizeof(struct iwl_wep_cmd) +
@@ -734,7 +748,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
734 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff; 748 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
735 size_t cmd_size = sizeof(struct iwl_wep_cmd); 749 size_t cmd_size = sizeof(struct iwl_wep_cmd);
736 struct iwl_host_cmd cmd = { 750 struct iwl_host_cmd cmd = {
737 .id = REPLY_WEPKEY, 751 .id = ctx->wep_key_cmd,
738 .data = wep_cmd, 752 .data = wep_cmd,
739 .flags = CMD_SYNC, 753 .flags = CMD_SYNC,
740 }; 754 };
@@ -746,16 +760,16 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
746 760
747 for (i = 0; i < WEP_KEYS_MAX ; i++) { 761 for (i = 0; i < WEP_KEYS_MAX ; i++) {
748 wep_cmd->key[i].key_index = i; 762 wep_cmd->key[i].key_index = i;
749 if (priv->wep_keys[i].key_size) { 763 if (ctx->wep_keys[i].key_size) {
750 wep_cmd->key[i].key_offset = i; 764 wep_cmd->key[i].key_offset = i;
751 not_empty = 1; 765 not_empty = 1;
752 } else { 766 } else {
753 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; 767 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
754 } 768 }
755 769
756 wep_cmd->key[i].key_size = priv->wep_keys[i].key_size; 770 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
757 memcpy(&wep_cmd->key[i].key[3], priv->wep_keys[i].key, 771 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
758 priv->wep_keys[i].key_size); 772 ctx->wep_keys[i].key_size);
759 } 773 }
760 774
761 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; 775 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
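The command built above always describes all WEP_KEYS_MAX slots: a populated slot advertises its own index as key_offset, an empty one is tagged WEP_INVALID_OFFSET, and key material is copied to byte 3 of each slot's key field to match the firmware layout. A hedged sketch of the packing, with invented sizes standing in for the real ABI:

#include <string.h>
#include <stdint.h>

#define WEP_KEYS_MAX        4
#define WEP_INVALID_OFFSET  0xff

struct wep_key_slot { uint8_t key_index, key_offset, key_size, key[16]; };
struct wep_src      { uint8_t key_size; uint8_t key[13]; };

/* Pack driver-side WEP state into a firmware-style key table. */
static void pack_wep(struct wep_key_slot *cmd, const struct wep_src *src)
{
        for (int i = 0; i < WEP_KEYS_MAX; i++) {
                cmd[i].key_index = i;
                cmd[i].key_offset = src[i].key_size ? i : WEP_INVALID_OFFSET;
                cmd[i].key_size = src[i].key_size;
                memcpy(&cmd[i].key[3], src[i].key, src[i].key_size);
        }
}

int main(void)
{
        struct wep_src src[WEP_KEYS_MAX] = { [0] = { 5, "abcde" } };
        struct wep_key_slot cmd[WEP_KEYS_MAX] = { 0 };

        pack_wep(cmd, src);
        return cmd[1].key_offset == WEP_INVALID_OFFSET ? 0 : 1;
}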
@@ -771,15 +785,17 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
771 return 0; 785 return 0;
772} 786}
773 787
774int iwl_restore_default_wep_keys(struct iwl_priv *priv) 788int iwl_restore_default_wep_keys(struct iwl_priv *priv,
789 struct iwl_rxon_context *ctx)
775{ 790{
776 lockdep_assert_held(&priv->mutex); 791 lockdep_assert_held(&priv->mutex);
777 792
778 return iwl_send_static_wepkey_cmd(priv, 0); 793 return iwl_send_static_wepkey_cmd(priv, ctx, false);
779} 794}
780EXPORT_SYMBOL(iwl_restore_default_wep_keys); 795EXPORT_SYMBOL(iwl_restore_default_wep_keys);
781 796
782int iwl_remove_default_wep_key(struct iwl_priv *priv, 797int iwl_remove_default_wep_key(struct iwl_priv *priv,
798 struct iwl_rxon_context *ctx,
783 struct ieee80211_key_conf *keyconf) 799 struct ieee80211_key_conf *keyconf)
784{ 800{
785 int ret; 801 int ret;
@@ -789,13 +805,13 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
789 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", 805 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
790 keyconf->keyidx); 806 keyconf->keyidx);
791 807
792 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); 808 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
793 if (iwl_is_rfkill(priv)) { 809 if (iwl_is_rfkill(priv)) {
794 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); 810 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
795 /* but keys in device are clear anyway so return success */ 811 /* but keys in device are clear anyway so return success */
796 return 0; 812 return 0;
797 } 813 }
798 ret = iwl_send_static_wepkey_cmd(priv, 1); 814 ret = iwl_send_static_wepkey_cmd(priv, ctx, 1);
799 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", 815 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
800 keyconf->keyidx, ret); 816 keyconf->keyidx, ret);
801 817
@@ -804,6 +820,7 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
804EXPORT_SYMBOL(iwl_remove_default_wep_key); 820EXPORT_SYMBOL(iwl_remove_default_wep_key);
805 821
806int iwl_set_default_wep_key(struct iwl_priv *priv, 822int iwl_set_default_wep_key(struct iwl_priv *priv,
823 struct iwl_rxon_context *ctx,
807 struct ieee80211_key_conf *keyconf) 824 struct ieee80211_key_conf *keyconf)
808{ 825{
809 int ret; 826 int ret;
@@ -818,13 +835,13 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
818 835
819 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; 836 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
820 keyconf->hw_key_idx = HW_KEY_DEFAULT; 837 keyconf->hw_key_idx = HW_KEY_DEFAULT;
821 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP; 838 priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
822 839
823 priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; 840 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
824 memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key, 841 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
825 keyconf->keylen); 842 keyconf->keylen);
826 843
827 ret = iwl_send_static_wepkey_cmd(priv, 0); 844 ret = iwl_send_static_wepkey_cmd(priv, ctx, false);
828 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n", 845 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
829 keyconf->keylen, keyconf->keyidx, ret); 846 keyconf->keylen, keyconf->keyidx, ret);
830 847
@@ -833,8 +850,9 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
833EXPORT_SYMBOL(iwl_set_default_wep_key); 850EXPORT_SYMBOL(iwl_set_default_wep_key);
834 851
835static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv, 852static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
836 struct ieee80211_key_conf *keyconf, 853 struct iwl_rxon_context *ctx,
837 u8 sta_id) 854 struct ieee80211_key_conf *keyconf,
855 u8 sta_id)
838{ 856{
839 unsigned long flags; 857 unsigned long flags;
840 __le16 key_flags = 0; 858 __le16 key_flags = 0;
@@ -851,12 +869,12 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
851 if (keyconf->keylen == WEP_KEY_LEN_128) 869 if (keyconf->keylen == WEP_KEY_LEN_128)
852 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; 870 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
853 871
854 if (sta_id == priv->hw_params.bcast_sta_id) 872 if (sta_id == ctx->bcast_sta_id)
855 key_flags |= STA_KEY_MULTICAST_MSK; 873 key_flags |= STA_KEY_MULTICAST_MSK;
856 874
857 spin_lock_irqsave(&priv->sta_lock, flags); 875 spin_lock_irqsave(&priv->sta_lock, flags);
858 876
859 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 877 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
860 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; 878 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
861 priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx; 879 priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
862 880
@@ -887,8 +905,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
887} 905}
888 906
889static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv, 907static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
890 struct ieee80211_key_conf *keyconf, 908 struct iwl_rxon_context *ctx,
891 u8 sta_id) 909 struct ieee80211_key_conf *keyconf,
910 u8 sta_id)
892{ 911{
893 unsigned long flags; 912 unsigned long flags;
894 __le16 key_flags = 0; 913 __le16 key_flags = 0;
@@ -900,13 +919,13 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
900 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 919 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
901 key_flags &= ~STA_KEY_FLG_INVALID; 920 key_flags &= ~STA_KEY_FLG_INVALID;
902 921
903 if (sta_id == priv->hw_params.bcast_sta_id) 922 if (sta_id == ctx->bcast_sta_id)
904 key_flags |= STA_KEY_MULTICAST_MSK; 923 key_flags |= STA_KEY_MULTICAST_MSK;
905 924
906 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 925 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
907 926
908 spin_lock_irqsave(&priv->sta_lock, flags); 927 spin_lock_irqsave(&priv->sta_lock, flags);
909 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 928 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
910 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; 929 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
911 930
912 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 931 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
@@ -936,8 +955,9 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
936} 955}
937 956
938static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, 957static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
939 struct ieee80211_key_conf *keyconf, 958 struct iwl_rxon_context *ctx,
940 u8 sta_id) 959 struct ieee80211_key_conf *keyconf,
960 u8 sta_id)
941{ 961{
942 unsigned long flags; 962 unsigned long flags;
943 int ret = 0; 963 int ret = 0;
@@ -947,7 +967,7 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
947 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 967 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
948 key_flags &= ~STA_KEY_FLG_INVALID; 968 key_flags &= ~STA_KEY_FLG_INVALID;
949 969
950 if (sta_id == priv->hw_params.bcast_sta_id) 970 if (sta_id == ctx->bcast_sta_id)
951 key_flags |= STA_KEY_MULTICAST_MSK; 971 key_flags |= STA_KEY_MULTICAST_MSK;
952 972
953 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 973 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -955,7 +975,7 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
955 975
956 spin_lock_irqsave(&priv->sta_lock, flags); 976 spin_lock_irqsave(&priv->sta_lock, flags);
957 977
958 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 978 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
959 priv->stations[sta_id].keyinfo.keylen = 16; 979 priv->stations[sta_id].keyinfo.keylen = 16;
960 980
961 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) 981 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
@@ -982,8 +1002,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
982} 1002}
983 1003
984void iwl_update_tkip_key(struct iwl_priv *priv, 1004void iwl_update_tkip_key(struct iwl_priv *priv,
985 struct ieee80211_key_conf *keyconf, 1005 struct iwl_rxon_context *ctx,
986 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) 1006 struct ieee80211_key_conf *keyconf,
1007 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
987{ 1008{
988 u8 sta_id; 1009 u8 sta_id;
989 unsigned long flags; 1010 unsigned long flags;
@@ -995,7 +1016,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
995 return; 1016 return;
996 } 1017 }
997 1018
998 sta_id = iwl_sta_id_or_broadcast(priv, sta); 1019 sta_id = iwl_sta_id_or_broadcast(priv, ctx, sta);
999 if (sta_id == IWL_INVALID_STATION) 1020 if (sta_id == IWL_INVALID_STATION)
1000 return; 1021 return;
1001 1022
@@ -1018,8 +1039,9 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
1018EXPORT_SYMBOL(iwl_update_tkip_key); 1039EXPORT_SYMBOL(iwl_update_tkip_key);
1019 1040
1020int iwl_remove_dynamic_key(struct iwl_priv *priv, 1041int iwl_remove_dynamic_key(struct iwl_priv *priv,
1021 struct ieee80211_key_conf *keyconf, 1042 struct iwl_rxon_context *ctx,
1022 u8 sta_id) 1043 struct ieee80211_key_conf *keyconf,
1044 u8 sta_id)
1023{ 1045{
1024 unsigned long flags; 1046 unsigned long flags;
1025 u16 key_flags; 1047 u16 key_flags;
@@ -1028,7 +1050,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
1028 1050
1029 lockdep_assert_held(&priv->mutex); 1051 lockdep_assert_held(&priv->mutex);
1030 1052
1031 priv->key_mapping_key--; 1053 ctx->key_mapping_keys--;
1032 1054
1033 spin_lock_irqsave(&priv->sta_lock, flags); 1055 spin_lock_irqsave(&priv->sta_lock, flags);
1034 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags); 1056 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
@@ -1080,34 +1102,36 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
1080} 1102}
1081EXPORT_SYMBOL(iwl_remove_dynamic_key); 1103EXPORT_SYMBOL(iwl_remove_dynamic_key);
1082 1104
1083int iwl_set_dynamic_key(struct iwl_priv *priv, 1105int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
1084 struct ieee80211_key_conf *keyconf, u8 sta_id) 1106 struct ieee80211_key_conf *keyconf, u8 sta_id)
1085{ 1107{
1086 int ret; 1108 int ret;
1087 1109
1088 lockdep_assert_held(&priv->mutex); 1110 lockdep_assert_held(&priv->mutex);
1089 1111
1090 priv->key_mapping_key++; 1112 ctx->key_mapping_keys++;
1091 keyconf->hw_key_idx = HW_KEY_DYNAMIC; 1113 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
1092 1114
1093 switch (keyconf->alg) { 1115 switch (keyconf->cipher) {
1094 case ALG_CCMP: 1116 case WLAN_CIPHER_SUITE_CCMP:
1095 ret = iwl_set_ccmp_dynamic_key_info(priv, keyconf, sta_id); 1117 ret = iwl_set_ccmp_dynamic_key_info(priv, ctx, keyconf, sta_id);
1096 break; 1118 break;
1097 case ALG_TKIP: 1119 case WLAN_CIPHER_SUITE_TKIP:
1098 ret = iwl_set_tkip_dynamic_key_info(priv, keyconf, sta_id); 1120 ret = iwl_set_tkip_dynamic_key_info(priv, ctx, keyconf, sta_id);
1099 break; 1121 break;
1100 case ALG_WEP: 1122 case WLAN_CIPHER_SUITE_WEP40:
1101 ret = iwl_set_wep_dynamic_key_info(priv, keyconf, sta_id); 1123 case WLAN_CIPHER_SUITE_WEP104:
1124 ret = iwl_set_wep_dynamic_key_info(priv, ctx, keyconf, sta_id);
1102 break; 1125 break;
1103 default: 1126 default:
1104 IWL_ERR(priv, 1127 IWL_ERR(priv,
1105 "Unknown alg: %s alg = %d\n", __func__, keyconf->alg); 1128 "Unknown alg: %s cipher = %x\n", __func__,
1129 keyconf->cipher);
1106 ret = -EINVAL; 1130 ret = -EINVAL;
1107 } 1131 }
1108 1132
1109 IWL_DEBUG_WEP(priv, "Set dynamic key: alg= %d len=%d idx=%d sta=%d ret=%d\n", 1133 IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
1110 keyconf->alg, keyconf->keylen, keyconf->keyidx, 1134 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
1111 sta_id, ret); 1135 sta_id, ret);
1112 1136
1113 return ret; 1137 return ret;
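The ALG_* switch becomes a switch on mac80211's cipher suite selectors, which is why WEP now takes two case labels: WEP40 and WEP104 are distinct 802.11 suite values that share one driver path. The dispatch shape, using the standard selector constants:

#include <stdio.h>
#include <stdint.h>

#define WLAN_CIPHER_SUITE_WEP40  0x000FAC01
#define WLAN_CIPHER_SUITE_TKIP   0x000FAC02
#define WLAN_CIPHER_SUITE_CCMP   0x000FAC04
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05

static const char *cipher_name(uint32_t cipher)
{
        switch (cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
                return "ccmp";
        case WLAN_CIPHER_SUITE_TKIP:
                return "tkip";
        case WLAN_CIPHER_SUITE_WEP40:   /* two selectors, one WEP path */
        case WLAN_CIPHER_SUITE_WEP104:
                return "wep";
        default:
                return "unknown";
        }
}

int main(void)
{
        printf("%s\n", cipher_name(WLAN_CIPHER_SUITE_WEP104)); /* wep */
        return 0;
}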
@@ -1147,16 +1171,16 @@ static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
1147 * RXON flags are updated and when LQ command is updated. 1171 * RXON flags are updated and when LQ command is updated.
1148 */ 1172 */
1149static bool is_lq_table_valid(struct iwl_priv *priv, 1173static bool is_lq_table_valid(struct iwl_priv *priv,
1174 struct iwl_rxon_context *ctx,
1150 struct iwl_link_quality_cmd *lq) 1175 struct iwl_link_quality_cmd *lq)
1151{ 1176{
1152 int i; 1177 int i;
1153 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
1154 1178
1155 if (ht_conf->is_ht) 1179 if (ctx->ht.enabled)
1156 return true; 1180 return true;
1157 1181
1158 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n", 1182 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
1159 priv->active_rxon.channel); 1183 ctx->active.channel);
1160 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 1184 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
1161 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) { 1185 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
1162 IWL_DEBUG_INFO(priv, 1186 IWL_DEBUG_INFO(priv,
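The validity rule itself is unchanged by the move from priv->current_ht_config to ctx->ht.enabled: on a non-HT channel, no rate-scale entry may carry the HT bit, since the uCode rejects such a mismatch. Roughly (the mask value here is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LINK_QUAL_MAX_RETRY_NUM 16
#define RATE_MCS_HT_MSK         0x0100  /* illustrative bit position */

/* An HT rate in the table is only legal if the context runs HT. */
static bool lq_table_valid(bool ht_enabled,
                           const uint32_t rs_table[LINK_QUAL_MAX_RETRY_NUM])
{
        if (ht_enabled)
                return true;
        for (int i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
                if (rs_table[i] & RATE_MCS_HT_MSK)
                        return false;
        return true;
}

int main(void)
{
        uint32_t rs[LINK_QUAL_MAX_RETRY_NUM] = { [3] = RATE_MCS_HT_MSK };

        printf("%d\n", lq_table_valid(false, rs));      /* 0: invalid */
        return 0;
}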
@@ -1178,7 +1202,7 @@ static bool is_lq_table_valid(struct iwl_priv *priv,
1178 * this case to clear the state indicating that station creation is in 1202 * this case to clear the state indicating that station creation is in
1179 * progress. 1203 * progress.
1180 */ 1204 */
1181int iwl_send_lq_cmd(struct iwl_priv *priv, 1205int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
1182 struct iwl_link_quality_cmd *lq, u8 flags, bool init) 1206 struct iwl_link_quality_cmd *lq, u8 flags, bool init)
1183{ 1207{
1184 int ret = 0; 1208 int ret = 0;
@@ -1197,7 +1221,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
1197 iwl_dump_lq_cmd(priv, lq); 1221 iwl_dump_lq_cmd(priv, lq);
1198 BUG_ON(init && (cmd.flags & CMD_ASYNC)); 1222 BUG_ON(init && (cmd.flags & CMD_ASYNC));
1199 1223
1200 if (is_lq_table_valid(priv, lq)) 1224 if (is_lq_table_valid(priv, ctx, lq))
1201 ret = iwl_send_cmd(priv, &cmd); 1225 ret = iwl_send_cmd(priv, &cmd);
1202 else 1226 else
1203 ret = -EINVAL; 1227 ret = -EINVAL;
@@ -1223,14 +1247,15 @@ EXPORT_SYMBOL(iwl_send_lq_cmd);
1223 * and marks it driver active, so that it will be restored to the 1247 * and marks it driver active, so that it will be restored to the
1224 * device at the next best time. 1248 * device at the next best time.
1225 */ 1249 */
1226int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq) 1250int iwl_alloc_bcast_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
1251 bool init_lq)
1227{ 1252{
1228 struct iwl_link_quality_cmd *link_cmd; 1253 struct iwl_link_quality_cmd *link_cmd;
1229 unsigned long flags; 1254 unsigned long flags;
1230 u8 sta_id; 1255 u8 sta_id;
1231 1256
1232 spin_lock_irqsave(&priv->sta_lock, flags); 1257 spin_lock_irqsave(&priv->sta_lock, flags);
1233 sta_id = iwl_prep_station(priv, iwl_bcast_addr, false, NULL); 1258 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
1234 if (sta_id == IWL_INVALID_STATION) { 1259 if (sta_id == IWL_INVALID_STATION) {
1235 IWL_ERR(priv, "Unable to prepare broadcast station\n"); 1260 IWL_ERR(priv, "Unable to prepare broadcast station\n");
1236 spin_unlock_irqrestore(&priv->sta_lock, flags); 1261 spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -1265,11 +1290,12 @@ EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station);
1265 * Only used by iwlagn. Placed here to have all bcast station management 1290 * Only used by iwlagn. Placed here to have all bcast station management
1266 * code together. 1291 * code together.
1267 */ 1292 */
1268int iwl_update_bcast_station(struct iwl_priv *priv) 1293static int iwl_update_bcast_station(struct iwl_priv *priv,
1294 struct iwl_rxon_context *ctx)
1269{ 1295{
1270 unsigned long flags; 1296 unsigned long flags;
1271 struct iwl_link_quality_cmd *link_cmd; 1297 struct iwl_link_quality_cmd *link_cmd;
1272 u8 sta_id = priv->hw_params.bcast_sta_id; 1298 u8 sta_id = ctx->bcast_sta_id;
1273 1299
1274 link_cmd = iwl_sta_alloc_lq(priv, sta_id); 1300 link_cmd = iwl_sta_alloc_lq(priv, sta_id);
1275 if (!link_cmd) { 1301 if (!link_cmd) {
@@ -1287,9 +1313,23 @@ int iwl_update_bcast_station(struct iwl_priv *priv)
1287 1313
1288 return 0; 1314 return 0;
1289} 1315}
1290EXPORT_SYMBOL_GPL(iwl_update_bcast_station);
1291 1316
1292void iwl_dealloc_bcast_station(struct iwl_priv *priv) 1317int iwl_update_bcast_stations(struct iwl_priv *priv)
1318{
1319 struct iwl_rxon_context *ctx;
1320 int ret = 0;
1321
1322 for_each_context(priv, ctx) {
1323 ret = iwl_update_bcast_station(priv, ctx);
1324 if (ret)
1325 break;
1326 }
1327
1328 return ret;
1329}
1330EXPORT_SYMBOL_GPL(iwl_update_bcast_stations);
1331
1332void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
1293{ 1333{
1294 unsigned long flags; 1334 unsigned long flags;
1295 int i; 1335 int i;
@@ -1307,7 +1347,7 @@ void iwl_dealloc_bcast_station(struct iwl_priv *priv)
1307 } 1347 }
1308 spin_unlock_irqrestore(&priv->sta_lock, flags); 1348 spin_unlock_irqrestore(&priv->sta_lock, flags);
1309} 1349}
1310EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_station); 1350EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
1311 1351
1312/** 1352/**
1313 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table 1353 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
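iwl_update_bcast_station becomes a static helper and the exported iwl_update_bcast_stations walks every RXON context, stopping at the first failure; iwl_clear_driver_stations in the header below gets the same loop for per-context key state. Assuming for_each_context simply iterates the fixed contexts array (the real macro lives in the driver headers and may skip inactive contexts), the pattern is:

#include <stdio.h>

#define NUM_CTX 2

struct ctx  { int id; };
struct priv { struct ctx contexts[NUM_CTX]; };

/* Illustrative stand-in for the driver's for_each_context() macro. */
#define for_each_context(p, c) \
        for ((c) = (p)->contexts; (c) < (p)->contexts + NUM_CTX; (c)++)

static int update_one(struct ctx *c) { return c->id < 0 ? -1 : 0; }

/* Stop at the first per-context failure, as the new code does. */
static int update_all(struct priv *p)
{
        struct ctx *c;
        int ret = 0;

        for_each_context(p, c) {
                ret = update_one(c);
                if (ret)
                        break;
        }
        return ret;
}

int main(void)
{
        struct priv p = { .contexts = { {0}, {1} } };

        printf("%d\n", update_all(&p));
        return 0;
}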
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index d38a350ba0bd..56bad3f60d81 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -44,32 +44,37 @@
44 44
45 45
46int iwl_remove_default_wep_key(struct iwl_priv *priv, 46int iwl_remove_default_wep_key(struct iwl_priv *priv,
47 struct iwl_rxon_context *ctx,
47 struct ieee80211_key_conf *key); 48 struct ieee80211_key_conf *key);
48int iwl_set_default_wep_key(struct iwl_priv *priv, 49int iwl_set_default_wep_key(struct iwl_priv *priv,
50 struct iwl_rxon_context *ctx,
49 struct ieee80211_key_conf *key); 51 struct ieee80211_key_conf *key);
50int iwl_restore_default_wep_keys(struct iwl_priv *priv); 52int iwl_restore_default_wep_keys(struct iwl_priv *priv,
51int iwl_set_dynamic_key(struct iwl_priv *priv, 53 struct iwl_rxon_context *ctx);
54int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
52 struct ieee80211_key_conf *key, u8 sta_id); 55 struct ieee80211_key_conf *key, u8 sta_id);
53int iwl_remove_dynamic_key(struct iwl_priv *priv, 56int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
54 struct ieee80211_key_conf *key, u8 sta_id); 57 struct ieee80211_key_conf *key, u8 sta_id);
55void iwl_update_tkip_key(struct iwl_priv *priv, 58void iwl_update_tkip_key(struct iwl_priv *priv,
56 struct ieee80211_key_conf *keyconf, 59 struct iwl_rxon_context *ctx,
57 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); 60 struct ieee80211_key_conf *keyconf,
58 61 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
59void iwl_restore_stations(struct iwl_priv *priv); 62
60void iwl_clear_ucode_stations(struct iwl_priv *priv); 63void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
61int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq); 64void iwl_clear_ucode_stations(struct iwl_priv *priv,
62void iwl_dealloc_bcast_station(struct iwl_priv *priv); 65 struct iwl_rxon_context *ctx);
63int iwl_update_bcast_station(struct iwl_priv *priv); 66int iwl_alloc_bcast_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
67 bool init_lq);
68void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
69int iwl_update_bcast_stations(struct iwl_priv *priv);
64int iwl_get_free_ucode_key_index(struct iwl_priv *priv); 70int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
65int iwl_send_add_sta(struct iwl_priv *priv, 71int iwl_send_add_sta(struct iwl_priv *priv,
66 struct iwl_addsta_cmd *sta, u8 flags); 72 struct iwl_addsta_cmd *sta, u8 flags);
67int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs, 73int iwl_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
68 u8 *sta_id_r); 74 const u8 *addr, bool init_rs, u8 *sta_id_r);
69int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr, 75int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
70 bool is_ap, 76 const u8 *addr, bool is_ap,
71 struct ieee80211_sta_ht_cap *ht_info, 77 struct ieee80211_sta *sta, u8 *sta_id_r);
72 u8 *sta_id_r);
73int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id, 78int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
74 const u8 *addr); 79 const u8 *addr);
75int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 80int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -94,20 +99,25 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
94static inline void iwl_clear_driver_stations(struct iwl_priv *priv) 99static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
95{ 100{
96 unsigned long flags; 101 unsigned long flags;
102 struct iwl_rxon_context *ctx;
97 103
98 spin_lock_irqsave(&priv->sta_lock, flags); 104 spin_lock_irqsave(&priv->sta_lock, flags);
99 memset(priv->stations, 0, sizeof(priv->stations)); 105 memset(priv->stations, 0, sizeof(priv->stations));
100 priv->num_stations = 0; 106 priv->num_stations = 0;
101 107
102 /*
103 * Remove all key information that is not stored as part of station
104 * information since mac80211 may not have had a
105 * chance to remove all the keys. When device is reconfigured by
106 * mac80211 after an error all keys will be reconfigured.
107 */
108 priv->ucode_key_table = 0; 108 priv->ucode_key_table = 0;
109 priv->key_mapping_key = 0; 109
110 memset(priv->wep_keys, 0, sizeof(priv->wep_keys)); 110 for_each_context(priv, ctx) {
111 /*
112 * Remove all key information that is not stored as part
113 * of station information since mac80211 may not have had
114 * a chance to remove all the keys. When device is
115 * reconfigured by mac80211 after an error all keys will
116 * be reconfigured.
117 */
118 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
119 ctx->key_mapping_keys = 0;
120 }
111 121
112 spin_unlock_irqrestore(&priv->sta_lock, flags); 122 spin_unlock_irqrestore(&priv->sta_lock, flags);
113} 123}
@@ -123,6 +133,7 @@ static inline int iwl_sta_id(struct ieee80211_sta *sta)
123/** 133/**
124 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta 134 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
125 * @priv: iwl priv 135 * @priv: iwl priv
136 * @context: the current context
126 * @sta: mac80211 station 137 * @sta: mac80211 station
127 * 138 *
128 * In certain circumstances mac80211 passes a station pointer 139 * In certain circumstances mac80211 passes a station pointer
@@ -131,12 +142,13 @@ static inline int iwl_sta_id(struct ieee80211_sta *sta)
131 * inline wraps that pattern. 142 * inline wraps that pattern.
132 */ 143 */
133static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv, 144static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
145 struct iwl_rxon_context *context,
134 struct ieee80211_sta *sta) 146 struct ieee80211_sta *sta)
135{ 147{
136 int sta_id; 148 int sta_id;
137 149
138 if (!sta) 150 if (!sta)
139 return priv->hw_params.bcast_sta_id; 151 return context->bcast_sta_id;
140 152
141 sta_id = iwl_sta_id(sta); 153 sta_id = iwl_sta_id(sta);
142 154
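The inline's contract is unchanged: a NULL mac80211 station means a multicast/broadcast frame, so the caller now gets the broadcast station of the supplied context instead of the old global priv->hw_params.bcast_sta_id. Reduced to its essentials:

#include <stdio.h>

struct ctx { unsigned char bcast_sta_id; };
struct sta { unsigned char sta_id; };

/* NULL station: multicast/broadcast, use the context's bcast entry. */
static unsigned char sta_id_or_broadcast(const struct ctx *ctx,
                                         const struct sta *sta)
{
        if (!sta)
                return ctx->bcast_sta_id;
        return sta->sta_id;
}

int main(void)
{
        struct ctx bss = { .bcast_sta_id = 15 };

        printf("%u\n", sta_id_or_broadcast(&bss, NULL)); /* 15 */
        return 0;
}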
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index a81989c06983..3290b1552f5a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -134,7 +134,7 @@ EXPORT_SYMBOL(iwl_tx_queue_free);
134 */ 134 */
135void iwl_cmd_queue_free(struct iwl_priv *priv) 135void iwl_cmd_queue_free(struct iwl_priv *priv)
136{ 136{
137 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 137 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
138 struct iwl_queue *q = &txq->q; 138 struct iwl_queue *q = &txq->q;
139 struct device *dev = &priv->pci_dev->dev; 139 struct device *dev = &priv->pci_dev->dev;
140 int i; 140 int i;
@@ -271,7 +271,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
271 271
272 /* Driver private data, only for Tx (not command) queues, 272 /* Driver private data, only for Tx (not command) queues,
273 * not shared with device. */ 273 * not shared with device. */
274 if (id != IWL_CMD_QUEUE_NUM) { 274 if (id != priv->cmd_queue) {
275 txq->txb = kzalloc(sizeof(txq->txb[0]) * 275 txq->txb = kzalloc(sizeof(txq->txb[0]) *
276 TFD_QUEUE_SIZE_MAX, GFP_KERNEL); 276 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
277 if (!txq->txb) { 277 if (!txq->txb) {
@@ -314,13 +314,13 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
314 314
315 /* 315 /*
316 * Alloc buffer array for commands (Tx or other types of commands). 316 * Alloc buffer array for commands (Tx or other types of commands).
317 * For the command queue (#4), allocate command space + one big 317 * For the command queue (#4/#9), allocate command space + one big
318 * command for scan, since the scan command is very large; the system will 318 * command for scan, since the scan command is very large; the system will
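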
319 * not have two scans at the same time, so only one is needed. 319 * not have two scans at the same time, so only one is needed.
320 * For normal Tx queues (all other queues), no super-size command 320 * For normal Tx queues (all other queues), no super-size command
321 * space is needed. 321 * space is needed.
322 */ 322 */
323 if (txq_id == IWL_CMD_QUEUE_NUM) 323 if (txq_id == priv->cmd_queue)
324 actual_slots++; 324 actual_slots++;
325 325
326 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots, 326 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
@@ -355,7 +355,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
355 * need an swq_id so don't set one to catch errors, all others can 355 * need an swq_id so don't set one to catch errors, all others can
356 * be set up to the identity mapping. 356 * be set up to the identity mapping.
357 */ 357 */
358 if (txq_id != IWL_CMD_QUEUE_NUM) 358 if (txq_id != priv->cmd_queue)
359 txq->swq_id = txq_id; 359 txq->swq_id = txq_id;
360 360
361 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 361 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
@@ -385,7 +385,7 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
385{ 385{
386 int actual_slots = slots_num; 386 int actual_slots = slots_num;
387 387
388 if (txq_id == IWL_CMD_QUEUE_NUM) 388 if (txq_id == priv->cmd_queue)
389 actual_slots++; 389 actual_slots++;
390 390
391 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots); 391 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
@@ -413,7 +413,7 @@ EXPORT_SYMBOL(iwl_tx_queue_reset);
413 */ 413 */
414int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) 414int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
415{ 415{
416 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 416 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
417 struct iwl_queue *q = &txq->q; 417 struct iwl_queue *q = &txq->q;
418 struct iwl_device_cmd *out_cmd; 418 struct iwl_device_cmd *out_cmd;
419 struct iwl_cmd_meta *out_meta; 419 struct iwl_cmd_meta *out_meta;
@@ -422,6 +422,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
422 int len; 422 int len;
423 u32 idx; 423 u32 idx;
424 u16 fix_size; 424 u16 fix_size;
425 bool is_ct_kill = false;
425 426
426 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); 427 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
427 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); 428 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
@@ -443,9 +444,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
443 444
444 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 445 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
445 IWL_ERR(priv, "No space in command queue\n"); 446 IWL_ERR(priv, "No space in command queue\n");
446 if (iwl_within_ct_kill_margin(priv)) 447 if (priv->cfg->ops->lib->tt_ops.ct_kill_check) {
447 iwl_tt_enter_ct_kill(priv); 448 is_ct_kill =
448 else { 449 priv->cfg->ops->lib->tt_ops.ct_kill_check(priv);
450 }
451 if (!is_ct_kill) {
449 IWL_ERR(priv, "Restarting adapter due to queue full\n"); 452 IWL_ERR(priv, "Restarting adapter due to queue full\n");
450 queue_work(priv->workqueue, &priv->restart); 453 queue_work(priv->workqueue, &priv->restart);
451 } 454 }
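The queue-full path stops calling the thermal-throttling helpers directly and instead probes an optional per-device hook; only when no hook exists or the hook reports no CT-kill does the driver escalate to a full restart. The guarded-callback idiom in isolation:

#include <stdbool.h>
#include <stdio.h>

struct tt_ops { bool (*ct_kill_check)(void *priv); };

/* Returns true when a restart should be scheduled. */
static bool queue_full_should_restart(const struct tt_ops *ops, void *priv)
{
        bool is_ct_kill = false;

        if (ops->ct_kill_check)                 /* hook is optional */
                is_ct_kill = ops->ct_kill_check(priv);
        return !is_ct_kill;                     /* CT-kill explains the stall */
}

static bool always_hot(void *priv) { (void)priv; return true; }

int main(void)
{
        struct tt_ops none = { 0 }, hot = { .ct_kill_check = always_hot };

        printf("%d %d\n", queue_full_should_restart(&none, NULL),
                          queue_full_should_restart(&hot, NULL)); /* 1 0 */
        return 0;
}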
@@ -480,7 +483,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
480 * information */ 483 * information */
481 484
482 out_cmd->hdr.flags = 0; 485 out_cmd->hdr.flags = 0;
483 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | 486 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
484 INDEX_TO_SEQ(q->write_ptr)); 487 INDEX_TO_SEQ(q->write_ptr));
485 if (cmd->flags & CMD_SIZE_HUGE) 488 if (cmd->flags & CMD_SIZE_HUGE)
486 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; 489 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
@@ -497,15 +500,15 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
497 get_cmd_string(out_cmd->hdr.cmd), 500 get_cmd_string(out_cmd->hdr.cmd),
498 out_cmd->hdr.cmd, 501 out_cmd->hdr.cmd,
499 le16_to_cpu(out_cmd->hdr.sequence), fix_size, 502 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
500 q->write_ptr, idx, IWL_CMD_QUEUE_NUM); 503 q->write_ptr, idx, priv->cmd_queue);
501 break; 504 break;
502 default: 505 default:
503 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, " 506 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
504 "%d bytes at %d[%d]:%d\n", 507 "%d bytes at %d[%d]:%d\n",
505 get_cmd_string(out_cmd->hdr.cmd), 508 get_cmd_string(out_cmd->hdr.cmd),
506 out_cmd->hdr.cmd, 509 out_cmd->hdr.cmd,
507 le16_to_cpu(out_cmd->hdr.sequence), fix_size, 510 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
508 q->write_ptr, idx, IWL_CMD_QUEUE_NUM); 511 q->write_ptr, idx, priv->cmd_queue);
509 } 512 }
510#endif 513#endif
511 txq->need_update = 1; 514 txq->need_update = 1;
@@ -584,16 +587,16 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
584 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); 587 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
585 struct iwl_device_cmd *cmd; 588 struct iwl_device_cmd *cmd;
586 struct iwl_cmd_meta *meta; 589 struct iwl_cmd_meta *meta;
587 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 590 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
588 591
589 /* If a Tx command is being handled and it isn't in the actual 592 /* If a Tx command is being handled and it isn't in the actual
590 * command queue then a command routing bug has been introduced 593 * command queue then a command routing bug has been introduced
591 * in the queue management code. */ 594 * in the queue management code. */
592 if (WARN(txq_id != IWL_CMD_QUEUE_NUM, 595 if (WARN(txq_id != priv->cmd_queue,
593 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n", 596 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
594 txq_id, sequence, 597 txq_id, priv->cmd_queue, sequence,
595 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr, 598 priv->txq[priv->cmd_queue].q.read_ptr,
596 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) { 599 priv->txq[priv->cmd_queue].q.write_ptr)) {
597 iwl_print_hex_error(priv, pkt, 32); 600 iwl_print_hex_error(priv, pkt, 32);
598 return; 601 return;
599 } 602 }
@@ -663,8 +666,8 @@ const char *iwl_get_tx_fail_reason(u32 status)
663 TX_STATUS_FAIL(TID_DISABLE); 666 TX_STATUS_FAIL(TID_DISABLE);
664 TX_STATUS_FAIL(FIFO_FLUSHED); 667 TX_STATUS_FAIL(FIFO_FLUSHED);
665 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL); 668 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
666 TX_STATUS_FAIL(FW_DROP); 669 TX_STATUS_FAIL(PASSIVE_NO_RX);
667 TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP); 670 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
668 } 671 }
669 672
670 return "UNKNOWN"; 673 return "UNKNOWN";
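Every hard-coded IWL_CMD_QUEUE_NUM in this file becomes a read of priv->cmd_queue, because the command queue sits at #4 on older devices and #9 on newer ones, and the queue number is baked into each command's sequence field. Assuming the 5-bit-queue/8-bit-index split the driver uses elsewhere, the round trip looks like:

#include <stdint.h>
#include <stdio.h>

/* Sequence-field packing: queue in bits 8..12, ring index in bits 0..7. */
#define QUEUE_TO_SEQ(q)  (((q) & 0x1f) << 8)
#define INDEX_TO_SEQ(i)  ((i) & 0xff)
#define SEQ_TO_QUEUE(s)  (((s) >> 8) & 0x1f)
#define SEQ_TO_INDEX(s)  ((s) & 0xff)

int main(void)
{
        int cmd_queue = 9;      /* #4 on older devices, #9 on newer ones */
        uint16_t seq = QUEUE_TO_SEQ(cmd_queue) | INDEX_TO_SEQ(42);

        printf("queue=%d index=%d\n", SEQ_TO_QUEUE(seq), SEQ_TO_INDEX(seq));
        return 0;
}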
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 59a308b02f95..116777122a79 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -33,6 +33,7 @@
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/pci.h> 35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
36#include <linux/slab.h> 37#include <linux/slab.h>
37#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
38#include <linux/delay.h> 39#include <linux/delay.h>
@@ -143,7 +144,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
143 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); 144 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
144 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 145 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
145 146
146 if (sta_id == priv->hw_params.bcast_sta_id) 147 if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
147 key_flags |= STA_KEY_MULTICAST_MSK; 148 key_flags |= STA_KEY_MULTICAST_MSK;
148 149
149 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 150 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -151,7 +152,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
151 key_flags &= ~STA_KEY_FLG_INVALID; 152 key_flags &= ~STA_KEY_FLG_INVALID;
152 153
153 spin_lock_irqsave(&priv->sta_lock, flags); 154 spin_lock_irqsave(&priv->sta_lock, flags);
154 priv->stations[sta_id].keyinfo.alg = keyconf->alg; 155 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
155 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; 156 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
156 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 157 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
157 keyconf->keylen); 158 keyconf->keylen);
@@ -222,23 +223,25 @@ static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
222 223
223 keyconf->hw_key_idx = HW_KEY_DYNAMIC; 224 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
224 225
225 switch (keyconf->alg) { 226 switch (keyconf->cipher) {
226 case ALG_CCMP: 227 case WLAN_CIPHER_SUITE_CCMP:
227 ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id); 228 ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
228 break; 229 break;
229 case ALG_TKIP: 230 case WLAN_CIPHER_SUITE_TKIP:
230 ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id); 231 ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
231 break; 232 break;
232 case ALG_WEP: 233 case WLAN_CIPHER_SUITE_WEP40:
234 case WLAN_CIPHER_SUITE_WEP104:
233 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id); 235 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
234 break; 236 break;
235 default: 237 default:
236 IWL_ERR(priv, "Unknown alg: %s alg = %d\n", __func__, keyconf->alg); 238 IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
239 keyconf->cipher);
237 ret = -EINVAL; 240 ret = -EINVAL;
238 } 241 }
239 242
240 IWL_DEBUG_WEP(priv, "Set dynamic key: alg= %d len=%d idx=%d sta=%d ret=%d\n", 243 IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
241 keyconf->alg, keyconf->keylen, keyconf->keyidx, 244 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
242 sta_id, ret); 245 sta_id, ret);
243 246
244 return ret; 247 return ret;
@@ -254,10 +257,11 @@ static int iwl3945_remove_static_key(struct iwl_priv *priv)
254static int iwl3945_set_static_key(struct iwl_priv *priv, 257static int iwl3945_set_static_key(struct iwl_priv *priv,
255 struct ieee80211_key_conf *key) 258 struct ieee80211_key_conf *key)
256{ 259{
257 if (key->alg == ALG_WEP) 260 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
261 key->cipher == WLAN_CIPHER_SUITE_WEP104)
258 return -EOPNOTSUPP; 262 return -EOPNOTSUPP;
259 263
260 IWL_ERR(priv, "Static key invalid: alg %d\n", key->alg); 264 IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
261 return -EINVAL; 265 return -EINVAL;
262} 266}
263 267
@@ -313,7 +317,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
313 int left) 317 int left)
314{ 318{
315 319
316 if (!iwl_is_associated(priv) || !priv->ibss_beacon) 320 if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->ibss_beacon)
317 return 0; 321 return 0;
318 322
319 if (priv->ibss_beacon->len > left) 323 if (priv->ibss_beacon->len > left)
@@ -339,7 +343,8 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
339 return -ENOMEM; 343 return -ENOMEM;
340 } 344 }
341 345
342 rate = iwl_rate_get_lowest_plcp(priv); 346 rate = iwl_rate_get_lowest_plcp(priv,
347 &priv->contexts[IWL_RXON_CTX_BSS]);
343 348
344 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); 349 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
345 350
@@ -369,23 +374,25 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
369 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 374 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
370 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; 375 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
371 376
372 switch (keyinfo->alg) { 377 tx_cmd->sec_ctl = 0;
373 case ALG_CCMP: 378
379 switch (keyinfo->cipher) {
380 case WLAN_CIPHER_SUITE_CCMP:
374 tx_cmd->sec_ctl = TX_CMD_SEC_CCM; 381 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
375 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); 382 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
376 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); 383 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
377 break; 384 break;
378 385
379 case ALG_TKIP: 386 case WLAN_CIPHER_SUITE_TKIP:
380 break; 387 break;
381 388
382 case ALG_WEP: 389 case WLAN_CIPHER_SUITE_WEP104:
383 tx_cmd->sec_ctl = TX_CMD_SEC_WEP | 390 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
391 /* fall through */
392 case WLAN_CIPHER_SUITE_WEP40:
393 tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
384 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; 394 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
385 395
386 if (keyinfo->keylen == 13)
387 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
388
389 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); 396 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
390 397
391 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " 398 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
@@ -393,7 +400,7 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
393 break; 400 break;
394 401
395 default: 402 default:
396 IWL_ERR(priv, "Unknown encode alg %d\n", keyinfo->alg); 403 IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
397 break; 404 break;
398 } 405 }
399} 406}
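The rewritten WEP branch replaces the old keylen == 13 test with an explicit WEP104 case that sets the 128-bit flag and deliberately falls through into the shared WEP40 code; zeroing sec_ctl up front makes the |= accumulation safe. The control flow, with illustrative flag values:

#include <stdint.h>
#include <stdio.h>

#define TX_CMD_SEC_WEP     0x01    /* flag values are illustrative */
#define TX_CMD_SEC_KEY128  0x08

enum cipher { WEP40, WEP104 };

static uint8_t wep_sec_ctl(enum cipher c)
{
        uint8_t sec_ctl = 0;

        switch (c) {
        case WEP104:
                sec_ctl |= TX_CMD_SEC_KEY128;
                /* fall through: the rest of the WEP setup is shared */
        case WEP40:
                sec_ctl |= TX_CMD_SEC_WEP;
                break;
        }
        return sec_ctl;
}

int main(void)
{
        printf("%#x %#x\n", wep_sec_ctl(WEP40), wep_sec_ctl(WEP104));
        return 0;
}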
@@ -506,7 +513,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
506 hdr_len = ieee80211_hdrlen(fc); 513 hdr_len = ieee80211_hdrlen(fc);
507 514
508 /* Find index into station table for destination station */ 515 /* Find index into station table for destination station */
509 sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta); 516 sta_id = iwl_sta_id_or_broadcast(
517 priv, &priv->contexts[IWL_RXON_CTX_BSS],
518 info->control.sta);
510 if (sta_id == IWL_INVALID_STATION) { 519 if (sta_id == IWL_INVALID_STATION) {
511 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 520 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
512 hdr->addr1); 521 hdr->addr1);
@@ -536,6 +545,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
536 /* Set up driver data for this TFD */ 545 /* Set up driver data for this TFD */
537 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 546 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
538 txq->txb[q->write_ptr].skb = skb; 547 txq->txb[q->write_ptr].skb = skb;
548 txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
539 549
540 /* Init first empty entry in queue's array of Tx/cmd buffers */ 550 /* Init first empty entry in queue's array of Tx/cmd buffers */
541 out_cmd = txq->cmd[idx]; 551 out_cmd = txq->cmd[idx];
@@ -677,11 +687,12 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
677 int rc; 687 int rc;
678 int spectrum_resp_status; 688 int spectrum_resp_status;
679 int duration = le16_to_cpu(params->duration); 689 int duration = le16_to_cpu(params->duration);
690 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
680 691
681 if (iwl_is_associated(priv)) 692 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
682 add_time = iwl_usecs_to_beacons(priv, 693 add_time = iwl_usecs_to_beacons(priv,
683 le64_to_cpu(params->start_time) - priv->_3945.last_tsf, 694 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
684 le16_to_cpu(priv->rxon_timing.beacon_interval)); 695 le16_to_cpu(ctx->timing.beacon_interval));
685 696
686 memset(&spectrum, 0, sizeof(spectrum)); 697 memset(&spectrum, 0, sizeof(spectrum));
687 698
@@ -692,18 +703,18 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
692 cmd.len = sizeof(spectrum); 703 cmd.len = sizeof(spectrum);
693 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); 704 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
694 705
695 if (iwl_is_associated(priv)) 706 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
696 spectrum.start_time = 707 spectrum.start_time =
697 iwl_add_beacon_time(priv, 708 iwl_add_beacon_time(priv,
698 priv->_3945.last_beacon_time, add_time, 709 priv->_3945.last_beacon_time, add_time,
699 le16_to_cpu(priv->rxon_timing.beacon_interval)); 710 le16_to_cpu(ctx->timing.beacon_interval));
700 else 711 else
701 spectrum.start_time = 0; 712 spectrum.start_time = 0;
702 713
703 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); 714 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
704 spectrum.channels[0].channel = params->channel; 715 spectrum.channels[0].channel = params->channel;
705 spectrum.channels[0].type = type; 716 spectrum.channels[0].type = type;
706 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK) 717 if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
707 spectrum.flags |= RXON_FLG_BAND_24G_MSK | 718 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
708 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; 719 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
709 720
@@ -792,7 +803,8 @@ static void iwl3945_bg_beacon_update(struct work_struct *work)
792 struct sk_buff *beacon; 803 struct sk_buff *beacon;
793 804
794 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 805 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
795 beacon = ieee80211_beacon_get(priv->hw, priv->vif); 806 beacon = ieee80211_beacon_get(priv->hw,
807 priv->contexts[IWL_RXON_CTX_BSS].vif);
796 808
797 if (!beacon) { 809 if (!beacon) {
798 IWL_ERR(priv, "update beacon failed\n"); 810 IWL_ERR(priv, "update beacon failed\n");
@@ -813,9 +825,9 @@ static void iwl3945_bg_beacon_update(struct work_struct *work)
813static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, 825static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
814 struct iwl_rx_mem_buffer *rxb) 826 struct iwl_rx_mem_buffer *rxb)
815{ 827{
816#ifdef CONFIG_IWLWIFI_DEBUG
817 struct iwl_rx_packet *pkt = rxb_addr(rxb); 828 struct iwl_rx_packet *pkt = rxb_addr(rxb);
818 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 829 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
830#ifdef CONFIG_IWLWIFI_DEBUG
819 u8 rate = beacon->beacon_notify_hdr.rate; 831 u8 rate = beacon->beacon_notify_hdr.rate;
820 832
821 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " 833 IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
@@ -827,6 +839,8 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
827 le32_to_cpu(beacon->low_tsf), rate); 839 le32_to_cpu(beacon->low_tsf), rate);
828#endif 840#endif
829 841
842 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
843
830 if ((priv->iw_mode == NL80211_IFTYPE_AP) && 844 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
831 (!test_bit(STATUS_EXIT_PENDING, &priv->status))) 845 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
832 queue_work(priv->workqueue, &priv->beacon_update); 846 queue_work(priv->workqueue, &priv->beacon_update);
@@ -1716,7 +1730,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1716 IWL_ERR(priv, "Microcode SW error detected. " 1730 IWL_ERR(priv, "Microcode SW error detected. "
1717 "Restarting 0x%X.\n", inta); 1731 "Restarting 0x%X.\n", inta);
1718 priv->isr_stats.sw++; 1732 priv->isr_stats.sw++;
1719 priv->isr_stats.sw_err = inta;
1720 iwl_irq_handle_error(priv); 1733 iwl_irq_handle_error(priv);
1721 handled |= CSR_INT_BIT_SW_ERR; 1734 handled |= CSR_INT_BIT_SW_ERR;
1722 } 1735 }
@@ -2460,6 +2473,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2460{ 2473{
2461 int thermal_spin = 0; 2474 int thermal_spin = 0;
2462 u32 rfkill; 2475 u32 rfkill;
2476 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2463 2477
2464 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 2478 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2465 2479
@@ -2517,22 +2531,22 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2517 2531
2518 iwl_power_update_mode(priv, true); 2532 iwl_power_update_mode(priv, true);
2519 2533
2520 if (iwl_is_associated(priv)) { 2534 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
2521 struct iwl3945_rxon_cmd *active_rxon = 2535 struct iwl3945_rxon_cmd *active_rxon =
2522 (struct iwl3945_rxon_cmd *)(&priv->active_rxon); 2536 (struct iwl3945_rxon_cmd *)(&ctx->active);
2523 2537
2524 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2538 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2525 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2539 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2526 } else { 2540 } else {
2527 /* Initialize our rx_config data */ 2541 /* Initialize our rx_config data */
2528 iwl_connection_init_rx_config(priv, NULL); 2542 iwl_connection_init_rx_config(priv, ctx);
2529 } 2543 }
2530 2544
2531 /* Configure Bluetooth device coexistence support */ 2545 /* Configure Bluetooth device coexistence support */
2532 priv->cfg->ops->hcmd->send_bt_config(priv); 2546 priv->cfg->ops->hcmd->send_bt_config(priv);
2533 2547
2534 /* Configure the adapter for unassociated operation */ 2548 /* Configure the adapter for unassociated operation */
2535 iwlcore_commit_rxon(priv); 2549 iwlcore_commit_rxon(priv, ctx);
2536 2550
2537 iwl3945_reg_txpower_periodic(priv); 2551 iwl3945_reg_txpower_periodic(priv);
2538 2552
@@ -2553,19 +2567,22 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
2553static void __iwl3945_down(struct iwl_priv *priv) 2567static void __iwl3945_down(struct iwl_priv *priv)
2554{ 2568{
2555 unsigned long flags; 2569 unsigned long flags;
2556 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 2570 int exit_pending;
2557 struct ieee80211_conf *conf = NULL;
2558 2571
2559 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); 2572 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2560 2573
2561 conf = ieee80211_get_hw_conf(priv->hw); 2574 iwl_scan_cancel_timeout(priv, 200);
2562 2575
2563 if (!exit_pending) 2576 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2564 set_bit(STATUS_EXIT_PENDING, &priv->status); 2577
2578 /* Stop the TX queues watchdog. We need the STATUS_EXIT_PENDING bit set
2579 * to prevent the timer from re-arming */
2580 if (priv->cfg->ops->lib->recover_from_tx_stall)
2581 del_timer_sync(&priv->monitor_recover);
2565 2582
2566 /* Station information will now be cleared in device */ 2583 /* Station information will now be cleared in device */
2567 iwl_clear_ucode_stations(priv); 2584 iwl_clear_ucode_stations(priv, NULL);
2568 iwl_dealloc_bcast_station(priv); 2585 iwl_dealloc_bcast_stations(priv);
2569 iwl_clear_driver_stations(priv); 2586 iwl_clear_driver_stations(priv);
2570 2587
2571 /* Unblock any waiting calls */ 2588 /* Unblock any waiting calls */
@@ -2647,7 +2664,8 @@ static int __iwl3945_up(struct iwl_priv *priv)
2647{ 2664{
2648 int rc, i; 2665 int rc, i;
2649 2666
2650 rc = iwl_alloc_bcast_station(priv, false); 2667 rc = iwl_alloc_bcast_station(priv, &priv->contexts[IWL_RXON_CTX_BSS],
2668 false);
2651 if (rc) 2669 if (rc)
2652 return rc; 2670 return rc;
2653 2671
@@ -2799,7 +2817,7 @@ static void iwl3945_rfkill_poll(struct work_struct *data)
2799 2817
2800} 2818}
2801 2819
2802void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) 2820int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2803{ 2821{
2804 struct iwl_host_cmd cmd = { 2822 struct iwl_host_cmd cmd = {
2805 .id = REPLY_SCAN_CMD, 2823 .id = REPLY_SCAN_CMD,
@@ -2807,61 +2825,19 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2807 .flags = CMD_SIZE_HUGE, 2825 .flags = CMD_SIZE_HUGE,
2808 }; 2826 };
2809 struct iwl3945_scan_cmd *scan; 2827 struct iwl3945_scan_cmd *scan;
2810 struct ieee80211_conf *conf = NULL;
2811 u8 n_probes = 0; 2828 u8 n_probes = 0;
2812 enum ieee80211_band band; 2829 enum ieee80211_band band;
2813 bool is_active = false; 2830 bool is_active = false;
2831 int ret;
2814 2832
2815 conf = ieee80211_get_hw_conf(priv->hw); 2833 lockdep_assert_held(&priv->mutex);
2816
2817 cancel_delayed_work(&priv->scan_check);
2818
2819 if (!iwl_is_ready(priv)) {
2820 IWL_WARN(priv, "request scan called when driver not ready.\n");
2821 goto done;
2822 }
2823
2824 /* Make sure the scan wasn't canceled before this queued work
2825 * was given the chance to run... */
2826 if (!test_bit(STATUS_SCANNING, &priv->status))
2827 goto done;
2828
2829 /* This should never be called or scheduled if there is currently
2830 * a scan active in the hardware. */
2831 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
2832 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests "
2833 "Ignoring second request.\n");
2834 goto done;
2835 }
2836
2837 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2838 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
2839 goto done;
2840 }
2841
2842 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2843 IWL_DEBUG_HC(priv,
2844 "Scan request while abort pending. Queuing.\n");
2845 goto done;
2846 }
2847
2848 if (iwl_is_rfkill(priv)) {
2849 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
2850 goto done;
2851 }
2852
2853 if (!test_bit(STATUS_READY, &priv->status)) {
2854 IWL_DEBUG_HC(priv,
2855 "Scan request while uninitialized. Queuing.\n");
2856 goto done;
2857 }
2858 2834
2859 if (!priv->scan_cmd) { 2835 if (!priv->scan_cmd) {
2860 priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) + 2836 priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
2861 IWL_MAX_SCAN_SIZE, GFP_KERNEL); 2837 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
2862 if (!priv->scan_cmd) { 2838 if (!priv->scan_cmd) {
2863 IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); 2839 IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
2864 goto done; 2840 return -ENOMEM;
2865 } 2841 }
2866 } 2842 }
2867 scan = priv->scan_cmd; 2843 scan = priv->scan_cmd;
@@ -2870,7 +2846,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2870 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 2846 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
2871 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 2847 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
2872 2848
2873 if (iwl_is_associated(priv)) { 2849 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
2874 u16 interval = 0; 2850 u16 interval = 0;
2875 u32 extra; 2851 u32 extra;
2876 u32 suspend_time = 100; 2852 u32 suspend_time = 100;
@@ -2931,7 +2907,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2931 /* We don't build a direct scan probe request; the uCode will do 2907 /* We don't build a direct scan probe request; the uCode will do
2932 * that based on the direct_mask added to each channel entry */ 2908 * that based on the direct_mask added to each channel entry */
2933 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 2909 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
2934 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id; 2910 scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2935 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2911 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2936 2912
2937 /* flags + rate selection */ 2913 /* flags + rate selection */
@@ -2956,7 +2932,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2956 break; 2932 break;
2957 default: 2933 default:
2958 IWL_WARN(priv, "Invalid scan band\n"); 2934 IWL_WARN(priv, "Invalid scan band\n");
2959 goto done; 2935 return -EIO;
2960 } 2936 }
2961 2937
2962 if (!priv->is_internal_short_scan) { 2938 if (!priv->is_internal_short_scan) {
@@ -2991,7 +2967,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2991 2967
2992 if (scan->channel_count == 0) { 2968 if (scan->channel_count == 0) {
2993 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); 2969 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
2994 goto done; 2970 return -EIO;
2995 } 2971 }
2996 2972
2997 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 2973 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
@@ -3000,25 +2976,10 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
3000 scan->len = cpu_to_le16(cmd.len); 2976 scan->len = cpu_to_le16(cmd.len);
3001 2977
3002 set_bit(STATUS_SCAN_HW, &priv->status); 2978 set_bit(STATUS_SCAN_HW, &priv->status);
3003 if (iwl_send_cmd_sync(priv, &cmd)) 2979 ret = iwl_send_cmd_sync(priv, &cmd);
3004 goto done; 2980 if (ret)
3005 2981 clear_bit(STATUS_SCAN_HW, &priv->status);
3006 queue_delayed_work(priv->workqueue, &priv->scan_check, 2982 return ret;
3007 IWL_SCAN_CHECK_WATCHDOG);
3008
3009 return;
3010
3011 done:
3012 /* can not perform scan make sure we clear scanning
3013 * bits from status so next scan request can be performed.
3014 * if we dont clear scanning status bit here all next scan
3015 * will fail
3016 */
3017 clear_bit(STATUS_SCAN_HW, &priv->status);
3018 clear_bit(STATUS_SCANNING, &priv->status);
3019
3020 /* inform mac80211 scan aborted */
3021 queue_work(priv->workqueue, &priv->scan_completed);
3022} 2983}
3023 2984
3024static void iwl3945_bg_restart(struct work_struct *data) 2985static void iwl3945_bg_restart(struct work_struct *data)
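
The rewritten iwl3945_request_scan() above returns an error code instead of unwinding through a local done: label, so the scan state bits can be cleared in one shared caller rather than in every driver. A minimal sketch of such a caller, assuming a driver hook along the lines of ops->utils->request_scan (the hook name and exact status handling are assumptions, not quoted from this patch):

	static int iwl_scan_initiate_sketch(struct iwl_priv *priv,
					    struct ieee80211_vif *vif)
	{
		int ret;

		set_bit(STATUS_SCANNING, &priv->status);
		ret = priv->cfg->ops->utils->request_scan(priv, vif);
		if (ret) {
			/* single unwind point for every failure path */
			clear_bit(STATUS_SCANNING, &priv->status);
			queue_work(priv->workqueue, &priv->scan_completed);
		}
		return ret;
	}

With the unwind centralized, the per-driver function only clears STATUS_SCAN_HW for the one step it set itself, as the hunk above does when iwl_send_cmd_sync() fails.
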
@@ -3029,8 +2990,10 @@ static void iwl3945_bg_restart(struct work_struct *data)
3029 return; 2990 return;
3030 2991
3031 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { 2992 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
2993 struct iwl_rxon_context *ctx;
3032 mutex_lock(&priv->mutex); 2994 mutex_lock(&priv->mutex);
3033 priv->vif = NULL; 2995 for_each_context(priv, ctx)
2996 ctx->vif = NULL;
3034 priv->is_open = 0; 2997 priv->is_open = 0;
3035 mutex_unlock(&priv->mutex); 2998 mutex_unlock(&priv->mutex);
3036 iwl3945_down(priv); 2999 iwl3945_down(priv);
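
for_each_context() in the hunk above visits only the RXON contexts a device has declared valid. A plausible definition, assuming valid_contexts is the bitmap filled in by the probe code later in this patch (a sketch, not quoted from the iwlwifi headers):

	#define for_each_context(priv, ctx)				\
		for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
		     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
			if (priv->valid_contexts & BIT(ctx->ctxid))

On a 3945, valid_contexts is BIT(IWL_RXON_CTX_BSS) alone, so the loop body runs exactly once and the behavior matches the old priv->vif = NULL assignment.
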
@@ -3064,6 +3027,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3064{ 3027{
3065 int rc = 0; 3028 int rc = 0;
3066 struct ieee80211_conf *conf = NULL; 3029 struct ieee80211_conf *conf = NULL;
3030 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3067 3031
3068 if (!vif || !priv->is_open) 3032 if (!vif || !priv->is_open)
3069 return; 3033 return;
@@ -3074,7 +3038,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3074 } 3038 }
3075 3039
3076 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 3040 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3077 vif->bss_conf.aid, priv->active_rxon.bssid_addr); 3041 vif->bss_conf.aid, ctx->active.bssid_addr);
3078 3042
3079 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3043 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3080 return; 3044 return;
@@ -3083,37 +3047,34 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3083 3047
3084 conf = ieee80211_get_hw_conf(priv->hw); 3048 conf = ieee80211_get_hw_conf(priv->hw);
3085 3049
3086 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3050 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3087 iwlcore_commit_rxon(priv); 3051 iwlcore_commit_rxon(priv, ctx);
3088 3052
3089 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); 3053 rc = iwl_send_rxon_timing(priv, ctx);
3090 iwl_setup_rxon_timing(priv, vif);
3091 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3092 sizeof(priv->rxon_timing), &priv->rxon_timing);
3093 if (rc) 3054 if (rc)
3094 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3055 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3095 "Attempting to continue.\n"); 3056 "Attempting to continue.\n");
3096 3057
3097 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3058 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3098 3059
3099 priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid); 3060 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
3100 3061
3101 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", 3062 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3102 vif->bss_conf.aid, vif->bss_conf.beacon_int); 3063 vif->bss_conf.aid, vif->bss_conf.beacon_int);
3103 3064
3104 if (vif->bss_conf.use_short_preamble) 3065 if (vif->bss_conf.use_short_preamble)
3105 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3066 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3106 else 3067 else
3107 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 3068 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3108 3069
3109 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3070 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3110 if (vif->bss_conf.use_short_slot) 3071 if (vif->bss_conf.use_short_slot)
3111 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 3072 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3112 else 3073 else
3113 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3074 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3114 } 3075 }
3115 3076
3116 iwlcore_commit_rxon(priv); 3077 iwlcore_commit_rxon(priv, ctx);
3117 3078
3118 switch (vif->type) { 3079 switch (vif->type) {
3119 case NL80211_IFTYPE_STATION: 3080 case NL80211_IFTYPE_STATION:
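
iwl_send_rxon_timing() folds the memset/iwl_setup_rxon_timing/iwl_send_cmd_pdu triple that this path (and iwl3945_config_ap() below) used to open-code into one context-aware helper. Roughly, under the assumption that the timing command now lives inside the context (a sketch; the real helper also derives beacon parameters from the vif):

	static int iwl_send_rxon_timing_sketch(struct iwl_priv *priv,
					       struct iwl_rxon_context *ctx)
	{
		memset(&ctx->timing, 0, sizeof(ctx->timing));
		/* fill beacon interval / TSF fields from ctx->vif (assumed) */
		iwl_setup_rxon_timing(priv, ctx->vif);
		return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
					sizeof(ctx->timing), &ctx->timing);
	}

Because the command ID comes from ctx->rxon_timing_cmd, the same helper serves whichever contexts the hardware supports.
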
@@ -3212,15 +3173,6 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
3212 3173
3213 priv->is_open = 0; 3174 priv->is_open = 0;
3214 3175
3215 if (iwl_is_ready_rf(priv)) {
3216 /* stop mac, cancel any scan request and clear
3217 * RXON_FILTER_ASSOC_MSK BIT
3218 */
3219 mutex_lock(&priv->mutex);
3220 iwl_scan_cancel_timeout(priv, 100);
3221 mutex_unlock(&priv->mutex);
3222 }
3223
3224 iwl3945_down(priv); 3176 iwl3945_down(priv);
3225 3177
3226 flush_workqueue(priv->workqueue); 3178 flush_workqueue(priv->workqueue);
@@ -3250,48 +3202,45 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3250 3202
3251void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) 3203void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3252{ 3204{
3205 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3253 int rc = 0; 3206 int rc = 0;
3254 3207
3255 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3208 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3256 return; 3209 return;
3257 3210
3258 /* The following should be done only at AP bring up */ 3211 /* The following should be done only at AP bring up */
3259 if (!(iwl_is_associated(priv))) { 3212 if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) {
3260 3213
3261 /* RXON - unassoc (to set timing command) */ 3214 /* RXON - unassoc (to set timing command) */
3262 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3215 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3263 iwlcore_commit_rxon(priv); 3216 iwlcore_commit_rxon(priv, ctx);
3264 3217
3265 /* RXON Timing */ 3218 /* RXON Timing */
3266 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); 3219 rc = iwl_send_rxon_timing(priv, ctx);
3267 iwl_setup_rxon_timing(priv, vif);
3268 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3269 sizeof(priv->rxon_timing),
3270 &priv->rxon_timing);
3271 if (rc) 3220 if (rc)
3272 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3221 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3273 "Attempting to continue.\n"); 3222 "Attempting to continue.\n");
3274 3223
3275 priv->staging_rxon.assoc_id = 0; 3224 ctx->staging.assoc_id = 0;
3276 3225
3277 if (vif->bss_conf.use_short_preamble) 3226 if (vif->bss_conf.use_short_preamble)
3278 priv->staging_rxon.flags |= 3227 ctx->staging.flags |=
3279 RXON_FLG_SHORT_PREAMBLE_MSK; 3228 RXON_FLG_SHORT_PREAMBLE_MSK;
3280 else 3229 else
3281 priv->staging_rxon.flags &= 3230 ctx->staging.flags &=
3282 ~RXON_FLG_SHORT_PREAMBLE_MSK; 3231 ~RXON_FLG_SHORT_PREAMBLE_MSK;
3283 3232
3284 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3233 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3285 if (vif->bss_conf.use_short_slot) 3234 if (vif->bss_conf.use_short_slot)
3286 priv->staging_rxon.flags |= 3235 ctx->staging.flags |=
3287 RXON_FLG_SHORT_SLOT_MSK; 3236 RXON_FLG_SHORT_SLOT_MSK;
3288 else 3237 else
3289 priv->staging_rxon.flags &= 3238 ctx->staging.flags &=
3290 ~RXON_FLG_SHORT_SLOT_MSK; 3239 ~RXON_FLG_SHORT_SLOT_MSK;
3291 } 3240 }
3292 /* restore RXON assoc */ 3241 /* restore RXON assoc */
3293 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3242 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3294 iwlcore_commit_rxon(priv); 3243 iwlcore_commit_rxon(priv, ctx);
3295 } 3244 }
3296 iwl3945_send_beacon_cmd(priv); 3245 iwl3945_send_beacon_cmd(priv);
3297 3246
@@ -3317,10 +3266,11 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3317 return -EOPNOTSUPP; 3266 return -EOPNOTSUPP;
3318 } 3267 }
3319 3268
3320 static_key = !iwl_is_associated(priv); 3269 static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS);
3321 3270
3322 if (!static_key) { 3271 if (!static_key) {
3323 sta_id = iwl_sta_id_or_broadcast(priv, sta); 3272 sta_id = iwl_sta_id_or_broadcast(
3273 priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
3324 if (sta_id == IWL_INVALID_STATION) 3274 if (sta_id == IWL_INVALID_STATION)
3325 return -EINVAL; 3275 return -EINVAL;
3326 } 3276 }
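
With a context argument, iwl_sta_id_or_broadcast() can resolve group keys to the broadcast station belonging to that context rather than to a device-global one. The intent, in miniature (a sketch; iwl_sta_id() here stands for however the driver maps an ieee80211_sta to its station-table index):

	static u8 sta_id_or_broadcast_sketch(struct iwl_rxon_context *ctx,
					     struct ieee80211_sta *sta)
	{
		/* pairwise keys target the peer's entry; group keys fall
		 * back to the per-context broadcast station */
		return sta ? iwl_sta_id(sta) : ctx->bcast_sta_id;
	}
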
@@ -3371,8 +3321,8 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3371 sta_priv->common.sta_id = IWL_INVALID_STATION; 3321 sta_priv->common.sta_id = IWL_INVALID_STATION;
3372 3322
3373 3323
3374 ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap, 3324 ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS],
3375 &sta_id); 3325 sta->addr, is_ap, sta, &sta_id);
3376 if (ret) { 3326 if (ret) {
3377 IWL_ERR(priv, "Unable to add station %pM (%d)\n", 3327 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3378 sta->addr, ret); 3328 sta->addr, ret);
@@ -3399,6 +3349,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3399{ 3349{
3400 struct iwl_priv *priv = hw->priv; 3350 struct iwl_priv *priv = hw->priv;
3401 __le32 filter_or = 0, filter_nand = 0; 3351 __le32 filter_or = 0, filter_nand = 0;
3352 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3402 3353
3403#define CHK(test, flag) do { \ 3354#define CHK(test, flag) do { \
3404 if (*total_flags & (test)) \ 3355 if (*total_flags & (test)) \
@@ -3418,8 +3369,8 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3418 3369
3419 mutex_lock(&priv->mutex); 3370 mutex_lock(&priv->mutex);
3420 3371
3421 priv->staging_rxon.filter_flags &= ~filter_nand; 3372 ctx->staging.filter_flags &= ~filter_nand;
3422 priv->staging_rxon.filter_flags |= filter_or; 3373 ctx->staging.filter_flags |= filter_or;
3423 3374
3424 /* 3375 /*
3425 * Committing directly here breaks for some reason, 3376 * Committing directly here breaks for some reason,
@@ -3533,8 +3484,9 @@ static ssize_t show_flags(struct device *d,
3533 struct device_attribute *attr, char *buf) 3484 struct device_attribute *attr, char *buf)
3534{ 3485{
3535 struct iwl_priv *priv = dev_get_drvdata(d); 3486 struct iwl_priv *priv = dev_get_drvdata(d);
3487 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3536 3488
3537 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); 3489 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3538} 3490}
3539 3491
3540static ssize_t store_flags(struct device *d, 3492static ssize_t store_flags(struct device *d,
@@ -3543,17 +3495,18 @@ static ssize_t store_flags(struct device *d,
3543{ 3495{
3544 struct iwl_priv *priv = dev_get_drvdata(d); 3496 struct iwl_priv *priv = dev_get_drvdata(d);
3545 u32 flags = simple_strtoul(buf, NULL, 0); 3497 u32 flags = simple_strtoul(buf, NULL, 0);
3498 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3546 3499
3547 mutex_lock(&priv->mutex); 3500 mutex_lock(&priv->mutex);
3548 if (le32_to_cpu(priv->staging_rxon.flags) != flags) { 3501 if (le32_to_cpu(ctx->staging.flags) != flags) {
3549 /* Cancel any currently running scans... */ 3502 /* Cancel any currently running scans... */
3550 if (iwl_scan_cancel_timeout(priv, 100)) 3503 if (iwl_scan_cancel_timeout(priv, 100))
3551 IWL_WARN(priv, "Could not cancel scan.\n"); 3504 IWL_WARN(priv, "Could not cancel scan.\n");
3552 else { 3505 else {
3553 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", 3506 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
3554 flags); 3507 flags);
3555 priv->staging_rxon.flags = cpu_to_le32(flags); 3508 ctx->staging.flags = cpu_to_le32(flags);
3556 iwlcore_commit_rxon(priv); 3509 iwlcore_commit_rxon(priv, ctx);
3557 } 3510 }
3558 } 3511 }
3559 mutex_unlock(&priv->mutex); 3512 mutex_unlock(&priv->mutex);
@@ -3567,9 +3520,10 @@ static ssize_t show_filter_flags(struct device *d,
3567 struct device_attribute *attr, char *buf) 3520 struct device_attribute *attr, char *buf)
3568{ 3521{
3569 struct iwl_priv *priv = dev_get_drvdata(d); 3522 struct iwl_priv *priv = dev_get_drvdata(d);
3523 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3570 3524
3571 return sprintf(buf, "0x%04X\n", 3525 return sprintf(buf, "0x%04X\n",
3572 le32_to_cpu(priv->active_rxon.filter_flags)); 3526 le32_to_cpu(ctx->active.filter_flags));
3573} 3527}
3574 3528
3575static ssize_t store_filter_flags(struct device *d, 3529static ssize_t store_filter_flags(struct device *d,
@@ -3577,19 +3531,20 @@ static ssize_t store_filter_flags(struct device *d,
3577 const char *buf, size_t count) 3531 const char *buf, size_t count)
3578{ 3532{
3579 struct iwl_priv *priv = dev_get_drvdata(d); 3533 struct iwl_priv *priv = dev_get_drvdata(d);
3534 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3580 u32 filter_flags = simple_strtoul(buf, NULL, 0); 3535 u32 filter_flags = simple_strtoul(buf, NULL, 0);
3581 3536
3582 mutex_lock(&priv->mutex); 3537 mutex_lock(&priv->mutex);
3583 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { 3538 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
3584 /* Cancel any currently running scans... */ 3539 /* Cancel any currently running scans... */
3585 if (iwl_scan_cancel_timeout(priv, 100)) 3540 if (iwl_scan_cancel_timeout(priv, 100))
3586 IWL_WARN(priv, "Could not cancel scan.\n"); 3541 IWL_WARN(priv, "Could not cancel scan.\n");
3587 else { 3542 else {
3588 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " 3543 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
3589 "0x%04X\n", filter_flags); 3544 "0x%04X\n", filter_flags);
3590 priv->staging_rxon.filter_flags = 3545 ctx->staging.filter_flags =
3591 cpu_to_le32(filter_flags); 3546 cpu_to_le32(filter_flags);
3592 iwlcore_commit_rxon(priv); 3547 iwlcore_commit_rxon(priv, ctx);
3593 } 3548 }
3594 } 3549 }
3595 mutex_unlock(&priv->mutex); 3550 mutex_unlock(&priv->mutex);
@@ -3637,8 +3592,9 @@ static ssize_t store_measurement(struct device *d,
3637 const char *buf, size_t count) 3592 const char *buf, size_t count)
3638{ 3593{
3639 struct iwl_priv *priv = dev_get_drvdata(d); 3594 struct iwl_priv *priv = dev_get_drvdata(d);
3595 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3640 struct ieee80211_measurement_params params = { 3596 struct ieee80211_measurement_params params = {
3641 .channel = le16_to_cpu(priv->active_rxon.channel), 3597 .channel = le16_to_cpu(ctx->active.channel),
3642 .start_time = cpu_to_le64(priv->_3945.last_tsf), 3598 .start_time = cpu_to_le64(priv->_3945.last_tsf),
3643 .duration = cpu_to_le16(1), 3599 .duration = cpu_to_le16(1),
3644 }; 3600 };
@@ -3785,10 +3741,8 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3785 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 3741 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3786 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 3742 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3787 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); 3743 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3788 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); 3744
3789 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); 3745 iwl_setup_scan_deferred_work(priv);
3790 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
3791 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
3792 3746
3793 iwl3945_hw_setup_deferred_work(priv); 3747 iwl3945_hw_setup_deferred_work(priv);
3794 3748
@@ -3808,12 +3762,10 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3808 iwl3945_hw_cancel_deferred_work(priv); 3762 iwl3945_hw_cancel_deferred_work(priv);
3809 3763
3810 cancel_delayed_work_sync(&priv->init_alive_start); 3764 cancel_delayed_work_sync(&priv->init_alive_start);
3811 cancel_delayed_work(&priv->scan_check);
3812 cancel_delayed_work(&priv->alive_start); 3765 cancel_delayed_work(&priv->alive_start);
3813 cancel_work_sync(&priv->start_internal_scan);
3814 cancel_work_sync(&priv->beacon_update); 3766 cancel_work_sync(&priv->beacon_update);
3815 if (priv->cfg->ops->lib->recover_from_tx_stall) 3767
3816 del_timer_sync(&priv->monitor_recover); 3768 iwl_cancel_scan_deferred_work(priv);
3817} 3769}
3818 3770
3819static struct attribute *iwl3945_sysfs_entries[] = { 3771static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3853,6 +3805,7 @@ static struct ieee80211_ops iwl3945_hw_ops = {
3853 .hw_scan = iwl_mac_hw_scan, 3805 .hw_scan = iwl_mac_hw_scan,
3854 .sta_add = iwl3945_mac_sta_add, 3806 .sta_add = iwl3945_mac_sta_add,
3855 .sta_remove = iwl_mac_sta_remove, 3807 .sta_remove = iwl_mac_sta_remove,
3808 .tx_last_beacon = iwl_mac_tx_last_beacon,
3856}; 3809};
3857 3810
3858static int iwl3945_init_drv(struct iwl_priv *priv) 3811static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3933,8 +3886,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3933 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 3886 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
3934 3887
3935 hw->wiphy->interface_modes = 3888 hw->wiphy->interface_modes =
3936 BIT(NL80211_IFTYPE_STATION) | 3889 priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
3937 BIT(NL80211_IFTYPE_ADHOC);
3938 3890
3939 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | 3891 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
3940 WIPHY_FLAG_DISABLE_BEACON_HINTS; 3892 WIPHY_FLAG_DISABLE_BEACON_HINTS;
@@ -3966,7 +3918,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3966 3918
3967static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3919static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3968{ 3920{
3969 int err = 0; 3921 int err = 0, i;
3970 struct iwl_priv *priv; 3922 struct iwl_priv *priv;
3971 struct ieee80211_hw *hw; 3923 struct ieee80211_hw *hw;
3972 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 3924 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
@@ -3988,6 +3940,27 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
3988 priv = hw->priv; 3940 priv = hw->priv;
3989 SET_IEEE80211_DEV(hw, &pdev->dev); 3941 SET_IEEE80211_DEV(hw, &pdev->dev);
3990 3942
3943 priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
3944
3945 /* 3945 has only one valid context */
3946 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
3947
3948 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
3949 priv->contexts[i].ctxid = i;
3950
3951 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
3952 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
3953 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
3954 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
3955 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
3956 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
3957 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
3958 BIT(NL80211_IFTYPE_STATION) |
3959 BIT(NL80211_IFTYPE_ADHOC);
3960 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
3961 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
3962 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
3963
3991 /* 3964 /*
3992 * Disabling hardware scan means that mac80211 will perform scans 3965 * Disabling hardware scan means that mac80211 will perform scans
3993 * "the hard way", rather than using device's scan. 3966 * "the hard way", rather than using device's scan.
@@ -4009,6 +3982,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4009 /*************************** 3982 /***************************
4010 * 2. Initializing PCI bus 3983 * 2. Initializing PCI bus
4011 * *************************/ 3984 * *************************/
3985 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3986 PCIE_LINK_STATE_CLKPM);
3987
4012 if (pci_enable_device(pdev)) { 3988 if (pci_enable_device(pdev)) {
4013 err = -ENODEV; 3989 err = -ENODEV;
4014 goto out_ieee80211_free_hw; 3990 goto out_ieee80211_free_hw;
@@ -4120,7 +4096,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4120 } 4096 }
4121 4097
4122 iwl_set_rxon_channel(priv, 4098 iwl_set_rxon_channel(priv,
4123 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]); 4099 &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
4100 &priv->contexts[IWL_RXON_CTX_BSS]);
4124 iwl3945_setup_deferred_work(priv); 4101 iwl3945_setup_deferred_work(priv);
4125 iwl3945_setup_rx_handlers(priv); 4102 iwl3945_setup_rx_handlers(priv);
4126 iwl_power_initialize(priv); 4103 iwl_power_initialize(priv);
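
Everything the shared core needs to drive a context (host command IDs, station IDs, interface modes, device types) is now carried in the context struct the probe code fills in above. That is what lets one helper issue RXON-family commands for whichever context it is handed, for example (a sketch, assuming staging holds the context's pending RXON):

	static int commit_rxon_sketch(struct iwl_priv *priv,
				      struct iwl_rxon_context *ctx)
	{
		/* ctx->rxon_cmd is REPLY_RXON for the BSS context set up
		 * above; a PAN-capable device would supply a different ID */
		return iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
					sizeof(ctx->staging), &ctx->staging);
	}
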
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index c02fcedea9fa..a944893ae3ca 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -1195,11 +1195,8 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
1195 IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: " 1195 IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
1196 "oid is 0x%x\n", hdr->oid); 1196 "oid is 0x%x\n", hdr->oid);
1197 1197
1198 if (hdr->oid <= WIFI_IF_NTFY_MAX) { 1198 set_bit(hdr->oid, &iwm->wifi_ntfy[0]);
1199 set_bit(hdr->oid, &iwm->wifi_ntfy[0]); 1199 wake_up_interruptible(&iwm->wifi_ntfy_queue);
1200 wake_up_interruptible(&iwm->wifi_ntfy_queue);
1201 } else
1202 return -EINVAL;
1203 1200
1204 switch (hdr->oid) { 1201 switch (hdr->oid) {
1205 case UMAC_WIFI_IF_CMD_SET_PROFILE: 1202 case UMAC_WIFI_IF_CMD_SET_PROFILE:
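
The set_bit()/wake_up_interruptible() pair above is the producer half of a flag-plus-waitqueue handshake; elsewhere the driver can sleep until a given OID notification has arrived. A minimal consumer sketch (the helper name and wait condition are illustrative):

	static int iwm_wait_for_ntfy_sketch(struct iwm_priv *iwm, u32 oid)
	{
		/* 0 once the RX path sets the bit, -ERESTARTSYS on signal */
		return wait_event_interruptible(iwm->wifi_ntfy_queue,
						test_bit(oid, &iwm->wifi_ntfy[0]));
	}
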
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 3e82f1627209..1bbdb14f7d76 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -10,6 +10,7 @@
10#include <linux/wait.h> 10#include <linux/wait.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/wait.h>
13#include <linux/ieee80211.h> 14#include <linux/ieee80211.h>
14#include <net/cfg80211.h> 15#include <net/cfg80211.h>
15#include <asm/unaligned.h> 16#include <asm/unaligned.h>
@@ -480,7 +481,6 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
480 struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp; 481 struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
481 int bsssize; 482 int bsssize;
482 const u8 *pos; 483 const u8 *pos;
483 u16 nr_sets;
484 const u8 *tsfdesc; 484 const u8 *tsfdesc;
485 int tsfsize; 485 int tsfsize;
486 int i; 486 int i;
@@ -489,12 +489,11 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
489 lbs_deb_enter(LBS_DEB_CFG80211); 489 lbs_deb_enter(LBS_DEB_CFG80211);
490 490
491 bsssize = get_unaligned_le16(&scanresp->bssdescriptsize); 491 bsssize = get_unaligned_le16(&scanresp->bssdescriptsize);
492 nr_sets = le16_to_cpu(scanresp->nr_sets);
493 492
494 lbs_deb_scan("scan response: %d BSSs (%d bytes); resp size %d bytes\n", 493 lbs_deb_scan("scan response: %d BSSs (%d bytes); resp size %d bytes\n",
495 nr_sets, bsssize, le16_to_cpu(resp->size)); 494 scanresp->nr_sets, bsssize, le16_to_cpu(resp->size));
496 495
497 if (nr_sets == 0) { 496 if (scanresp->nr_sets == 0) {
498 ret = 0; 497 ret = 0;
499 goto done; 498 goto done;
500 } 499 }
@@ -526,20 +525,31 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
526 525
527 pos = scanresp->bssdesc_and_tlvbuffer; 526 pos = scanresp->bssdesc_and_tlvbuffer;
528 527
528 lbs_deb_hex(LBS_DEB_SCAN, "SCAN_RSP", scanresp->bssdesc_and_tlvbuffer,
529 scanresp->bssdescriptsize);
530
529 tsfdesc = pos + bsssize; 531 tsfdesc = pos + bsssize;
530 tsfsize = 4 + 8 * scanresp->nr_sets; 532 tsfsize = 4 + 8 * scanresp->nr_sets;
533 lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TSF", (u8 *) tsfdesc, tsfsize);
531 534
532 /* Validity check: we expect a Marvell-Local TLV */ 535 /* Validity check: we expect a Marvell-Local TLV */
533 i = get_unaligned_le16(tsfdesc); 536 i = get_unaligned_le16(tsfdesc);
534 tsfdesc += 2; 537 tsfdesc += 2;
535 if (i != TLV_TYPE_TSFTIMESTAMP) 538 if (i != TLV_TYPE_TSFTIMESTAMP) {
539 lbs_deb_scan("scan response: invalid TSF Timestamp %d\n", i);
536 goto done; 540 goto done;
541 }
542
537 /* Validity check: the TLV holds TSF values with 8 bytes each, so 543 /* Validity check: the TLV holds TSF values with 8 bytes each, so
538 * the size in the TLV must match the nr_sets value */ 544 * the size in the TLV must match the nr_sets value */
539 i = get_unaligned_le16(tsfdesc); 545 i = get_unaligned_le16(tsfdesc);
540 tsfdesc += 2; 546 tsfdesc += 2;
541 if (i / 8 != scanresp->nr_sets) 547 if (i / 8 != scanresp->nr_sets) {
548 lbs_deb_scan("scan response: invalid number of TSF timestamp "
549 "sets (expected %d got %d)\n", scanresp->nr_sets,
550 i / 8);
542 goto done; 551 goto done;
552 }
543 553
544 for (i = 0; i < scanresp->nr_sets; i++) { 554 for (i = 0; i < scanresp->nr_sets; i++) {
545 const u8 *bssid; 555 const u8 *bssid;
@@ -581,8 +591,11 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
581 id = *pos++; 591 id = *pos++;
582 elen = *pos++; 592 elen = *pos++;
583 left -= 2; 593 left -= 2;
584 if (elen > left || elen == 0) 594 if (elen > left || elen == 0) {
595 lbs_deb_scan("scan response: invalid IE fmt\n");
585 goto done; 596 goto done;
597 }
598
586 if (id == WLAN_EID_DS_PARAMS) 599 if (id == WLAN_EID_DS_PARAMS)
587 chan_no = *pos; 600 chan_no = *pos;
588 if (id == WLAN_EID_SSID) { 601 if (id == WLAN_EID_SSID) {
@@ -613,7 +626,9 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
613 capa, intvl, ie, ielen, 626 capa, intvl, ie, ielen,
614 LBS_SCAN_RSSI_TO_MBM(rssi), 627 LBS_SCAN_RSSI_TO_MBM(rssi),
615 GFP_KERNEL); 628 GFP_KERNEL);
616 } 629 } else
630 lbs_deb_scan("scan response: missing BSS channel IE\n");
631
617 tsfdesc += 8; 632 tsfdesc += 8;
618 } 633 }
619 ret = 0; 634 ret = 0;
@@ -1103,7 +1118,7 @@ static int lbs_associate(struct lbs_private *priv,
1103 lbs_deb_hex(LBS_DEB_ASSOC, "Common Rates", tmp, pos - tmp); 1118 lbs_deb_hex(LBS_DEB_ASSOC, "Common Rates", tmp, pos - tmp);
1104 1119
1105 /* add auth type TLV */ 1120 /* add auth type TLV */
1106 if (priv->fwrelease >= 0x09000000) 1121 if (MRVL_FW_MAJOR_REV(priv->fwrelease) >= 9)
1107 pos += lbs_add_auth_type_tlv(pos, sme->auth_type); 1122 pos += lbs_add_auth_type_tlv(pos, sme->auth_type);
1108 1123
1109 /* add WPA/WPA2 TLV */ 1124 /* add WPA/WPA2 TLV */
@@ -1114,6 +1129,9 @@ static int lbs_associate(struct lbs_private *priv,
1114 (u16)(pos - (u8 *) &cmd->iebuf); 1129 (u16)(pos - (u8 *) &cmd->iebuf);
1115 cmd->hdr.size = cpu_to_le16(len); 1130 cmd->hdr.size = cpu_to_le16(len);
1116 1131
1132 lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_CMD", (u8 *) cmd,
1133 le16_to_cpu(cmd->hdr.size));
1134
1117 /* store for later use */ 1135 /* store for later use */
1118 memcpy(priv->assoc_bss, bss->bssid, ETH_ALEN); 1136 memcpy(priv->assoc_bss, bss->bssid, ETH_ALEN);
1119 1137
@@ -1121,14 +1139,28 @@ static int lbs_associate(struct lbs_private *priv,
1121 if (ret) 1139 if (ret)
1122 goto done; 1140 goto done;
1123 1141
1124
1125 /* generate connect message to cfg80211 */ 1142 /* generate connect message to cfg80211 */
1126 1143
1127 resp = (void *) cmd; /* recast for easier field access */ 1144 resp = (void *) cmd; /* recast for easier field access */
1128 status = le16_to_cpu(resp->statuscode); 1145 status = le16_to_cpu(resp->statuscode);
1129 1146
1130 /* Convert statis code of old firmware */ 1147 /* Older FW versions map the IEEE 802.11 Status Code in the association
1131 if (priv->fwrelease < 0x09000000) 1148 * response to the following values returned in resp->statuscode:
1149 *
1150 * IEEE Status Code Marvell Status Code
1151 * 0 -> 0x0000 ASSOC_RESULT_SUCCESS
1152 * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1153 * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1154 * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1155 * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1156 * others -> 0x0003 ASSOC_RESULT_REFUSED
1157 *
1158 * Other response codes:
1159 * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused)
1160 * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for
1161 * association response from the AP)
1162 */
1163 if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) {
1132 switch (status) { 1164 switch (status) {
1133 case 0: 1165 case 0:
1134 break; 1166 break;
@@ -1150,11 +1182,16 @@ static int lbs_associate(struct lbs_private *priv,
1150 break; 1182 break;
1151 default: 1183 default:
1152 lbs_deb_assoc("association failure %d\n", status); 1184 lbs_deb_assoc("association failure %d\n", status);
1153 status = WLAN_STATUS_UNSPECIFIED_FAILURE; 1185 /* v5 OLPC firmware does return the AP status code if
1186 * it's not one of the values above. Let that through.
1187 */
1188 break;
1189 }
1154 } 1190 }
1155 1191
1156 lbs_deb_assoc("status %d, capability 0x%04x\n", status, 1192 lbs_deb_assoc("status %d, statuscode 0x%04x, capability 0x%04x, "
1157 le16_to_cpu(resp->capability)); 1193 "aid 0x%04x\n", status, le16_to_cpu(resp->statuscode),
1194 le16_to_cpu(resp->capability), le16_to_cpu(resp->aid));
1158 1195
1159 resp_ie_len = le16_to_cpu(resp->hdr.size) 1196 resp_ie_len = le16_to_cpu(resp->hdr.size)
1160 - sizeof(resp->hdr) 1197 - sizeof(resp->hdr)
@@ -1174,7 +1211,6 @@ static int lbs_associate(struct lbs_private *priv,
1174 netif_tx_wake_all_queues(priv->dev); 1211 netif_tx_wake_all_queues(priv->dev);
1175 } 1212 }
1176 1213
1177
1178done: 1214done:
1179 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); 1215 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
1180 return ret; 1216 return ret;
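
MRVL_FW_MAJOR_REV() makes the firmware-version tests above legible: priv->fwrelease packs the version into one word with the major revision in the most significant byte, so comparing against 0x09000000 and asking for a major revision >= 9 are the same check. Assuming that encoding, the macro is essentially:

	/* fwrelease is 0xMMmmpprr-style; major revision is the top byte */
	#define MRVL_FW_MAJOR_REV(x)	((x) >> 24)
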
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 1d141fefd767..2ae752d10065 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -8,7 +8,14 @@
8#define _LBS_DECL_H_ 8#define _LBS_DECL_H_
9 9
10#include <linux/netdevice.h> 10#include <linux/netdevice.h>
11#include <linux/firmware.h>
11 12
13/* Should be terminated by a NULL entry */
14struct lbs_fw_table {
15 int model;
16 const char *helper;
17 const char *fwname;
18};
12 19
13struct lbs_private; 20struct lbs_private;
14struct sk_buff; 21struct sk_buff;
@@ -53,4 +60,10 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
53u32 lbs_fw_index_to_data_rate(u8 index); 60u32 lbs_fw_index_to_data_rate(u8 index);
54u8 lbs_data_rate_to_fw_index(u32 rate); 61u8 lbs_data_rate_to_fw_index(u32 rate);
55 62
63int lbs_get_firmware(struct device *dev, const char *user_helper,
64 const char *user_mainfw, u32 card_model,
65 const struct lbs_fw_table *fw_table,
66 const struct firmware **helper,
67 const struct firmware **mainfw);
68
56#endif 69#endif
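
lbs_get_firmware() centralizes the lookup that the CF, SDIO and SPI interface drivers previously open-coded. A sketch of the table walk, assuming the user_helper/user_mainfw parameters take precedence when set and the first matching row that actually loads wins (parameter handling and error reporting trimmed):

	static int lbs_get_firmware_sketch(struct device *dev, u32 card_model,
					   const struct lbs_fw_table *tbl,
					   const struct firmware **helper,
					   const struct firmware **mainfw)
	{
		for (; tbl->helper; tbl++) {
			if (tbl->model != card_model)
				continue;
			if (request_firmware(helper, tbl->helper, dev))
				continue;	/* try the next candidate */
			if (!tbl->fwname ||
			    !request_firmware(mainfw, tbl->fwname, dev))
				return 0;	/* usable helper (+ main image) */
			release_firmware(*helper);
			*helper = NULL;
		}
		return -ENOENT;
	}

Rows whose fwname is NULL (the CF8305 entries in if_cs.c, for instance) need only a helper image, which the early return above accommodates.
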
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 9c298396be50..e213a5dc049d 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -48,7 +48,6 @@
48MODULE_AUTHOR("Holger Schurig <hs4233@mail.mn-solutions.de>"); 48MODULE_AUTHOR("Holger Schurig <hs4233@mail.mn-solutions.de>");
49MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards"); 49MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards");
50MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
51MODULE_FIRMWARE("libertas_cs_helper.fw");
52 51
53 52
54 53
@@ -61,9 +60,34 @@ struct if_cs_card {
61 struct lbs_private *priv; 60 struct lbs_private *priv;
62 void __iomem *iobase; 61 void __iomem *iobase;
63 bool align_regs; 62 bool align_regs;
63 u32 model;
64}; 64};
65 65
66 66
67enum {
68 MODEL_UNKNOWN = 0x00,
69 MODEL_8305 = 0x01,
70 MODEL_8381 = 0x02,
71 MODEL_8385 = 0x03
72};
73
74static const struct lbs_fw_table fw_table[] = {
75 { MODEL_8305, "libertas/cf8305.bin", NULL },
76 { MODEL_8305, "libertas_cs_helper.fw", NULL },
77 { MODEL_8381, "libertas/cf8381_helper.bin", "libertas/cf8381.bin" },
78 { MODEL_8381, "libertas_cs_helper.fw", "libertas_cs.fw" },
79 { MODEL_8385, "libertas/cf8385_helper.bin", "libertas/cf8385.bin" },
80 { MODEL_8385, "libertas_cs_helper.fw", "libertas_cs.fw" },
81 { 0, NULL, NULL }
82};
83MODULE_FIRMWARE("libertas/cf8305.bin");
84MODULE_FIRMWARE("libertas/cf8381_helper.bin");
85MODULE_FIRMWARE("libertas/cf8381.bin");
86MODULE_FIRMWARE("libertas/cf8385_helper.bin");
87MODULE_FIRMWARE("libertas/cf8385.bin");
88MODULE_FIRMWARE("libertas_cs_helper.fw");
89MODULE_FIRMWARE("libertas_cs.fw");
90
67 91
68/********************************************************************/ 92/********************************************************************/
69/* Hardware access */ 93/* Hardware access */
@@ -289,22 +313,19 @@ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 r
289#define CF8385_MANFID 0x02df 313#define CF8385_MANFID 0x02df
290#define CF8385_CARDID 0x8103 314#define CF8385_CARDID 0x8103
291 315
292static inline int if_cs_hw_is_cf8305(struct pcmcia_device *p_dev) 316/* FIXME: just use the 'driver_info' field of 'struct pcmcia_device_id' when
293{ 317 * that gets fixed. Currently there's no way to access it from the probe hook.
294 return (p_dev->manf_id == CF8305_MANFID && 318 */
295 p_dev->card_id == CF8305_CARDID); 319static inline u32 get_model(u16 manf_id, u16 card_id)
296}
297
298static inline int if_cs_hw_is_cf8381(struct pcmcia_device *p_dev)
299{
300 return (p_dev->manf_id == CF8381_MANFID &&
301 p_dev->card_id == CF8381_CARDID);
302}
303
304static inline int if_cs_hw_is_cf8385(struct pcmcia_device *p_dev)
305{ 320{
306 return (p_dev->manf_id == CF8385_MANFID && 321 /* NOTE: keep in sync with if_cs_ids */
307 p_dev->card_id == CF8385_CARDID); 322 if (manf_id == CF8305_MANFID && card_id == CF8305_CARDID)
323 return MODEL_8305;
324 else if (manf_id == CF8381_MANFID && card_id == CF8381_CARDID)
325 return MODEL_8381;
326 else if (manf_id == CF8385_MANFID && card_id == CF8385_CARDID)
327 return MODEL_8385;
328 return MODEL_UNKNOWN;
308} 329}
309 330
310/********************************************************************/ 331/********************************************************************/
@@ -558,12 +579,11 @@ static irqreturn_t if_cs_interrupt(int irq, void *data)
558 * 579 *
559 * Return 0 on success 580 * Return 0 on success
560 */ 581 */
561static int if_cs_prog_helper(struct if_cs_card *card) 582static int if_cs_prog_helper(struct if_cs_card *card, const struct firmware *fw)
562{ 583{
563 int ret = 0; 584 int ret = 0;
564 int sent = 0; 585 int sent = 0;
565 u8 scratch; 586 u8 scratch;
566 const struct firmware *fw;
567 587
568 lbs_deb_enter(LBS_DEB_CS); 588 lbs_deb_enter(LBS_DEB_CS);
569 589
@@ -589,14 +609,6 @@ static int if_cs_prog_helper(struct if_cs_card *card)
589 goto done; 609 goto done;
590 } 610 }
591 611
592 /* TODO: make firmware file configurable */
593 ret = request_firmware(&fw, "libertas_cs_helper.fw",
594 &card->p_dev->dev);
595 if (ret) {
596 lbs_pr_err("can't load helper firmware\n");
597 ret = -ENODEV;
598 goto done;
599 }
600 lbs_deb_cs("helper size %td\n", fw->size); 612 lbs_deb_cs("helper size %td\n", fw->size);
601 613
602 /* "Set the 5 bytes of the helper image to 0" */ 614 /* "Set the 5 bytes of the helper image to 0" */
@@ -635,7 +647,7 @@ static int if_cs_prog_helper(struct if_cs_card *card)
635 if (ret < 0) { 647 if (ret < 0) {
636 lbs_pr_err("can't download helper at 0x%x, ret %d\n", 648 lbs_pr_err("can't download helper at 0x%x, ret %d\n",
637 sent, ret); 649 sent, ret);
638 goto err_release; 650 goto done;
639 } 651 }
640 652
641 if (count == 0) 653 if (count == 0)
@@ -644,17 +656,14 @@ static int if_cs_prog_helper(struct if_cs_card *card)
644 sent += count; 656 sent += count;
645 } 657 }
646 658
647err_release:
648 release_firmware(fw);
649done: 659done:
650 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 660 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
651 return ret; 661 return ret;
652} 662}
653 663
654 664
655static int if_cs_prog_real(struct if_cs_card *card) 665static int if_cs_prog_real(struct if_cs_card *card, const struct firmware *fw)
656{ 666{
657 const struct firmware *fw;
658 int ret = 0; 667 int ret = 0;
659 int retry = 0; 668 int retry = 0;
660 int len = 0; 669 int len = 0;
@@ -662,21 +671,13 @@ static int if_cs_prog_real(struct if_cs_card *card)
662 671
663 lbs_deb_enter(LBS_DEB_CS); 672 lbs_deb_enter(LBS_DEB_CS);
664 673
665 /* TODO: make firmware file configurable */
666 ret = request_firmware(&fw, "libertas_cs.fw",
667 &card->p_dev->dev);
668 if (ret) {
669 lbs_pr_err("can't load firmware\n");
670 ret = -ENODEV;
671 goto done;
672 }
673 lbs_deb_cs("fw size %td\n", fw->size); 674 lbs_deb_cs("fw size %td\n", fw->size);
674 675
675 ret = if_cs_poll_while_fw_download(card, IF_CS_SQ_READ_LOW, 676 ret = if_cs_poll_while_fw_download(card, IF_CS_SQ_READ_LOW,
676 IF_CS_SQ_HELPER_OK); 677 IF_CS_SQ_HELPER_OK);
677 if (ret < 0) { 678 if (ret < 0) {
678 lbs_pr_err("helper firmware doesn't answer\n"); 679 lbs_pr_err("helper firmware doesn't answer\n");
679 goto err_release; 680 goto done;
680 } 681 }
681 682
682 for (sent = 0; sent < fw->size; sent += len) { 683 for (sent = 0; sent < fw->size; sent += len) {
@@ -691,7 +692,7 @@ static int if_cs_prog_real(struct if_cs_card *card)
691 if (retry > 20) { 692 if (retry > 20) {
692 lbs_pr_err("could not download firmware\n"); 693 lbs_pr_err("could not download firmware\n");
693 ret = -ENODEV; 694 ret = -ENODEV;
694 goto err_release; 695 goto done;
695 } 696 }
696 if (retry) { 697 if (retry) {
697 sent -= len; 698 sent -= len;
@@ -710,7 +711,7 @@ static int if_cs_prog_real(struct if_cs_card *card)
710 IF_CS_BIT_COMMAND); 711 IF_CS_BIT_COMMAND);
711 if (ret < 0) { 712 if (ret < 0) {
712 lbs_pr_err("can't download firmware at 0x%x\n", sent); 713 lbs_pr_err("can't download firmware at 0x%x\n", sent);
713 goto err_release; 714 goto done;
714 } 715 }
715 } 716 }
716 717
@@ -718,9 +719,6 @@ static int if_cs_prog_real(struct if_cs_card *card)
718 if (ret < 0) 719 if (ret < 0)
719 lbs_pr_err("firmware download failed\n"); 720 lbs_pr_err("firmware download failed\n");
720 721
721err_release:
722 release_firmware(fw);
723
724done: 722done:
725 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 723 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
726 return ret; 724 return ret;
@@ -824,6 +822,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
824 unsigned int prod_id; 822 unsigned int prod_id;
825 struct lbs_private *priv; 823 struct lbs_private *priv;
826 struct if_cs_card *card; 824 struct if_cs_card *card;
825 const struct firmware *helper = NULL;
826 const struct firmware *mainfw = NULL;
827 827
828 lbs_deb_enter(LBS_DEB_CS); 828 lbs_deb_enter(LBS_DEB_CS);
829 829
@@ -843,7 +843,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
843 goto out1; 843 goto out1;
844 } 844 }
845 845
846
847 /* 846 /*
848 * Allocate an interrupt line. Note that this does not assign 847 * Allocate an interrupt line. Note that this does not assign
849 * a handler to the interrupt, unless the 'Handler' member of 848 * a handler to the interrupt, unless the 'Handler' member of
@@ -881,34 +880,47 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
881 */ 880 */
882 card->align_regs = 0; 881 card->align_regs = 0;
883 882
883 card->model = get_model(p_dev->manf_id, p_dev->card_id);
884 if (card->model == MODEL_UNKNOWN) {
885 lbs_pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n",
886 p_dev->manf_id, p_dev->card_id);
887 goto out2;
888 }
889
884 /* Check if we have a current silicon */ 890 /* Check if we have a current silicon */
885 prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID); 891 prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID);
886 if (if_cs_hw_is_cf8305(p_dev)) { 892 if (card->model == MODEL_8305) {
887 card->align_regs = 1; 893 card->align_regs = 1;
888 if (prod_id < IF_CS_CF8305_B1_REV) { 894 if (prod_id < IF_CS_CF8305_B1_REV) {
889 lbs_pr_err("old chips like 8305 rev B3 " 895 lbs_pr_err("8305 rev B0 and older are not supported\n");
890 "aren't supported\n");
891 ret = -ENODEV; 896 ret = -ENODEV;
892 goto out2; 897 goto out2;
893 } 898 }
894 } 899 }
895 900
896 if (if_cs_hw_is_cf8381(p_dev) && prod_id < IF_CS_CF8381_B3_REV) { 901 if ((card->model == MODEL_8381) && prod_id < IF_CS_CF8381_B3_REV) {
897 lbs_pr_err("old chips like 8381 rev B3 aren't supported\n"); 902 lbs_pr_err("8381 rev B2 and older are not supported\n");
898 ret = -ENODEV; 903 ret = -ENODEV;
899 goto out2; 904 goto out2;
900 } 905 }
901 906
902 if (if_cs_hw_is_cf8385(p_dev) && prod_id < IF_CS_CF8385_B1_REV) { 907 if ((card->model == MODEL_8385) && prod_id < IF_CS_CF8385_B1_REV) {
903 lbs_pr_err("old chips like 8385 rev B1 aren't supported\n"); 908 lbs_pr_err("8385 rev B0 and older are not supported\n");
904 ret = -ENODEV; 909 ret = -ENODEV;
905 goto out2; 910 goto out2;
906 } 911 }
907 912
913 ret = lbs_get_firmware(&p_dev->dev, NULL, NULL, card->model,
914 &fw_table[0], &helper, &mainfw);
915 if (ret) {
916 lbs_pr_err("failed to find firmware (%d)\n", ret);
917 goto out2;
918 }
919
908 /* Load the firmware early, before calling into libertas.ko */ 920 /* Load the firmware early, before calling into libertas.ko */
909 ret = if_cs_prog_helper(card); 921 ret = if_cs_prog_helper(card, helper);
910 if (ret == 0 && !if_cs_hw_is_cf8305(p_dev)) 922 if (ret == 0 && (card->model != MODEL_8305))
911 ret = if_cs_prog_real(card); 923 ret = if_cs_prog_real(card, mainfw);
912 if (ret) 924 if (ret)
913 goto out2; 925 goto out2;
914 926
@@ -957,6 +969,11 @@ out2:
957out1: 969out1:
958 pcmcia_disable_device(p_dev); 970 pcmcia_disable_device(p_dev);
959out: 971out:
972 if (helper)
973 release_firmware(helper);
974 if (mainfw)
975 release_firmware(mainfw);
976
960 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 977 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
961 return ret; 978 return ret;
962} 979}
@@ -993,6 +1010,7 @@ static struct pcmcia_device_id if_cs_ids[] = {
993 PCMCIA_DEVICE_MANF_CARD(CF8305_MANFID, CF8305_CARDID), 1010 PCMCIA_DEVICE_MANF_CARD(CF8305_MANFID, CF8305_CARDID),
994 PCMCIA_DEVICE_MANF_CARD(CF8381_MANFID, CF8381_CARDID), 1011 PCMCIA_DEVICE_MANF_CARD(CF8381_MANFID, CF8381_CARDID),
995 PCMCIA_DEVICE_MANF_CARD(CF8385_MANFID, CF8385_CARDID), 1012 PCMCIA_DEVICE_MANF_CARD(CF8385_MANFID, CF8385_CARDID),
1013 /* NOTE: keep in sync with get_model() */
996 PCMCIA_DEVICE_NULL, 1014 PCMCIA_DEVICE_NULL,
997}; 1015};
998MODULE_DEVICE_TABLE(pcmcia, if_cs_ids); 1016MODULE_DEVICE_TABLE(pcmcia, if_cs_ids);
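
After this change the probe function owns both firmware images for their whole lifetime: if_cs_prog_helper() and if_cs_prog_real() only borrow the buffers, and the common exit path releases them on success and on every error. In miniature (a hedged sketch of the ownership pattern, not a quote of the probe body):

	const struct firmware *helper = NULL, *mainfw = NULL;

	ret = lbs_get_firmware(&p_dev->dev, NULL, NULL, card->model,
			       fw_table, &helper, &mainfw);
	if (!ret) {
		ret = if_cs_prog_helper(card, helper);	/* borrows, never frees */
		if (!ret && card->model != MODEL_8305)
			ret = if_cs_prog_real(card, mainfw);
	}
	if (helper)
		release_firmware(helper);
	if (mainfw)
		release_firmware(mainfw);
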
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 87b634978b35..296fd00a5129 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -76,36 +76,32 @@ static const struct sdio_device_id if_sdio_ids[] = {
76 76
77MODULE_DEVICE_TABLE(sdio, if_sdio_ids); 77MODULE_DEVICE_TABLE(sdio, if_sdio_ids);
78 78
79struct if_sdio_model { 79#define MODEL_8385 0x04
80 int model; 80#define MODEL_8686 0x0b
81 const char *helper; 81#define MODEL_8688 0x10
82 const char *firmware; 82
83}; 83static const struct lbs_fw_table fw_table[] = {
84 84 { MODEL_8385, "libertas/sd8385_helper.bin", "libertas/sd8385.bin" },
85static struct if_sdio_model if_sdio_models[] = { 85 { MODEL_8385, "sd8385_helper.bin", "sd8385.bin" },
86 { 86 { MODEL_8686, "libertas/sd8686_v9_helper.bin", "libertas/sd8686_v9.bin" },
87 /* 8385 */ 87 { MODEL_8686, "libertas/sd8686_v8_helper.bin", "libertas/sd8686_v8.bin" },
88 .model = IF_SDIO_MODEL_8385, 88 { MODEL_8686, "sd8686_helper.bin", "sd8686.bin" },
89 .helper = "sd8385_helper.bin", 89 { MODEL_8688, "libertas/sd8688_helper.bin", "libertas/sd8688.bin" },
90 .firmware = "sd8385.bin", 90 { MODEL_8688, "sd8688_helper.bin", "sd8688.bin" },
91 }, 91 { 0, NULL, NULL }
92 {
93 /* 8686 */
94 .model = IF_SDIO_MODEL_8686,
95 .helper = "sd8686_helper.bin",
96 .firmware = "sd8686.bin",
97 },
98 {
99 /* 8688 */
100 .model = IF_SDIO_MODEL_8688,
101 .helper = "sd8688_helper.bin",
102 .firmware = "sd8688.bin",
103 },
104}; 92};
93MODULE_FIRMWARE("libertas/sd8385_helper.bin");
94MODULE_FIRMWARE("libertas/sd8385.bin");
105MODULE_FIRMWARE("sd8385_helper.bin"); 95MODULE_FIRMWARE("sd8385_helper.bin");
106MODULE_FIRMWARE("sd8385.bin"); 96MODULE_FIRMWARE("sd8385.bin");
97MODULE_FIRMWARE("libertas/sd8686_v9_helper.bin");
98MODULE_FIRMWARE("libertas/sd8686_v9.bin");
99MODULE_FIRMWARE("libertas/sd8686_v8_helper.bin");
100MODULE_FIRMWARE("libertas/sd8686_v8.bin");
107MODULE_FIRMWARE("sd8686_helper.bin"); 101MODULE_FIRMWARE("sd8686_helper.bin");
108MODULE_FIRMWARE("sd8686.bin"); 102MODULE_FIRMWARE("sd8686.bin");
103MODULE_FIRMWARE("libertas/sd8688_helper.bin");
104MODULE_FIRMWARE("libertas/sd8688.bin");
109MODULE_FIRMWARE("sd8688_helper.bin"); 105MODULE_FIRMWARE("sd8688_helper.bin");
110MODULE_FIRMWARE("sd8688.bin"); 106MODULE_FIRMWARE("sd8688.bin");
111 107
@@ -187,11 +183,11 @@ static u16 if_sdio_read_rx_len(struct if_sdio_card *card, int *err)
187 u16 rx_len; 183 u16 rx_len;
188 184
189 switch (card->model) { 185 switch (card->model) {
190 case IF_SDIO_MODEL_8385: 186 case MODEL_8385:
191 case IF_SDIO_MODEL_8686: 187 case MODEL_8686:
192 rx_len = if_sdio_read_scratch(card, &ret); 188 rx_len = if_sdio_read_scratch(card, &ret);
193 break; 189 break;
194 case IF_SDIO_MODEL_8688: 190 case MODEL_8688:
195 default: /* for newer chipsets */ 191 default: /* for newer chipsets */
196 rx_len = sdio_readb(card->func, IF_SDIO_RX_LEN, &ret); 192 rx_len = sdio_readb(card->func, IF_SDIO_RX_LEN, &ret);
197 if (!ret) 193 if (!ret)
@@ -288,7 +284,7 @@ static int if_sdio_handle_event(struct if_sdio_card *card,
288 284
289 lbs_deb_enter(LBS_DEB_SDIO); 285 lbs_deb_enter(LBS_DEB_SDIO);
290 286
291 if (card->model == IF_SDIO_MODEL_8385) { 287 if (card->model == MODEL_8385) {
292 event = sdio_readb(card->func, IF_SDIO_EVENT, &ret); 288 event = sdio_readb(card->func, IF_SDIO_EVENT, &ret);
293 if (ret) 289 if (ret)
294 goto out; 290 goto out;
@@ -466,10 +462,10 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
466 462
467#define FW_DL_READY_STATUS (IF_SDIO_IO_RDY | IF_SDIO_DL_RDY) 463#define FW_DL_READY_STATUS (IF_SDIO_IO_RDY | IF_SDIO_DL_RDY)
468 464
469static int if_sdio_prog_helper(struct if_sdio_card *card) 465static int if_sdio_prog_helper(struct if_sdio_card *card,
466 const struct firmware *fw)
470{ 467{
471 int ret; 468 int ret;
472 const struct firmware *fw;
473 unsigned long timeout; 469 unsigned long timeout;
474 u8 *chunk_buffer; 470 u8 *chunk_buffer;
475 u32 chunk_size; 471 u32 chunk_size;
@@ -478,16 +474,10 @@ static int if_sdio_prog_helper(struct if_sdio_card *card)
478 474
479 lbs_deb_enter(LBS_DEB_SDIO); 475 lbs_deb_enter(LBS_DEB_SDIO);
480 476
481 ret = request_firmware(&fw, card->helper, &card->func->dev);
482 if (ret) {
483 lbs_pr_err("can't load helper firmware\n");
484 goto out;
485 }
486
487 chunk_buffer = kzalloc(64, GFP_KERNEL); 477 chunk_buffer = kzalloc(64, GFP_KERNEL);
488 if (!chunk_buffer) { 478 if (!chunk_buffer) {
489 ret = -ENOMEM; 479 ret = -ENOMEM;
490 goto release_fw; 480 goto out;
491 } 481 }
492 482
493 sdio_claim_host(card->func); 483 sdio_claim_host(card->func);
@@ -562,22 +552,19 @@ static int if_sdio_prog_helper(struct if_sdio_card *card)
562release: 552release:
563 sdio_release_host(card->func); 553 sdio_release_host(card->func);
564 kfree(chunk_buffer); 554 kfree(chunk_buffer);
565release_fw:
566 release_firmware(fw);
567 555
568out: 556out:
569 if (ret) 557 if (ret)
570 lbs_pr_err("failed to load helper firmware\n"); 558 lbs_pr_err("failed to load helper firmware\n");
571 559
572 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 560 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
573
574 return ret; 561 return ret;
575} 562}
576 563
577static int if_sdio_prog_real(struct if_sdio_card *card) 564static int if_sdio_prog_real(struct if_sdio_card *card,
565 const struct firmware *fw)
578{ 566{
579 int ret; 567 int ret;
580 const struct firmware *fw;
581 unsigned long timeout; 568 unsigned long timeout;
582 u8 *chunk_buffer; 569 u8 *chunk_buffer;
583 u32 chunk_size; 570 u32 chunk_size;
@@ -586,16 +573,10 @@ static int if_sdio_prog_real(struct if_sdio_card *card)
586 573
587 lbs_deb_enter(LBS_DEB_SDIO); 574 lbs_deb_enter(LBS_DEB_SDIO);
588 575
589 ret = request_firmware(&fw, card->firmware, &card->func->dev);
590 if (ret) {
591 lbs_pr_err("can't load firmware\n");
592 goto out;
593 }
594
595 chunk_buffer = kzalloc(512, GFP_KERNEL); 576 chunk_buffer = kzalloc(512, GFP_KERNEL);
596 if (!chunk_buffer) { 577 if (!chunk_buffer) {
597 ret = -ENOMEM; 578 ret = -ENOMEM;
598 goto release_fw; 579 goto out;
599 } 580 }
600 581
601 sdio_claim_host(card->func); 582 sdio_claim_host(card->func);
@@ -685,15 +666,12 @@ static int if_sdio_prog_real(struct if_sdio_card *card)
685release: 666release:
686 sdio_release_host(card->func); 667 sdio_release_host(card->func);
687 kfree(chunk_buffer); 668 kfree(chunk_buffer);
688release_fw:
689 release_firmware(fw);
690 669
691out: 670out:
692 if (ret) 671 if (ret)
693 lbs_pr_err("failed to load firmware\n"); 672 lbs_pr_err("failed to load firmware\n");
694 673
695 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 674 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
696
697 return ret; 675 return ret;
698} 676}
699 677
@@ -701,6 +679,8 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
701{ 679{
702 int ret; 680 int ret;
703 u16 scratch; 681 u16 scratch;
682 const struct firmware *helper = NULL;
683 const struct firmware *mainfw = NULL;
704 684
705 lbs_deb_enter(LBS_DEB_SDIO); 685 lbs_deb_enter(LBS_DEB_SDIO);
706 686
@@ -718,11 +698,18 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
718 goto success; 698 goto success;
719 } 699 }
720 700
721 ret = if_sdio_prog_helper(card); 701 ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name,
702 card->model, &fw_table[0], &helper, &mainfw);
703 if (ret) {
704 lbs_pr_err("failed to find firmware (%d)\n", ret);
705 goto out;
706 }
707
708 ret = if_sdio_prog_helper(card, helper);
722 if (ret) 709 if (ret)
723 goto out; 710 goto out;
724 711
725 ret = if_sdio_prog_real(card); 712 ret = if_sdio_prog_real(card, mainfw);
726 if (ret) 713 if (ret)
727 goto out; 714 goto out;
728 715
@@ -733,8 +720,12 @@ success:
733 ret = 0; 720 ret = 0;
734 721
735out: 722out:
736 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 723 if (helper)
724 release_firmware(helper);
725 if (mainfw)
726 release_firmware(mainfw);
737 727
728 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
738 return ret; 729 return ret;
739} 730}
740 731
@@ -938,7 +929,7 @@ static int if_sdio_probe(struct sdio_func *func,
938 "ID: %x", &model) == 1) 929 "ID: %x", &model) == 1)
939 break; 930 break;
940 if (!strcmp(func->card->info[i], "IBIS Wireless SDIO Card")) { 931 if (!strcmp(func->card->info[i], "IBIS Wireless SDIO Card")) {
941 model = IF_SDIO_MODEL_8385; 932 model = MODEL_8385;
942 break; 933 break;
943 } 934 }
944 } 935 }
@@ -956,13 +947,13 @@ static int if_sdio_probe(struct sdio_func *func,
956 card->model = model; 947 card->model = model;
957 948
958 switch (card->model) { 949 switch (card->model) {
959 case IF_SDIO_MODEL_8385: 950 case MODEL_8385:
960 card->scratch_reg = IF_SDIO_SCRATCH_OLD; 951 card->scratch_reg = IF_SDIO_SCRATCH_OLD;
961 break; 952 break;
962 case IF_SDIO_MODEL_8686: 953 case MODEL_8686:
963 card->scratch_reg = IF_SDIO_SCRATCH; 954 card->scratch_reg = IF_SDIO_SCRATCH;
964 break; 955 break;
965 case IF_SDIO_MODEL_8688: 956 case MODEL_8688:
966 default: /* for newer chipsets */ 957 default: /* for newer chipsets */
967 card->scratch_reg = IF_SDIO_FW_STATUS; 958 card->scratch_reg = IF_SDIO_FW_STATUS;
968 break; 959 break;
@@ -972,49 +963,17 @@ static int if_sdio_probe(struct sdio_func *func,
972 card->workqueue = create_workqueue("libertas_sdio"); 963 card->workqueue = create_workqueue("libertas_sdio");
973 INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); 964 INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
974 965
975 for (i = 0;i < ARRAY_SIZE(if_sdio_models);i++) { 966 /* Check if we support this card */
976 if (card->model == if_sdio_models[i].model) 967 for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
968 if (card->model == fw_table[i].model)
977 break; 969 break;
978 } 970 }
979 971 if (i == ARRAY_SIZE(fw_table)) {
980 if (i == ARRAY_SIZE(if_sdio_models)) {
981 lbs_pr_err("unknown card model 0x%x\n", card->model); 972 lbs_pr_err("unknown card model 0x%x\n", card->model);
982 ret = -ENODEV; 973 ret = -ENODEV;
983 goto free; 974 goto free;
984 } 975 }
985 976
986 card->helper = if_sdio_models[i].helper;
987 card->firmware = if_sdio_models[i].firmware;
988
989 kparam_block_sysfs_write(helper_name);
990 if (lbs_helper_name) {
991 char *helper = kstrdup(lbs_helper_name, GFP_KERNEL);
992 if (!helper) {
993 kparam_unblock_sysfs_write(helper_name);
994 ret = -ENOMEM;
995 goto free;
996 }
997 lbs_deb_sdio("overriding helper firmware: %s\n",
998 lbs_helper_name);
999 card->helper = helper;
1000 card->helper_allocated = true;
1001 }
1002 kparam_unblock_sysfs_write(helper_name);
1003
1004 kparam_block_sysfs_write(fw_name);
1005 if (lbs_fw_name) {
1006 char *fw_name = kstrdup(lbs_fw_name, GFP_KERNEL);
1007 if (!fw_name) {
1008 kparam_unblock_sysfs_write(fw_name);
1009 ret = -ENOMEM;
1010 goto free;
1011 }
1012 lbs_deb_sdio("overriding firmware: %s\n", lbs_fw_name);
1013 card->firmware = fw_name;
1014 card->firmware_allocated = true;
1015 }
1016 kparam_unblock_sysfs_write(fw_name);
1017
1018 sdio_claim_host(func); 977 sdio_claim_host(func);
1019 978
1020 ret = sdio_enable_func(func); 979 ret = sdio_enable_func(func);
@@ -1028,7 +987,7 @@ static int if_sdio_probe(struct sdio_func *func,
1028 /* For 1-bit transfers to the 8686 model, we need to enable the 987 /* For 1-bit transfers to the 8686 model, we need to enable the
1029 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0 988 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
1030 * bit to allow access to non-vendor registers. */ 989 * bit to allow access to non-vendor registers. */
1031 if ((card->model == IF_SDIO_MODEL_8686) && 990 if ((card->model == MODEL_8686) &&
1032 (host->caps & MMC_CAP_SDIO_IRQ) && 991 (host->caps & MMC_CAP_SDIO_IRQ) &&
1033 (host->ios.bus_width == MMC_BUS_WIDTH_1)) { 992 (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
1034 u8 reg; 993 u8 reg;
@@ -1091,8 +1050,8 @@ static int if_sdio_probe(struct sdio_func *func,
1091 * Get rx_unit if the chip is SD8688 or newer. 1050 * Get rx_unit if the chip is SD8688 or newer.
1092 * SD8385 & SD8686 do not have rx_unit. 1051 * SD8385 & SD8686 do not have rx_unit.
1093 */ 1052 */
1094 if ((card->model != IF_SDIO_MODEL_8385) 1053 if ((card->model != MODEL_8385)
1095 && (card->model != IF_SDIO_MODEL_8686)) 1054 && (card->model != MODEL_8686))
1096 card->rx_unit = if_sdio_read_rx_unit(card); 1055 card->rx_unit = if_sdio_read_rx_unit(card);
1097 else 1056 else
1098 card->rx_unit = 0; 1057 card->rx_unit = 0;
@@ -1108,7 +1067,7 @@ static int if_sdio_probe(struct sdio_func *func,
1108 /* 1067 /*
1109 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions 1068 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
1110 */ 1069 */
1111 if (card->model == IF_SDIO_MODEL_8688) { 1070 if (card->model == MODEL_8688) {
1112 struct cmd_header cmd; 1071 struct cmd_header cmd;
1113 1072
1114 memset(&cmd, 0, sizeof(cmd)); 1073 memset(&cmd, 0, sizeof(cmd));
@@ -1165,7 +1124,7 @@ static void if_sdio_remove(struct sdio_func *func)
1165 1124
1166 card = sdio_get_drvdata(func); 1125 card = sdio_get_drvdata(func);
1167 1126
1168 if (user_rmmod && (card->model == IF_SDIO_MODEL_8688)) { 1127 if (user_rmmod && (card->model == MODEL_8688)) {
1169 /* 1128 /*
1170 * FUNC_SHUTDOWN is required for SD8688 WLAN/BT 1129 * FUNC_SHUTDOWN is required for SD8688 WLAN/BT
1171 * multiple functions 1130 * multiple functions
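
Since the shared lookup tries fw_table rows in order, listing the libertas/ firmware paths before the bare legacy names, and the sd8686 v9 images before v8, encodes the preferred fallback order in data rather than code. A probe only passes the table head, and user-set module parameters, when present, override every row:

	ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name,
			       card->model, &fw_table[0], &helper, &mainfw);

This also supersedes the kstrdup()-based helper_name/fw_name override plumbing the SDIO driver carried before, since the override now happens inside the common lookup (an inference from the code deleted above).
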
diff --git a/drivers/net/wireless/libertas/if_sdio.h b/drivers/net/wireless/libertas/if_sdio.h
index 12179c1dc9c9..62fda3592f67 100644
--- a/drivers/net/wireless/libertas/if_sdio.h
+++ b/drivers/net/wireless/libertas/if_sdio.h
@@ -12,10 +12,6 @@
12#ifndef _LBS_IF_SDIO_H 12#ifndef _LBS_IF_SDIO_H
13#define _LBS_IF_SDIO_H 13#define _LBS_IF_SDIO_H
14 14
15#define IF_SDIO_MODEL_8385 0x04
16#define IF_SDIO_MODEL_8686 0x0b
17#define IF_SDIO_MODEL_8688 0x10
18
19#define IF_SDIO_IOPORT 0x00 15#define IF_SDIO_IOPORT 0x00
20 16
21#define IF_SDIO_H_INT_MASK 0x04 17#define IF_SDIO_H_INT_MASK 0x04
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index fe3f08028eb3..79bcb4e5d2ca 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -39,9 +39,6 @@ struct if_spi_card {
39 struct lbs_private *priv; 39 struct lbs_private *priv;
40 struct libertas_spi_platform_data *pdata; 40 struct libertas_spi_platform_data *pdata;
41 41
42 char helper_fw_name[IF_SPI_FW_NAME_MAX];
43 char main_fw_name[IF_SPI_FW_NAME_MAX];
44
45 /* The card ID and card revision, as reported by the hardware. */ 42 /* The card ID and card revision, as reported by the hardware. */
46 u16 card_id; 43 u16 card_id;
47 u8 card_rev; 44 u8 card_rev;
@@ -70,10 +67,28 @@ static void free_if_spi_card(struct if_spi_card *card)
70 kfree(card); 67 kfree(card);
71} 68}
72 69
73static struct chip_ident chip_id_to_device_name[] = { 70#define MODEL_8385 0x04
74 { .chip_id = 0x04, .name = 8385 }, 71#define MODEL_8686 0x0b
75 { .chip_id = 0x0b, .name = 8686 }, 72#define MODEL_8688 0x10
73
74static const struct lbs_fw_table fw_table[] = {
75 { MODEL_8385, "libertas/gspi8385_helper.bin", "libertas/gspi8385.bin" },
76 { MODEL_8385, "libertas/gspi8385_hlp.bin", "libertas/gspi8385.bin" },
77 { MODEL_8686, "libertas/gspi8686_v9_helper.bin", "libertas/gspi8686_v9.bin" },
78 { MODEL_8686, "libertas/gspi8686_hlp.bin", "libertas/gspi8686.bin" },
79 { MODEL_8688, "libertas/gspi8688_helper.bin", "libertas/gspi8688.bin" },
80 { 0, NULL, NULL }
76}; 81};
82MODULE_FIRMWARE("libertas/gspi8385_helper.bin");
83MODULE_FIRMWARE("libertas/gspi8385_hlp.bin");
84MODULE_FIRMWARE("libertas/gspi8385.bin");
85MODULE_FIRMWARE("libertas/gspi8686_v9_helper.bin");
86MODULE_FIRMWARE("libertas/gspi8686_v9.bin");
87MODULE_FIRMWARE("libertas/gspi8686_hlp.bin");
88MODULE_FIRMWARE("libertas/gspi8686.bin");
89MODULE_FIRMWARE("libertas/gspi8688_helper.bin");
90MODULE_FIRMWARE("libertas/gspi8688.bin");
91
77 92
78/* 93/*
79 * SPI Interface Unit Routines 94 * SPI Interface Unit Routines
@@ -399,26 +414,20 @@ static int spu_init(struct if_spi_card *card, int use_dummy_writes)
399 * Firmware Loading 414 * Firmware Loading
400 */ 415 */
401 416
402static int if_spi_prog_helper_firmware(struct if_spi_card *card) 417static int if_spi_prog_helper_firmware(struct if_spi_card *card,
418 const struct firmware *firmware)
403{ 419{
404 int err = 0; 420 int err = 0;
405 const struct firmware *firmware = NULL;
406 int bytes_remaining; 421 int bytes_remaining;
407 const u8 *fw; 422 const u8 *fw;
408 u8 temp[HELPER_FW_LOAD_CHUNK_SZ]; 423 u8 temp[HELPER_FW_LOAD_CHUNK_SZ];
409 struct spi_device *spi = card->spi;
410 424
411 lbs_deb_enter(LBS_DEB_SPI); 425 lbs_deb_enter(LBS_DEB_SPI);
412 426
413 err = spu_set_interrupt_mode(card, 1, 0); 427 err = spu_set_interrupt_mode(card, 1, 0);
414 if (err) 428 if (err)
415 goto out; 429 goto out;
416 /* Get helper firmware image */ 430
417 err = request_firmware(&firmware, card->helper_fw_name, &spi->dev);
418 if (err) {
419 lbs_pr_err("request_firmware failed with err = %d\n", err);
420 goto out;
421 }
422 bytes_remaining = firmware->size; 431 bytes_remaining = firmware->size;
423 fw = firmware->data; 432 fw = firmware->data;
424 433
@@ -429,13 +438,13 @@ static int if_spi_prog_helper_firmware(struct if_spi_card *card)
429 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, 438 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG,
430 HELPER_FW_LOAD_CHUNK_SZ); 439 HELPER_FW_LOAD_CHUNK_SZ);
431 if (err) 440 if (err)
432 goto release_firmware; 441 goto out;
433 442
434 err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG, 443 err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG,
435 IF_SPI_HIST_CMD_DOWNLOAD_RDY, 444 IF_SPI_HIST_CMD_DOWNLOAD_RDY,
436 IF_SPI_HIST_CMD_DOWNLOAD_RDY); 445 IF_SPI_HIST_CMD_DOWNLOAD_RDY);
437 if (err) 446 if (err)
438 goto release_firmware; 447 goto out;
439 448
440 /* Feed the data into the command read/write port reg 449 /* Feed the data into the command read/write port reg
441 * in chunks of 64 bytes */ 450 * in chunks of 64 bytes */
@@ -446,16 +455,16 @@ static int if_spi_prog_helper_firmware(struct if_spi_card *card)
446 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, 455 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG,
447 temp, HELPER_FW_LOAD_CHUNK_SZ); 456 temp, HELPER_FW_LOAD_CHUNK_SZ);
448 if (err) 457 if (err)
449 goto release_firmware; 458 goto out;
450 459
451 /* Interrupt the boot code */ 460 /* Interrupt the boot code */
452 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0); 461 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
453 if (err) 462 if (err)
454 goto release_firmware; 463 goto out;
455 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, 464 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG,
456 IF_SPI_CIC_CMD_DOWNLOAD_OVER); 465 IF_SPI_CIC_CMD_DOWNLOAD_OVER);
457 if (err) 466 if (err)
458 goto release_firmware; 467 goto out;
459 bytes_remaining -= HELPER_FW_LOAD_CHUNK_SZ; 468 bytes_remaining -= HELPER_FW_LOAD_CHUNK_SZ;
460 fw += HELPER_FW_LOAD_CHUNK_SZ; 469 fw += HELPER_FW_LOAD_CHUNK_SZ;
461 } 470 }
@@ -465,18 +474,16 @@ static int if_spi_prog_helper_firmware(struct if_spi_card *card)
465 * bootloader. This completes the helper download. */ 474 * bootloader. This completes the helper download. */
466 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, FIRMWARE_DNLD_OK); 475 err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, FIRMWARE_DNLD_OK);
467 if (err) 476 if (err)
468 goto release_firmware; 477 goto out;
469 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0); 478 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
470 if (err) 479 if (err)
471 goto release_firmware; 480 goto out;
472 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, 481 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG,
473 IF_SPI_CIC_CMD_DOWNLOAD_OVER); 482 IF_SPI_CIC_CMD_DOWNLOAD_OVER);
474 goto release_firmware; 483 goto out;
475 484
476 lbs_deb_spi("waiting for helper to boot...\n"); 485 lbs_deb_spi("waiting for helper to boot...\n");
477 486
478release_firmware:
479 release_firmware(firmware);
480out: 487out:
481 if (err) 488 if (err)
482 lbs_pr_err("failed to load helper firmware (err=%d)\n", err); 489 lbs_pr_err("failed to load helper firmware (err=%d)\n", err);
@@ -523,13 +530,12 @@ static int if_spi_prog_main_firmware_check_len(struct if_spi_card *card,
523 return len; 530 return len;
524} 531}
525 532
526static int if_spi_prog_main_firmware(struct if_spi_card *card) 533static int if_spi_prog_main_firmware(struct if_spi_card *card,
534 const struct firmware *firmware)
527{ 535{
528 int len, prev_len; 536 int len, prev_len;
529 int bytes, crc_err = 0, err = 0; 537 int bytes, crc_err = 0, err = 0;
530 const struct firmware *firmware = NULL;
531 const u8 *fw; 538 const u8 *fw;
532 struct spi_device *spi = card->spi;
533 u16 num_crc_errs; 539 u16 num_crc_errs;
534 540
535 lbs_deb_enter(LBS_DEB_SPI); 541 lbs_deb_enter(LBS_DEB_SPI);
@@ -538,19 +544,11 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card)
538 if (err) 544 if (err)
539 goto out; 545 goto out;
540 546
541 /* Get firmware image */
542 err = request_firmware(&firmware, card->main_fw_name, &spi->dev);
543 if (err) {
544 lbs_pr_err("%s: can't get firmware '%s' from kernel. "
545 "err = %d\n", __func__, card->main_fw_name, err);
546 goto out;
547 }
548
549 err = spu_wait_for_u16(card, IF_SPI_SCRATCH_1_REG, 0, 0); 547 err = spu_wait_for_u16(card, IF_SPI_SCRATCH_1_REG, 0, 0);
550 if (err) { 548 if (err) {
551 lbs_pr_err("%s: timed out waiting for initial " 549 lbs_pr_err("%s: timed out waiting for initial "
552 "scratch reg = 0\n", __func__); 550 "scratch reg = 0\n", __func__);
553 goto release_firmware; 551 goto out;
554 } 552 }
555 553
556 num_crc_errs = 0; 554 num_crc_errs = 0;
@@ -560,7 +558,7 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card)
560 while ((len = if_spi_prog_main_firmware_check_len(card, &crc_err))) { 558 while ((len = if_spi_prog_main_firmware_check_len(card, &crc_err))) {
561 if (len < 0) { 559 if (len < 0) {
562 err = len; 560 err = len;
563 goto release_firmware; 561 goto out;
564 } 562 }
565 if (bytes < 0) { 563 if (bytes < 0) {
566 /* If there are no more bytes left, we would normally 564 /* If there are no more bytes left, we would normally
@@ -575,7 +573,7 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card)
575 lbs_pr_err("Too many CRC errors encountered " 573 lbs_pr_err("Too many CRC errors encountered "
576 "in firmware load.\n"); 574 "in firmware load.\n");
577 err = -EIO; 575 err = -EIO;
578 goto release_firmware; 576 goto out;
579 } 577 }
580 } else { 578 } else {
581 /* Previous transfer succeeded. Advance counters. */ 579 /* Previous transfer succeeded. Advance counters. */
@@ -590,15 +588,15 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card)
590 588
591 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0); 589 err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
592 if (err) 590 if (err)
593 goto release_firmware; 591 goto out;
594 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, 592 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG,
595 card->cmd_buffer, len); 593 card->cmd_buffer, len);
596 if (err) 594 if (err)
597 goto release_firmware; 595 goto out;
598 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG , 596 err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG ,
599 IF_SPI_CIC_CMD_DOWNLOAD_OVER); 597 IF_SPI_CIC_CMD_DOWNLOAD_OVER);
600 if (err) 598 if (err)
601 goto release_firmware; 599 goto out;
602 prev_len = len; 600 prev_len = len;
603 } 601 }
604 if (bytes > prev_len) { 602 if (bytes > prev_len) {
@@ -611,12 +609,9 @@ static int if_spi_prog_main_firmware(struct if_spi_card *card)
611 SUCCESSFUL_FW_DOWNLOAD_MAGIC); 609 SUCCESSFUL_FW_DOWNLOAD_MAGIC);
612 if (err) { 610 if (err) {
613 lbs_pr_err("failed to confirm the firmware download\n"); 611 lbs_pr_err("failed to confirm the firmware download\n");
614 goto release_firmware; 612 goto out;
615 } 613 }
616 614
617release_firmware:
618 release_firmware(firmware);
619
620out: 615out:
621 if (err) 616 if (err)
622 lbs_pr_err("failed to load firmware (err=%d)\n", err); 617 lbs_pr_err("failed to load firmware (err=%d)\n", err);
@@ -800,14 +795,16 @@ static int lbs_spi_thread(void *data)
800 goto err; 795 goto err;
801 } 796 }
802 797
803 if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) 798 if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
804 err = if_spi_c2h_cmd(card); 799 err = if_spi_c2h_cmd(card);
805 if (err) 800 if (err)
806 goto err; 801 goto err;
807 if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) 802 }
803 if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
808 err = if_spi_c2h_data(card); 804 err = if_spi_c2h_data(card);
809 if (err) 805 if (err)
810 goto err; 806 goto err;
807 }
811 808
812 /* workaround: in PS mode, the card does not set the Command 809 /* workaround: in PS mode, the card does not set the Command
813 * Download Ready bit, but it sets TX Download Ready. */ 810 * Download Ready bit, but it sets TX Download Ready. */
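The braces added in this lbs_spi_thread() hunk fix a dangling-statement bug: in the old code each "if (err) goto err;" sat outside the condition it was meant to guard, so it executed unconditionally and re-tested a stale err from an earlier call. A compilable toy reduction of the fixed control flow (all names and bit values here are hypothetical):

#include <stdio.h>

static int handle_cmd(void)  { return 0; }	/* if_spi_c2h_cmd() stand-in */
static int handle_data(void) { return -1; }	/* if_spi_c2h_data() stand-in */

int main(void)
{
	unsigned hi_status = 0x2;	/* only the RX-ready bit set */
	int err = 0;

	if (hi_status & 0x1) {		/* CMD upload ready */
		err = handle_cmd();
		if (err)		/* now guarded by the braces */
			goto out;
	}
	if (hi_status & 0x2) {		/* RX upload ready */
		err = handle_data();
		if (err)
			goto out;
	}
out:
	printf("err = %d\n", err);	/* -1, from handle_data() only */
	return 0;
}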
@@ -886,37 +883,16 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
886 * SPI callbacks 883 * SPI callbacks
887 */ 884 */
888 885
889static int if_spi_calculate_fw_names(u16 card_id,
890 char *helper_fw, char *main_fw)
891{
892 int i;
893 for (i = 0; i < ARRAY_SIZE(chip_id_to_device_name); ++i) {
894 if (card_id == chip_id_to_device_name[i].chip_id)
895 break;
896 }
897 if (i == ARRAY_SIZE(chip_id_to_device_name)) {
898 lbs_pr_err("Unsupported chip_id: 0x%02x\n", card_id);
899 return -EAFNOSUPPORT;
900 }
901 snprintf(helper_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d_hlp.bin",
902 chip_id_to_device_name[i].name);
903 snprintf(main_fw, IF_SPI_FW_NAME_MAX, "libertas/gspi%d.bin",
904 chip_id_to_device_name[i].name);
905 return 0;
906}
907MODULE_FIRMWARE("libertas/gspi8385_hlp.bin");
908MODULE_FIRMWARE("libertas/gspi8385.bin");
909MODULE_FIRMWARE("libertas/gspi8686_hlp.bin");
910MODULE_FIRMWARE("libertas/gspi8686.bin");
911
912static int __devinit if_spi_probe(struct spi_device *spi) 886static int __devinit if_spi_probe(struct spi_device *spi)
913{ 887{
914 struct if_spi_card *card; 888 struct if_spi_card *card;
915 struct lbs_private *priv = NULL; 889 struct lbs_private *priv = NULL;
916 struct libertas_spi_platform_data *pdata = spi->dev.platform_data; 890 struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
917 int err = 0; 891 int err = 0, i;
918 u32 scratch; 892 u32 scratch;
919 struct sched_param param = { .sched_priority = 1 }; 893 struct sched_param param = { .sched_priority = 1 };
894 const struct firmware *helper = NULL;
895 const struct firmware *mainfw = NULL;
920 896
921 lbs_deb_enter(LBS_DEB_SPI); 897 lbs_deb_enter(LBS_DEB_SPI);
922 898
@@ -961,10 +937,25 @@ static int __devinit if_spi_probe(struct spi_device *spi)
961 lbs_deb_spi("Firmware is already loaded for " 937 lbs_deb_spi("Firmware is already loaded for "
962 "Marvell WLAN 802.11 adapter\n"); 938 "Marvell WLAN 802.11 adapter\n");
963 else { 939 else {
964 err = if_spi_calculate_fw_names(card->card_id, 940 /* Check if we support this card */
965 card->helper_fw_name, card->main_fw_name); 941 for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
966 if (err) 942 if (card->card_id == fw_table[i].model)
943 break;
944 }
945 if (i == ARRAY_SIZE(fw_table)) {
946 lbs_pr_err("Unsupported chip_id: 0x%02x\n",
947 card->card_id);
948 err = -ENODEV;
967 goto free_card; 949 goto free_card;
950 }
951
952 err = lbs_get_firmware(&card->spi->dev, NULL, NULL,
953 card->card_id, &fw_table[0], &helper,
954 &mainfw);
955 if (err) {
956 lbs_pr_err("failed to find firmware (%d)\n", err);
957 goto free_card;
958 }
968 959
969 lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter " 960 lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
970 "(chip_id = 0x%04x, chip_rev = 0x%02x) " 961 "(chip_id = 0x%04x, chip_rev = 0x%02x) "
@@ -973,10 +964,10 @@ static int __devinit if_spi_probe(struct spi_device *spi)
973 card->card_id, card->card_rev, 964 card->card_id, card->card_rev,
974 spi->master->bus_num, spi->chip_select, 965 spi->master->bus_num, spi->chip_select,
975 spi->max_speed_hz); 966 spi->max_speed_hz);
976 err = if_spi_prog_helper_firmware(card); 967 err = if_spi_prog_helper_firmware(card, helper);
977 if (err) 968 if (err)
978 goto free_card; 969 goto free_card;
979 err = if_spi_prog_main_firmware(card); 970 err = if_spi_prog_main_firmware(card, mainfw);
980 if (err) 971 if (err)
981 goto free_card; 972 goto free_card;
982 lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n"); 973 lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
@@ -1044,6 +1035,11 @@ remove_card:
1044free_card: 1035free_card:
1045 free_if_spi_card(card); 1036 free_if_spi_card(card);
1046out: 1037out:
1038 if (helper)
1039 release_firmware(helper);
1040 if (mainfw)
1041 release_firmware(mainfw);
1042
1047 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err); 1043 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
1048 return err; 1044 return err;
1049} 1045}
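With this change if_spi_probe() no longer composes firmware file names with snprintf(); it scans the shared fw_table for the card ID and passes the whole table to lbs_get_firmware(). A standalone sketch of that scan (rows abbreviated from the fw_table added above; find_model() is a hypothetical helper, not a driver function):

#include <stdio.h>

struct fw_entry {
	unsigned model;
	const char *helper;
	const char *fwname;
};

static const struct fw_entry fw_table[] = {
	{ 0x04, "libertas/gspi8385_helper.bin", "libertas/gspi8385.bin" },
	{ 0x0b, "libertas/gspi8686_v9_helper.bin", "libertas/gspi8686_v9.bin" },
	{ 0x10, "libertas/gspi8688_helper.bin", "libertas/gspi8688.bin" },
	{ 0, NULL, NULL }	/* sentinel, as in the patch */
};

static const struct fw_entry *find_model(unsigned card_id)
{
	const struct fw_entry *it;

	for (it = fw_table; it->helper; it++)
		if (it->model == card_id)
			return it;
	return NULL;	/* unsupported chip: probe bails out with -ENODEV */
}

int main(void)
{
	const struct fw_entry *e = find_model(0x0b);

	printf("%s\n", e ? e->helper : "unsupported");
	return 0;
}

The sentinel row terminates the walk, so supporting a new chip becomes a one-line table addition rather than new lookup code.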
diff --git a/drivers/net/wireless/libertas/if_spi.h b/drivers/net/wireless/libertas/if_spi.h
index f87eec410848..8b1417d3b71b 100644
--- a/drivers/net/wireless/libertas/if_spi.h
+++ b/drivers/net/wireless/libertas/if_spi.h
@@ -25,11 +25,6 @@
25 25
26#define IF_SPI_FW_NAME_MAX 30 26#define IF_SPI_FW_NAME_MAX 30
27 27
28struct chip_ident {
29 u16 chip_id;
30 u16 name;
31};
32
33#define MAX_MAIN_FW_LOAD_CRC_ERR 10 28#define MAX_MAIN_FW_LOAD_CRC_ERR 10
34 29
35/* Chunk size when loading the helper firmware */ 30/* Chunk size when loading the helper firmware */
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 3ff61063671a..e906616232a2 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -26,15 +26,25 @@
26 26
27#define MESSAGE_HEADER_LEN 4 27#define MESSAGE_HEADER_LEN 4
28 28
29static char *lbs_fw_name = "usb8388.bin"; 29static char *lbs_fw_name = NULL;
30module_param_named(fw_name, lbs_fw_name, charp, 0644); 30module_param_named(fw_name, lbs_fw_name, charp, 0644);
31 31
32MODULE_FIRMWARE("libertas/usb8388_v9.bin");
33MODULE_FIRMWARE("libertas/usb8388_v5.bin");
34MODULE_FIRMWARE("libertas/usb8388.bin");
35MODULE_FIRMWARE("libertas/usb8682.bin");
32MODULE_FIRMWARE("usb8388.bin"); 36MODULE_FIRMWARE("usb8388.bin");
33 37
38enum {
39 MODEL_UNKNOWN = 0x0,
40 MODEL_8388 = 0x1,
41 MODEL_8682 = 0x2
42};
43
34static struct usb_device_id if_usb_table[] = { 44static struct usb_device_id if_usb_table[] = {
35 /* Enter the device signature inside */ 45 /* Enter the device signature inside */
36 { USB_DEVICE(0x1286, 0x2001) }, 46 { USB_DEVICE(0x1286, 0x2001), .driver_info = MODEL_8388 },
37 { USB_DEVICE(0x05a3, 0x8388) }, 47 { USB_DEVICE(0x05a3, 0x8388), .driver_info = MODEL_8388 },
38 {} /* Terminating entry */ 48 {} /* Terminating entry */
39}; 49};
40 50
@@ -66,6 +76,8 @@ static ssize_t if_usb_firmware_set(struct device *dev,
66 struct if_usb_card *cardp = priv->card; 76 struct if_usb_card *cardp = priv->card;
67 int ret; 77 int ret;
68 78
79 BUG_ON(buf == NULL);
80
69 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW); 81 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW);
70 if (ret == 0) 82 if (ret == 0)
71 return count; 83 return count;
@@ -91,6 +103,8 @@ static ssize_t if_usb_boot2_set(struct device *dev,
91 struct if_usb_card *cardp = priv->card; 103 struct if_usb_card *cardp = priv->card;
92 int ret; 104 int ret;
93 105
106 BUG_ON(buf == NULL);
107
94 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2); 108 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2);
95 if (ret == 0) 109 if (ret == 0)
96 return count; 110 return count;
@@ -244,6 +258,7 @@ static int if_usb_probe(struct usb_interface *intf,
244 init_waitqueue_head(&cardp->fw_wq); 258 init_waitqueue_head(&cardp->fw_wq);
245 259
246 cardp->udev = udev; 260 cardp->udev = udev;
261 cardp->model = (uint32_t) id->driver_info;
247 iface_desc = intf->cur_altsetting; 262 iface_desc = intf->cur_altsetting;
248 263
249 lbs_deb_usbd(&udev->dev, "bcdUSB = 0x%X bDeviceClass = 0x%X" 264 lbs_deb_usbd(&udev->dev, "bcdUSB = 0x%X bDeviceClass = 0x%X"
@@ -924,6 +939,38 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp,
924 return ret; 939 return ret;
925} 940}
926 941
942/* table of firmware file names */
943static const struct {
944 u32 model;
945 const char *fwname;
946} fw_table[] = {
947 { MODEL_8388, "libertas/usb8388_v9.bin" },
948 { MODEL_8388, "libertas/usb8388_v5.bin" },
949 { MODEL_8388, "libertas/usb8388.bin" },
950 { MODEL_8388, "usb8388.bin" },
951 { MODEL_8682, "libertas/usb8682.bin" }
952};
953
954static int get_fw(struct if_usb_card *cardp, const char *fwname)
955{
956 int i;
957
958 /* Try user-specified firmware first */
959 if (fwname)
960 return request_firmware(&cardp->fw, fwname, &cardp->udev->dev);
961
962 /* Otherwise search for firmware to use */
963 for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
964 if (fw_table[i].model != cardp->model)
965 continue;
966 if (request_firmware(&cardp->fw, fw_table[i].fwname,
967 &cardp->udev->dev) == 0)
968 return 0;
969 }
970
971 return -ENOENT;
972}
973
927static int __if_usb_prog_firmware(struct if_usb_card *cardp, 974static int __if_usb_prog_firmware(struct if_usb_card *cardp,
928 const char *fwname, int cmd) 975 const char *fwname, int cmd)
929{ 976{
@@ -933,10 +980,9 @@ static int __if_usb_prog_firmware(struct if_usb_card *cardp,
933 980
934 lbs_deb_enter(LBS_DEB_USB); 981 lbs_deb_enter(LBS_DEB_USB);
935 982
936 ret = request_firmware(&cardp->fw, fwname, &cardp->udev->dev); 983 ret = get_fw(cardp, fwname);
937 if (ret < 0) { 984 if (ret) {
938 lbs_pr_err("request_firmware() failed with %#x\n", ret); 985 lbs_pr_err("failed to find firmware (%d)\n", ret);
939 lbs_pr_err("firmware %s not found\n", fwname);
940 goto done; 986 goto done;
941 } 987 }
942 988
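get_fw() above prefers a user-supplied fw_name and otherwise returns the first per-model table entry that request_firmware() can satisfy, so listing the 8388 images newest-first means v9 wins whenever it is installed. A compilable model of that fallback order (try_load() stands in for request_firmware(); in this toy only the v5 image "exists"):

#include <stdio.h>
#include <string.h>

static int try_load(const char *name)
{
	return strcmp(name, "libertas/usb8388_v5.bin") ? -2 : 0;
}

static const struct { unsigned model; const char *fwname; } fw_table[] = {
	{ 1, "libertas/usb8388_v9.bin" },	/* MODEL_8388, newest first */
	{ 1, "libertas/usb8388_v5.bin" },
	{ 1, "libertas/usb8388.bin" },
	{ 1, "usb8388.bin" },			/* legacy flat name */
	{ 2, "libertas/usb8682.bin" },		/* MODEL_8682 */
};

int main(void)
{
	unsigned model = 1;
	size_t i;

	for (i = 0; i < sizeof(fw_table) / sizeof(fw_table[0]); i++) {
		if (fw_table[i].model != model)
			continue;
		if (try_load(fw_table[i].fwname) == 0) {
			printf("loaded %s\n", fw_table[i].fwname);
			return 0;
		}
	}
	return 1;	/* -ENOENT in the driver */
}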
diff --git a/drivers/net/wireless/libertas/if_usb.h b/drivers/net/wireless/libertas/if_usb.h
index 5ba0aee0eb2f..d819e7e3c9aa 100644
--- a/drivers/net/wireless/libertas/if_usb.h
+++ b/drivers/net/wireless/libertas/if_usb.h
@@ -43,6 +43,7 @@ struct bootcmdresp
43/** USB card description structure*/ 43/** USB card description structure*/
44struct if_usb_card { 44struct if_usb_card {
45 struct usb_device *udev; 45 struct usb_device *udev;
46 uint32_t model; /* MODEL_* */
46 struct urb *rx_urb, *tx_urb; 47 struct urb *rx_urb, *tx_urb;
47 struct lbs_private *priv; 48 struct lbs_private *priv;
48 49
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 24958a86747b..47ce5a6ba120 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -1047,6 +1047,111 @@ void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx)
1047} 1047}
1048EXPORT_SYMBOL_GPL(lbs_notify_command_response); 1048EXPORT_SYMBOL_GPL(lbs_notify_command_response);
1049 1049
1050/**
1051 * @brief Retrieves two-stage firmware
1052 *
1053 * @param dev A pointer to device structure
1054 * @param user_helper User-defined helper firmware file
1055 * @param user_mainfw User-defined main firmware file
1056 * @param card_model Bus-specific card model ID used to filter firmware table
1057 * elements
1058 * @param fw_table Table of firmware file names and device model numbers
1059 * terminated by an entry with a NULL helper name
1060 * @param helper On success, the helper firmware; caller must free
1061 * @param mainfw On success, the main firmware; caller must free
1062 *
1063 * @return 0 on success, non-zero on failure
1064 */
1065int lbs_get_firmware(struct device *dev, const char *user_helper,
1066 const char *user_mainfw, u32 card_model,
1067 const struct lbs_fw_table *fw_table,
1068 const struct firmware **helper,
1069 const struct firmware **mainfw)
1070{
1071 const struct lbs_fw_table *iter;
1072 int ret;
1073
1074 BUG_ON(helper == NULL);
1075 BUG_ON(mainfw == NULL);
1076
1077 /* Try user-specified firmware first */
1078 if (user_helper) {
1079 ret = request_firmware(helper, user_helper, dev);
1080 if (ret) {
1081 lbs_pr_err("couldn't find helper firmware %s",
1082 user_helper);
1083 goto fail;
1084 }
1085 }
1086 if (user_mainfw) {
1087 ret = request_firmware(mainfw, user_mainfw, dev);
1088 if (ret) {
1089 lbs_pr_err("couldn't find main firmware %s",
1090 user_mainfw);
1091 goto fail;
1092 }
1093 }
1094
1095 if (*helper && *mainfw)
1096 return 0;
1097
 1098 /* Otherwise search for firmware to use. If neither the helper nor
 1099 * the main firmware was specified by the user, then we need to
1100 * make sure that found helper & main are from the same entry in
1101 * fw_table.
1102 */
1103 iter = fw_table;
1104 while (iter && iter->helper) {
1105 if (iter->model != card_model)
1106 goto next;
1107
1108 if (*helper == NULL) {
1109 ret = request_firmware(helper, iter->helper, dev);
1110 if (ret)
1111 goto next;
1112
 1113 /* If the device has one-stage firmware (i.e. cf8305) and
 1114 * we've got it, then we don't need to bother with the
1115 * main firmware.
1116 */
1117 if (iter->fwname == NULL)
1118 return 0;
1119 }
1120
1121 if (*mainfw == NULL) {
1122 ret = request_firmware(mainfw, iter->fwname, dev);
1123 if (ret && !user_helper) {
1124 /* Clear the helper if it wasn't user-specified
1125 * and the main firmware load failed, to ensure
1126 * we don't have mismatched firmware pairs.
1127 */
1128 release_firmware(*helper);
1129 *helper = NULL;
1130 }
1131 }
1132
1133 if (*helper && *mainfw)
1134 return 0;
1135
1136 next:
1137 iter++;
1138 }
1139
1140 fail:
1141 /* Failed */
1142 if (*helper) {
1143 release_firmware(*helper);
1144 *helper = NULL;
1145 }
1146 if (*mainfw) {
1147 release_firmware(*mainfw);
1148 *mainfw = NULL;
1149 }
1150
1151 return -ENOENT;
1152}
1153EXPORT_SYMBOL_GPL(lbs_get_firmware);
1154
1050static int __init lbs_init_module(void) 1155static int __init lbs_init_module(void)
1051{ 1156{
1052 lbs_deb_enter(LBS_DEB_MAIN); 1157 lbs_deb_enter(LBS_DEB_MAIN);
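lbs_get_firmware() gives every libertas front-end a single selection policy: user-specified images always win; otherwise the helper and main image must come from the same fw_table row; and on failure both out-pointers are released and cleared, so a caller can never end up holding a mismatched pair. A sketch of the calling convention (kernel C fragment, not a standalone program; it mirrors the if_spi caller earlier in this patch):

/* Both out-pointers must start out NULL. On success the caller owns
 * the two images and must release them; error paths need no cleanup
 * because lbs_get_firmware() already dropped anything it acquired. */
const struct firmware *helper = NULL;
const struct firmware *mainfw = NULL;
int err;

err = lbs_get_firmware(&spi->dev, NULL, NULL, card->card_id,
		       &fw_table[0], &helper, &mainfw);
if (err)
	return err;

/* ... program helper, then mainfw, into the card ... */

release_firmware(helper);
release_firmware(mainfw);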
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 194762ab0142..acf3bf63ee33 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -574,7 +574,7 @@ int lbs_mesh_bt_set_inverted(struct lbs_private *priv, bool inverted)
574 memset(&cmd, 0, sizeof(cmd)); 574 memset(&cmd, 0, sizeof(cmd));
575 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 575 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
576 cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_SET_INVERT); 576 cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_SET_INVERT);
577 cmd.id = !!inverted; 577 cmd.id = cpu_to_le32(!!inverted);
578 578
579 ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd); 579 ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
580 580
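The mesh.c one-liner is an endianness fix: cmd.id is a little-endian 32-bit field on the wire, so assigning a host-order value only happened to work on little-endian CPUs. A compilable user-space illustration of what cpu_to_le32() does (my_cpu_to_le32() is a stand-in; the kernel macro is an identity on little-endian hosts and a byte swap on big-endian ones):

#include <stdint.h>
#include <stdio.h>

static uint32_t my_cpu_to_le32(uint32_t x)
{
	const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

	if (probe.b[0])		/* little-endian host: nothing to do */
		return x;
	return ((x & 0xffu) << 24) | ((x & 0xff00u) << 8) |
	       ((x >> 8) & 0xff00u) | (x >> 24);
}

int main(void)
{
	/* The wire image of cmd.id = 1 is now identical on every host. */
	printf("0x%08x\n", (unsigned)my_cpu_to_le32(1));
	return 0;
}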
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 41a4f214ade1..ba7d96584cb6 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -54,7 +54,7 @@ static int if_usb_reset_device(struct if_usb_card *cardp);
54/** 54/**
 55 * if_usb_write_bulk_callback - call back to handle URB status 55 * if_usb_write_bulk_callback - call back to handle URB status
56 * 56 *
57 * @param urb pointer to urb structure 57 * @param urb pointer to urb structure
58 */ 58 */
59static void if_usb_write_bulk_callback(struct urb *urb) 59static void if_usb_write_bulk_callback(struct urb *urb)
60{ 60{
@@ -178,16 +178,19 @@ static int if_usb_probe(struct usb_interface *intf,
178 le16_to_cpu(endpoint->wMaxPacketSize); 178 le16_to_cpu(endpoint->wMaxPacketSize);
179 cardp->ep_in = usb_endpoint_num(endpoint); 179 cardp->ep_in = usb_endpoint_num(endpoint);
180 180
181 lbtf_deb_usbd(&udev->dev, "in_endpoint = %d\n", cardp->ep_in); 181 lbtf_deb_usbd(&udev->dev, "in_endpoint = %d\n",
182 lbtf_deb_usbd(&udev->dev, "Bulk in size is %d\n", cardp->ep_in_size); 182 cardp->ep_in);
183 lbtf_deb_usbd(&udev->dev, "Bulk in size is %d\n",
184 cardp->ep_in_size);
183 } else if (usb_endpoint_is_bulk_out(endpoint)) { 185 } else if (usb_endpoint_is_bulk_out(endpoint)) {
184 cardp->ep_out_size = 186 cardp->ep_out_size =
185 le16_to_cpu(endpoint->wMaxPacketSize); 187 le16_to_cpu(endpoint->wMaxPacketSize);
186 cardp->ep_out = usb_endpoint_num(endpoint); 188 cardp->ep_out = usb_endpoint_num(endpoint);
187 189
188 lbtf_deb_usbd(&udev->dev, "out_endpoint = %d\n", cardp->ep_out); 190 lbtf_deb_usbd(&udev->dev, "out_endpoint = %d\n",
191 cardp->ep_out);
189 lbtf_deb_usbd(&udev->dev, "Bulk out size is %d\n", 192 lbtf_deb_usbd(&udev->dev, "Bulk out size is %d\n",
190 cardp->ep_out_size); 193 cardp->ep_out_size);
191 } 194 }
192 } 195 }
193 if (!cardp->ep_out_size || !cardp->ep_in_size) { 196 if (!cardp->ep_out_size || !cardp->ep_in_size) {
@@ -318,10 +321,12 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
318 321
319 if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_DATA_TO_RECV)) { 322 if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_DATA_TO_RECV)) {
320 lbtf_deb_usb2(&cardp->udev->dev, "There are data to follow\n"); 323 lbtf_deb_usb2(&cardp->udev->dev, "There are data to follow\n");
321 lbtf_deb_usb2(&cardp->udev->dev, "seqnum = %d totalbytes = %d\n", 324 lbtf_deb_usb2(&cardp->udev->dev,
322 cardp->fwseqnum, cardp->totalbytes); 325 "seqnum = %d totalbytes = %d\n",
326 cardp->fwseqnum, cardp->totalbytes);
323 } else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) { 327 } else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) {
324 lbtf_deb_usb2(&cardp->udev->dev, "Host has finished FW downloading\n"); 328 lbtf_deb_usb2(&cardp->udev->dev,
329 "Host has finished FW downloading\n");
 325 lbtf_deb_usb2(&cardp->udev->dev, "Downloading FW JUMP BLOCK\n"); 330 lbtf_deb_usb2(&cardp->udev->dev, "Downloading FW JUMP BLOCK\n");
326 331
327 /* Host has finished FW downloading 332 /* Host has finished FW downloading
@@ -367,7 +372,7 @@ EXPORT_SYMBOL_GPL(if_usb_reset_device);
367/** 372/**
368 * usb_tx_block - transfer data to the device 373 * usb_tx_block - transfer data to the device
369 * 374 *
370 * @priv pointer to struct lbtf_private 375 * @priv pointer to struct lbtf_private
371 * @payload pointer to payload data 376 * @payload pointer to payload data
372 * @nb data length 377 * @nb data length
373 * @data non-zero for data, zero for commands 378 * @data non-zero for data, zero for commands
@@ -400,7 +405,8 @@ static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
400 urb->transfer_flags |= URB_ZERO_PACKET; 405 urb->transfer_flags |= URB_ZERO_PACKET;
401 406
402 if (usb_submit_urb(urb, GFP_ATOMIC)) { 407 if (usb_submit_urb(urb, GFP_ATOMIC)) {
403 lbtf_deb_usbd(&cardp->udev->dev, "usb_submit_urb failed: %d\n", ret); 408 lbtf_deb_usbd(&cardp->udev->dev,
409 "usb_submit_urb failed: %d\n", ret);
404 goto tx_ret; 410 goto tx_ret;
405 } 411 }
406 412
@@ -438,10 +444,12 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
438 444
439 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; 445 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
440 446
441 lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb); 447 lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n",
448 cardp->rx_urb);
442 ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC); 449 ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
443 if (ret) { 450 if (ret) {
444 lbtf_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret); 451 lbtf_deb_usbd(&cardp->udev->dev,
452 "Submit Rx URB failed: %d\n", ret);
445 kfree_skb(skb); 453 kfree_skb(skb);
446 cardp->rx_skb = NULL; 454 cardp->rx_skb = NULL;
447 lbtf_deb_leave(LBTF_DEB_USB); 455 lbtf_deb_leave(LBTF_DEB_USB);
@@ -522,14 +530,14 @@ static void if_usb_receive_fwload(struct urb *urb)
522 } 530 }
523 } else if (bcmdresp.cmd != BOOT_CMD_FW_BY_USB) { 531 } else if (bcmdresp.cmd != BOOT_CMD_FW_BY_USB) {
524 pr_info("boot cmd response cmd_tag error (%d)\n", 532 pr_info("boot cmd response cmd_tag error (%d)\n",
525 bcmdresp.cmd); 533 bcmdresp.cmd);
526 } else if (bcmdresp.result != BOOT_CMD_RESP_OK) { 534 } else if (bcmdresp.result != BOOT_CMD_RESP_OK) {
527 pr_info("boot cmd response result error (%d)\n", 535 pr_info("boot cmd response result error (%d)\n",
528 bcmdresp.result); 536 bcmdresp.result);
529 } else { 537 } else {
530 cardp->bootcmdresp = 1; 538 cardp->bootcmdresp = 1;
531 lbtf_deb_usbd(&cardp->udev->dev, 539 lbtf_deb_usbd(&cardp->udev->dev,
532 "Received valid boot command response\n"); 540 "Received valid boot command response\n");
533 } 541 }
534 542
535 kfree_skb(skb); 543 kfree_skb(skb);
@@ -541,19 +549,23 @@ static void if_usb_receive_fwload(struct urb *urb)
541 syncfwheader = kmemdup(skb->data, sizeof(struct fwsyncheader), 549 syncfwheader = kmemdup(skb->data, sizeof(struct fwsyncheader),
542 GFP_ATOMIC); 550 GFP_ATOMIC);
543 if (!syncfwheader) { 551 if (!syncfwheader) {
544 lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n"); 552 lbtf_deb_usbd(&cardp->udev->dev,
553 "Failure to allocate syncfwheader\n");
545 kfree_skb(skb); 554 kfree_skb(skb);
546 lbtf_deb_leave(LBTF_DEB_USB); 555 lbtf_deb_leave(LBTF_DEB_USB);
547 return; 556 return;
548 } 557 }
549 558
550 if (!syncfwheader->cmd) { 559 if (!syncfwheader->cmd) {
551 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n"); 560 lbtf_deb_usb2(&cardp->udev->dev,
552 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n", 561 "FW received Blk with correct CRC\n");
553 le32_to_cpu(syncfwheader->seqnum)); 562 lbtf_deb_usb2(&cardp->udev->dev,
563 "FW received Blk seqnum = %d\n",
564 le32_to_cpu(syncfwheader->seqnum));
554 cardp->CRC_OK = 1; 565 cardp->CRC_OK = 1;
555 } else { 566 } else {
556 lbtf_deb_usbd(&cardp->udev->dev, "FW received Blk with CRC error\n"); 567 lbtf_deb_usbd(&cardp->udev->dev,
568 "FW received Blk with CRC error\n");
557 cardp->CRC_OK = 0; 569 cardp->CRC_OK = 0;
558 } 570 }
559 571
@@ -666,7 +678,8 @@ static void if_usb_receive(struct urb *urb)
666 { 678 {
667 /* Event cause handling */ 679 /* Event cause handling */
668 u32 event_cause = le32_to_cpu(pkt[1]); 680 u32 event_cause = le32_to_cpu(pkt[1]);
669 lbtf_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n", event_cause); 681 lbtf_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n",
682 event_cause);
670 683
671 /* Icky undocumented magic special case */ 684 /* Icky undocumented magic special case */
672 if (event_cause & 0xffff0000) { 685 if (event_cause & 0xffff0000) {
@@ -689,7 +702,7 @@ static void if_usb_receive(struct urb *urb)
689 } 702 }
690 default: 703 default:
691 lbtf_deb_usbd(&cardp->udev->dev, 704 lbtf_deb_usbd(&cardp->udev->dev,
692 "libertastf: unknown command type 0x%X\n", recvtype); 705 "libertastf: unknown command type 0x%X\n", recvtype);
693 kfree_skb(skb); 706 kfree_skb(skb);
694 break; 707 break;
695 } 708 }
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 86fa8abdd66f..7eaaa3bab547 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -9,7 +9,8 @@
9 9
10/* 10/*
11 * TODO: 11 * TODO:
12 * - IBSS mode simulation (Beacon transmission with competition for "air time") 12 * - Add TSF sync and fix IBSS beacon transmission by adding
13 * competition for "air time" at TBTT
13 * - RX filtering based on filter configuration (data->rx_filter) 14 * - RX filtering based on filter configuration (data->rx_filter)
14 */ 15 */
15 16
@@ -594,17 +595,34 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
594 struct ieee80211_vif *vif) 595 struct ieee80211_vif *vif)
595{ 596{
596 wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", 597 wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
597 __func__, vif->type, vif->addr); 598 __func__, ieee80211_vif_type_p2p(vif),
599 vif->addr);
598 hwsim_set_magic(vif); 600 hwsim_set_magic(vif);
599 return 0; 601 return 0;
600} 602}
601 603
602 604
605static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
606 struct ieee80211_vif *vif,
607 enum nl80211_iftype newtype,
608 bool newp2p)
609{
610 newtype = ieee80211_iftype_p2p(newtype, newp2p);
611 wiphy_debug(hw->wiphy,
612 "%s (old type=%d, new type=%d, mac_addr=%pM)\n",
613 __func__, ieee80211_vif_type_p2p(vif),
614 newtype, vif->addr);
615 hwsim_check_magic(vif);
616
617 return 0;
618}
619
603static void mac80211_hwsim_remove_interface( 620static void mac80211_hwsim_remove_interface(
604 struct ieee80211_hw *hw, struct ieee80211_vif *vif) 621 struct ieee80211_hw *hw, struct ieee80211_vif *vif)
605{ 622{
606 wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", 623 wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
607 __func__, vif->type, vif->addr); 624 __func__, ieee80211_vif_type_p2p(vif),
625 vif->addr);
608 hwsim_check_magic(vif); 626 hwsim_check_magic(vif);
609 hwsim_clear_magic(vif); 627 hwsim_clear_magic(vif);
610} 628}
@@ -620,7 +638,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
620 hwsim_check_magic(vif); 638 hwsim_check_magic(vif);
621 639
622 if (vif->type != NL80211_IFTYPE_AP && 640 if (vif->type != NL80211_IFTYPE_AP &&
623 vif->type != NL80211_IFTYPE_MESH_POINT) 641 vif->type != NL80211_IFTYPE_MESH_POINT &&
642 vif->type != NL80211_IFTYPE_ADHOC)
624 return; 643 return;
625 644
626 skb = ieee80211_beacon_get(hw, vif); 645 skb = ieee80211_beacon_get(hw, vif);
@@ -1025,6 +1044,7 @@ static struct ieee80211_ops mac80211_hwsim_ops =
1025 .start = mac80211_hwsim_start, 1044 .start = mac80211_hwsim_start,
1026 .stop = mac80211_hwsim_stop, 1045 .stop = mac80211_hwsim_stop,
1027 .add_interface = mac80211_hwsim_add_interface, 1046 .add_interface = mac80211_hwsim_add_interface,
1047 .change_interface = mac80211_hwsim_change_interface,
1028 .remove_interface = mac80211_hwsim_remove_interface, 1048 .remove_interface = mac80211_hwsim_remove_interface,
1029 .config = mac80211_hwsim_config, 1049 .config = mac80211_hwsim_config,
1030 .configure_filter = mac80211_hwsim_configure_filter, 1050 .configure_filter = mac80211_hwsim_configure_filter,
@@ -1295,6 +1315,9 @@ static int __init init_mac80211_hwsim(void)
1295 hw->wiphy->interface_modes = 1315 hw->wiphy->interface_modes =
1296 BIT(NL80211_IFTYPE_STATION) | 1316 BIT(NL80211_IFTYPE_STATION) |
1297 BIT(NL80211_IFTYPE_AP) | 1317 BIT(NL80211_IFTYPE_AP) |
1318 BIT(NL80211_IFTYPE_P2P_CLIENT) |
1319 BIT(NL80211_IFTYPE_P2P_GO) |
1320 BIT(NL80211_IFTYPE_ADHOC) |
1298 BIT(NL80211_IFTYPE_MESH_POINT); 1321 BIT(NL80211_IFTYPE_MESH_POINT);
1299 1322
1300 hw->flags = IEEE80211_HW_MFP_CAPABLE | 1323 hw->flags = IEEE80211_HW_MFP_CAPABLE |
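The new change_interface hook lets mac80211 switch a hwsim vif's type in place instead of removing and re-adding it, and the debug output now goes through ieee80211_vif_type_p2p() because a P2P client or GO is represented as a station or AP plus a p2p flag. A toy model of that mapping (enum and function names hypothetical; see ieee80211_iftype_p2p() in mac80211 for the real one):

#include <stdio.h>

enum iftype { IF_STATION, IF_AP, IF_P2P_CLIENT, IF_P2P_GO };

static enum iftype iftype_p2p(enum iftype type, int p2p)
{
	if (!p2p)
		return type;
	return type == IF_STATION ? IF_P2P_CLIENT : IF_P2P_GO;
}

int main(void)
{
	printf("%d\n", iftype_p2p(IF_STATION, 1));	/* 2 == IF_P2P_CLIENT */
	return 0;
}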
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 077baa86756b..b4772c1c6135 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -762,14 +762,17 @@ int orinoco_hw_get_act_bitrate(struct orinoco_private *priv, int *bitrate)
762 case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */ 762 case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */
763 case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */ 763 case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */
764 for (i = 0; i < BITRATE_TABLE_SIZE; i++) 764 for (i = 0; i < BITRATE_TABLE_SIZE; i++)
765 if (bitrate_table[i].intersil_txratectrl == val) 765 if (bitrate_table[i].intersil_txratectrl == val) {
766 *bitrate = bitrate_table[i].bitrate * 100000;
766 break; 767 break;
768 }
767 769
768 if (i >= BITRATE_TABLE_SIZE) 770 if (i >= BITRATE_TABLE_SIZE) {
769 printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n", 771 printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n",
770 priv->ndev->name, val); 772 priv->ndev->name, val);
773 err = -EIO;
774 }
771 775
772 *bitrate = bitrate_table[i].bitrate * 100000;
773 break; 776 break;
774 default: 777 default:
775 BUG(); 778 BUG();
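This orinoco hunk closes an out-of-bounds read: when no bitrate_table entry matched, the old code still executed "*bitrate = bitrate_table[i].bitrate * 100000" with i equal to BITRATE_TABLE_SIZE. The assignment now happens inside the match, and a miss reports -EIO, which the wext.c hunk below treats as non-fatal. A compilable reduction (table contents invented):

#include <stdio.h>

static const struct { int key; int rate; } table[] = {
	{ 0x0a, 10 }, { 0x14, 20 },
};
#define TABLE_SIZE ((int)(sizeof(table) / sizeof(table[0])))

static int lookup(int key, int *bitrate)
{
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		if (table[i].key == key) {
			*bitrate = table[i].rate * 100000;	/* to bit/s */
			return 0;
		}
	return -1;	/* -EIO in the driver; *bitrate left untouched */
}

int main(void)
{
	int bitrate = 0;

	printf("%d %d\n", lookup(0x14, &bitrate), bitrate);
	return 0;
}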
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index cf7be1eb6124..93505f93bf97 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -589,8 +589,15 @@ static int orinoco_ioctl_getrate(struct net_device *dev,
589 589
590 /* If the interface is running we try to find more about the 590 /* If the interface is running we try to find more about the
591 current mode */ 591 current mode */
592 if (netif_running(dev)) 592 if (netif_running(dev)) {
593 err = orinoco_hw_get_act_bitrate(priv, &bitrate); 593 int act_bitrate;
594 int lerr;
595
596 /* Ignore errors if we can't get the actual bitrate */
597 lerr = orinoco_hw_get_act_bitrate(priv, &act_bitrate);
598 if (!lerr)
599 bitrate = act_bitrate;
600 }
594 601
595 orinoco_unlock(priv, &flags); 602 orinoco_unlock(priv, &flags);
596 603
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index b0342a520bf1..e5f45cb2a7a2 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -2,6 +2,7 @@ config P54_COMMON
2 tristate "Softmac Prism54 support" 2 tristate "Softmac Prism54 support"
3 depends on MAC80211 && EXPERIMENTAL 3 depends on MAC80211 && EXPERIMENTAL
4 select FW_LOADER 4 select FW_LOADER
5 select CRC_CCITT
5 ---help--- 6 ---help---
6 This is common code for isl38xx/stlc45xx based modules. 7 This is common code for isl38xx/stlc45xx based modules.
7 This module does nothing by itself - the USB/PCI/SPI front-ends 8 This module does nothing by itself - the USB/PCI/SPI front-ends
@@ -48,6 +49,23 @@ config P54_SPI
48 49
49 If you choose to build a module, it'll be called p54spi. 50 If you choose to build a module, it'll be called p54spi.
50 51
52config P54_SPI_DEFAULT_EEPROM
53 bool "Include fallback EEPROM blob"
54 depends on P54_SPI
55 default n
56 ---help---
57 Unlike the PCI or USB devices, the SPI variants don't have
58 a dedicated EEPROM chip to store all device specific values
59 for calibration, country and interface settings.
60
 61 The driver will try to load the image "3826.eeprom" if the
 62 file is put in the right place (usually /lib/firmware).
63
 64 Only if this request fails will this option provide a
 65 backup set of generic values to get the device working.
66
67 Enabling this option adds about 4k to p54spi.
68
51config P54_LEDS 69config P54_LEDS
52 bool 70 bool
53 depends on P54_COMMON && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = P54_COMMON) 71 depends on P54_COMMON && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = P54_COMMON)
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 78347041ec40..8c05266d37f4 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -23,6 +23,7 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24 24
25#include <net/mac80211.h> 25#include <net/mac80211.h>
26#include <linux/crc-ccitt.h>
26 27
27#include "p54.h" 28#include "p54.h"
28#include "eeprom.h" 29#include "eeprom.h"
@@ -540,6 +541,7 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
540 int err; 541 int err;
541 u8 *end = (u8 *)eeprom + len; 542 u8 *end = (u8 *)eeprom + len;
542 u16 synth = 0; 543 u16 synth = 0;
544 u16 crc16 = ~0;
543 545
544 wrap = (struct eeprom_pda_wrap *) eeprom; 546 wrap = (struct eeprom_pda_wrap *) eeprom;
545 entry = (void *)wrap->data + le16_to_cpu(wrap->len); 547 entry = (void *)wrap->data + le16_to_cpu(wrap->len);
@@ -655,16 +657,29 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
655 } 657 }
656 break; 658 break;
657 case PDR_END: 659 case PDR_END:
658 /* make it overrun */ 660 crc16 = ~crc_ccitt(crc16, (u8 *) entry, sizeof(*entry));
659 entry_len = len; 661 if (crc16 != le16_to_cpup((__le16 *)entry->data)) {
662 wiphy_err(dev->wiphy, "eeprom failed checksum "
663 "test!\n");
664 err = -ENOMSG;
665 goto err;
666 } else {
667 goto good_eeprom;
668 }
660 break; 669 break;
661 default: 670 default:
662 break; 671 break;
663 } 672 }
664 673
665 entry = (void *)entry + (entry_len + 1)*2; 674 crc16 = crc_ccitt(crc16, (u8 *)entry, (entry_len + 1) * 2);
675 entry = (void *)entry + (entry_len + 1) * 2;
666 } 676 }
667 677
678 wiphy_err(dev->wiphy, "unexpected end of eeprom data.\n");
679 err = -ENODATA;
680 goto err;
681
682good_eeprom:
668 if (!synth || !priv->iq_autocal || !priv->output_limit || 683 if (!synth || !priv->iq_autocal || !priv->output_limit ||
669 !priv->curve_data) { 684 !priv->curve_data) {
670 wiphy_err(dev->wiphy, 685 wiphy_err(dev->wiphy,
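Rather than deliberately overrunning the buffer at PDR_END, the parser now keeps a CRC-CCITT running over every PDA record it walks and, at PDR_END, compares the complemented CRC against the stored checksum, failing with -ENOMSG on a mismatch and with -ENODATA if PDR_END never shows up. A compilable sketch of the accumulation (record bytes invented; crc_ccitt_update() reimplements the kernel's LSB-first 0x8408 polynomial, pulled in by the 'select CRC_CCITT' Kconfig change above):

#include <stdint.h>
#include <stdio.h>

static uint16_t crc_ccitt_update(uint16_t crc, const uint8_t *buf, int len)
{
	int i;

	while (len--) {
		crc ^= *buf++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x8408 : 0);
	}
	return crc;
}

int main(void)
{
	/* Two invented PDA records, fed into the running CRC in walk
	 * order; the driver starts from ~0 (0xffff) just like this. */
	const uint8_t rec1[] = { 0x01, 0x02, 0x03, 0x04 };
	const uint8_t rec2[] = { 0x05, 0x06 };
	uint16_t crc16 = 0xffff;

	crc16 = crc_ccitt_update(crc16, rec1, sizeof(rec1));
	crc16 = crc_ccitt_update(crc16, rec2, sizeof(rec2));

	/* At PDR_END the complement must equal the stored checksum. */
	printf("expected checksum: 0x%04x\n", (unsigned)(uint16_t)~crc16);
	return 0;
}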
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 15b20c29a604..92b9b1f05fd5 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -123,10 +123,14 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
123 bootrec = (struct bootrec *)&bootrec->data[len]; 123 bootrec = (struct bootrec *)&bootrec->data[len];
124 } 124 }
125 125
126 if (fw_version) 126 if (fw_version) {
127 wiphy_info(priv->hw->wiphy, 127 wiphy_info(priv->hw->wiphy,
128 "FW rev %s - Softmac protocol %x.%x\n", 128 "FW rev %s - Softmac protocol %x.%x\n",
129 fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); 129 fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
130 snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version),
131 "%s - %x.%x", fw_version,
132 priv->fw_var >> 8, priv->fw_var & 0xff);
133 }
130 134
131 if (priv->fw_var < 0x500) 135 if (priv->fw_var < 0x500)
132 wiphy_info(priv->hw->wiphy, 136 wiphy_info(priv->hw->wiphy,
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 47db439b63bf..622d27b6d8f2 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -429,8 +429,8 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
429 429
430 mutex_lock(&priv->conf_mutex); 430 mutex_lock(&priv->conf_mutex);
431 if (cmd == SET_KEY) { 431 if (cmd == SET_KEY) {
432 switch (key->alg) { 432 switch (key->cipher) {
433 case ALG_TKIP: 433 case WLAN_CIPHER_SUITE_TKIP:
434 if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL | 434 if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL |
435 BR_DESC_PRIV_CAP_TKIP))) { 435 BR_DESC_PRIV_CAP_TKIP))) {
436 ret = -EOPNOTSUPP; 436 ret = -EOPNOTSUPP;
@@ -439,7 +439,8 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
439 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 439 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
440 algo = P54_CRYPTO_TKIPMICHAEL; 440 algo = P54_CRYPTO_TKIPMICHAEL;
441 break; 441 break;
442 case ALG_WEP: 442 case WLAN_CIPHER_SUITE_WEP40:
443 case WLAN_CIPHER_SUITE_WEP104:
443 if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) { 444 if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) {
444 ret = -EOPNOTSUPP; 445 ret = -EOPNOTSUPP;
445 goto out_unlock; 446 goto out_unlock;
@@ -447,7 +448,7 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
447 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 448 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
448 algo = P54_CRYPTO_WEP; 449 algo = P54_CRYPTO_WEP;
449 break; 450 break;
450 case ALG_CCMP: 451 case WLAN_CIPHER_SUITE_CCMP:
451 if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) { 452 if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) {
452 ret = -EOPNOTSUPP; 453 ret = -EOPNOTSUPP;
453 goto out_unlock; 454 goto out_unlock;
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 087bf0698a5a..18d24b7b1e34 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -32,11 +32,14 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33 33
34#include "p54spi.h" 34#include "p54spi.h"
35#include "p54spi_eeprom.h"
36#include "p54.h" 35#include "p54.h"
37 36
38#include "lmac.h" 37#include "lmac.h"
39 38
39#ifdef CONFIG_P54_SPI_DEFAULT_EEPROM
40#include "p54spi_eeprom.h"
41#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */
42
40MODULE_FIRMWARE("3826.arm"); 43MODULE_FIRMWARE("3826.arm");
41MODULE_ALIAS("stlc45xx"); 44MODULE_ALIAS("stlc45xx");
42 45
@@ -195,9 +198,13 @@ static int p54spi_request_eeprom(struct ieee80211_hw *dev)
195 198
196 ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev); 199 ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev);
197 if (ret < 0) { 200 if (ret < 0) {
201#ifdef CONFIG_P54_SPI_DEFAULT_EEPROM
198 dev_info(&priv->spi->dev, "loading default eeprom...\n"); 202 dev_info(&priv->spi->dev, "loading default eeprom...\n");
199 ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom, 203 ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom,
200 sizeof(p54spi_eeprom)); 204 sizeof(p54spi_eeprom));
205#else
206 dev_err(&priv->spi->dev, "Failed to request user eeprom\n");
207#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */
201 } else { 208 } else {
202 dev_info(&priv->spi->dev, "loading user eeprom...\n"); 209 dev_info(&priv->spi->dev, "loading user eeprom...\n");
203 ret = p54_parse_eeprom(dev, (void *) eeprom->data, 210 ret = p54_parse_eeprom(dev, (void *) eeprom->data,
diff --git a/drivers/net/wireless/p54/p54spi_eeprom.h b/drivers/net/wireless/p54/p54spi_eeprom.h
index 1ea1050911d9..d592cbd34d78 100644
--- a/drivers/net/wireless/p54/p54spi_eeprom.h
+++ b/drivers/net/wireless/p54/p54spi_eeprom.h
@@ -671,7 +671,7 @@ static unsigned char p54spi_eeprom[] = {
671 0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01, 671 0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01,
672 672
6730x02, 0x00, 0x00, 0x00, /* PDR_END */ 6730x02, 0x00, 0x00, 0x00, /* PDR_END */
674 0xa8, 0xf5 /* bogus data */ 674 0x67, 0x99,
675}; 675};
676 676
677#endif /* P54SPI_EEPROM_H */ 677#endif /* P54SPI_EEPROM_H */
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index ad595958b7df..063248b35069 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -930,8 +930,8 @@ static int __devinit p54u_probe(struct usb_interface *intf,
930#ifdef CONFIG_PM 930#ifdef CONFIG_PM
931 /* ISL3887 needs a full reset on resume */ 931 /* ISL3887 needs a full reset on resume */
932 udev->reset_resume = 1; 932 udev->reset_resume = 1;
933#endif /* CONFIG_PM */
933 err = p54u_device_reset(dev); 934 err = p54u_device_reset(dev);
934#endif
935 935
936 priv->hw_type = P54U_3887; 936 priv->hw_type = P54U_3887;
937 dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr); 937 dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr);
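The p54usb hunk moves "err = p54u_device_reset(dev);" out of the #ifdef CONFIG_PM block: only the reset_resume flag is PM-specific, and with CONFIG_PM disabled the ISL3887 full reset was silently compiled away. A toy reduction (CONFIG_PM comes from Kconfig in the kernel; it is hard-coded here only for illustration):

#include <stdio.h>

#define CONFIG_PM

static int device_reset(void) { return 0; }	/* p54u_device_reset() stand-in */

int main(void)
{
	int reset_resume = 0;
	int err;

#ifdef CONFIG_PM
	reset_resume = 1;	/* ISL3887 needs a full reset on resume */
#endif
	err = device_reset();	/* now runs in every configuration */

	printf("err=%d reset_resume=%d\n", err, reset_resume);
	return 0;
}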
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 0e937dc0c9c4..76b2318a7dc7 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -275,15 +275,15 @@ static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
275{ 275{
276 int band = priv->hw->conf.channel->band; 276 int band = priv->hw->conf.channel->band;
277 277
278 if (priv->rxhw != 5) 278 if (priv->rxhw != 5) {
279 return ((rssi * priv->rssical_db[band].mul) / 64 + 279 return ((rssi * priv->rssical_db[band].mul) / 64 +
280 priv->rssical_db[band].add) / 4; 280 priv->rssical_db[band].add) / 4;
281 else 281 } else {
282 /* 282 /*
283 * TODO: find the correct formula 283 * TODO: find the correct formula
284 */ 284 */
285 return ((rssi * priv->rssical_db[band].mul) / 64 + 285 return rssi / 2 - 110;
286 priv->rssical_db[band].add) / 4; 286 }
287} 287}
288 288
289/* 289/*
@@ -683,14 +683,15 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
683 } 683 }
684} 684}
685 685
686static u8 p54_convert_algo(enum ieee80211_key_alg alg) 686static u8 p54_convert_algo(u32 cipher)
687{ 687{
688 switch (alg) { 688 switch (cipher) {
689 case ALG_WEP: 689 case WLAN_CIPHER_SUITE_WEP40:
690 case WLAN_CIPHER_SUITE_WEP104:
690 return P54_CRYPTO_WEP; 691 return P54_CRYPTO_WEP;
691 case ALG_TKIP: 692 case WLAN_CIPHER_SUITE_TKIP:
692 return P54_CRYPTO_TKIPMICHAEL; 693 return P54_CRYPTO_TKIPMICHAEL;
693 case ALG_CCMP: 694 case WLAN_CIPHER_SUITE_CCMP:
694 return P54_CRYPTO_AESCCMP; 695 return P54_CRYPTO_AESCCMP;
695 default: 696 default:
696 return 0; 697 return 0;
@@ -731,7 +732,7 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
731 732
732 if (info->control.hw_key) { 733 if (info->control.hw_key) {
733 crypt_offset = ieee80211_get_hdrlen_from_skb(skb); 734 crypt_offset = ieee80211_get_hdrlen_from_skb(skb);
734 if (info->control.hw_key->alg == ALG_TKIP) { 735 if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
735 u8 *iv = (u8 *)(skb->data + crypt_offset); 736 u8 *iv = (u8 *)(skb->data + crypt_offset);
736 /* 737 /*
 737 * The firmware expects that the IV has to have 738 * The firmware expects that the IV has to have
@@ -827,10 +828,10 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
827 hdr->tries = ridx; 828 hdr->tries = ridx;
828 txhdr->rts_rate_idx = 0; 829 txhdr->rts_rate_idx = 0;
829 if (info->control.hw_key) { 830 if (info->control.hw_key) {
830 txhdr->key_type = p54_convert_algo(info->control.hw_key->alg); 831 txhdr->key_type = p54_convert_algo(info->control.hw_key->cipher);
831 txhdr->key_len = min((u8)16, info->control.hw_key->keylen); 832 txhdr->key_len = min((u8)16, info->control.hw_key->keylen);
832 memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len); 833 memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len);
833 if (info->control.hw_key->alg == ALG_TKIP) { 834 if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
834 /* reserve space for the MIC key */ 835 /* reserve space for the MIC key */
835 len += 8; 836 len += 8;
836 memcpy(skb_put(skb, 8), &(info->control.hw_key->key 837 memcpy(skb_put(skb, 8), &(info->control.hw_key->key
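These txrx.c hunks track the mac80211 switch from enum ieee80211_key_alg to 32-bit cipher suite selectors (OUI 00-0F-AC plus a suite type byte), which is why WEP now takes two case labels, one per key length. A compilable model of the converted switch (the selector values match the 802.11 definitions; the return values are illustrative, not the driver's real P54_CRYPTO_* constants):

#include <stdint.h>
#include <stdio.h>

#define WLAN_CIPHER_SUITE_WEP40  0x000FAC01
#define WLAN_CIPHER_SUITE_TKIP   0x000FAC02
#define WLAN_CIPHER_SUITE_CCMP   0x000FAC04
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05

static uint8_t convert_algo(uint32_t cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:	/* one hw mode covers both lengths */
		return 1;
	case WLAN_CIPHER_SUITE_TKIP:
		return 2;
	case WLAN_CIPHER_SUITE_CCMP:
		return 3;
	default:
		return 0;		/* no hardware offload */
	}
}

int main(void)
{
	printf("%u\n", convert_algo(WLAN_CIPHER_SUITE_TKIP));
	return 0;
}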
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 77cd65db8500..d97a2caf582b 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -3234,7 +3234,7 @@ prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
3234 switch (cmd) { 3234 switch (cmd) {
3235 case PRISM54_HOSTAPD: 3235 case PRISM54_HOSTAPD:
3236 if (!capable(CAP_NET_ADMIN)) 3236 if (!capable(CAP_NET_ADMIN))
3237 return -EPERM; 3237 return -EPERM;
3238 ret = prism54_hostapd(ndev, &wrq->u.data); 3238 ret = prism54_hostapd(ndev, &wrq->u.data);
3239 return ret; 3239 return ret;
3240 } 3240 }
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88560d0ae50a..5ca624a64c42 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -43,7 +43,6 @@
43#include <linux/if_arp.h> 43#include <linux/if_arp.h>
44#include <linux/ioport.h> 44#include <linux/ioport.h>
45#include <linux/skbuff.h> 45#include <linux/skbuff.h>
46#include <linux/ethtool.h>
47#include <linux/ieee80211.h> 46#include <linux/ieee80211.h>
48 47
49#include <pcmcia/cs.h> 48#include <pcmcia/cs.h>
@@ -80,8 +79,6 @@ static int ray_dev_config(struct net_device *dev, struct ifmap *map);
80static struct net_device_stats *ray_get_stats(struct net_device *dev); 79static struct net_device_stats *ray_get_stats(struct net_device *dev);
81static int ray_dev_init(struct net_device *dev); 80static int ray_dev_init(struct net_device *dev);
82 81
83static const struct ethtool_ops netdev_ethtool_ops;
84
85static int ray_open(struct net_device *dev); 82static int ray_open(struct net_device *dev);
86static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb, 83static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
87 struct net_device *dev); 84 struct net_device *dev);
@@ -197,7 +194,7 @@ module_param(bc, int, 0);
197module_param(phy_addr, charp, 0); 194module_param(phy_addr, charp, 0);
198module_param(ray_mem_speed, int, 0); 195module_param(ray_mem_speed, int, 0);
199 196
200static UCHAR b5_default_startup_parms[] = { 197static const UCHAR b5_default_startup_parms[] = {
201 0, 0, /* Adhoc station */ 198 0, 0, /* Adhoc station */
202 'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */ 199 'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */
203 0, 0, 0, 0, 0, 0, 0, 0, 200 0, 0, 0, 0, 0, 0, 0, 0,
@@ -232,7 +229,7 @@ static UCHAR b5_default_startup_parms[] = {
232 2, 0, 0, 0, 0, 0, 0, 0 /* basic rate set */ 229 2, 0, 0, 0, 0, 0, 0, 0 /* basic rate set */
233}; 230};
234 231
235static UCHAR b4_default_startup_parms[] = { 232static const UCHAR b4_default_startup_parms[] = {
236 0, 0, /* Adhoc station */ 233 0, 0, /* Adhoc station */
237 'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */ 234 'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */
238 0, 0, 0, 0, 0, 0, 0, 0, 235 0, 0, 0, 0, 0, 0, 0, 0,
@@ -264,9 +261,9 @@ static UCHAR b4_default_startup_parms[] = {
264}; 261};
265 262
266/*===========================================================================*/ 263/*===========================================================================*/
267static unsigned char eth2_llc[] = { 0xaa, 0xaa, 3, 0, 0, 0 }; 264static const u8 eth2_llc[] = { 0xaa, 0xaa, 3, 0, 0, 0 };
268 265
269static char hop_pattern_length[] = { 1, 266static const char hop_pattern_length[] = { 1,
270 USA_HOP_MOD, EUROPE_HOP_MOD, 267 USA_HOP_MOD, EUROPE_HOP_MOD,
271 JAPAN_HOP_MOD, KOREA_HOP_MOD, 268 JAPAN_HOP_MOD, KOREA_HOP_MOD,
272 SPAIN_HOP_MOD, FRANCE_HOP_MOD, 269 SPAIN_HOP_MOD, FRANCE_HOP_MOD,
@@ -274,7 +271,7 @@ static char hop_pattern_length[] = { 1,
274 JAPAN_TEST_HOP_MOD 271 JAPAN_TEST_HOP_MOD
275}; 272};
276 273
277static char rcsid[] = 274static const char rcsid[] =
278 "Raylink/WebGear wireless LAN - Corey <Thomas corey@world.std.com>"; 275 "Raylink/WebGear wireless LAN - Corey <Thomas corey@world.std.com>";
279 276
280static const struct net_device_ops ray_netdev_ops = { 277static const struct net_device_ops ray_netdev_ops = {
@@ -333,7 +330,6 @@ static int ray_probe(struct pcmcia_device *p_dev)
333 330
334 /* Raylink entries in the device structure */ 331 /* Raylink entries in the device structure */
335 dev->netdev_ops = &ray_netdev_ops; 332 dev->netdev_ops = &ray_netdev_ops;
336 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
337 dev->wireless_handlers = &ray_handler_def; 333 dev->wireless_handlers = &ray_handler_def;
338#ifdef WIRELESS_SPY 334#ifdef WIRELESS_SPY
339 local->wireless_data.spy_data = &local->spy_data; 335 local->wireless_data.spy_data = &local->spy_data;
@@ -608,7 +604,7 @@ static int dl_startup_params(struct net_device *dev)
608 /* Start kernel timer to wait for dl startup to complete. */ 604 /* Start kernel timer to wait for dl startup to complete. */
609 local->timer.expires = jiffies + HZ / 2; 605 local->timer.expires = jiffies + HZ / 2;
610 local->timer.data = (long)local; 606 local->timer.data = (long)local;
611 local->timer.function = &verify_dl_startup; 607 local->timer.function = verify_dl_startup;
612 add_timer(&local->timer); 608 add_timer(&local->timer);
613 dev_dbg(&link->dev, 609 dev_dbg(&link->dev,
614 "ray_cs dl_startup_params started timer for verify_dl_startup\n"); 610 "ray_cs dl_startup_params started timer for verify_dl_startup\n");
@@ -1062,18 +1058,6 @@ AP to AP 1 1 dest AP src AP dest source
1062 } 1058 }
1063} /* end encapsulate_frame */ 1059} /* end encapsulate_frame */
1064 1060
1065/*===========================================================================*/
1066
1067static void netdev_get_drvinfo(struct net_device *dev,
1068 struct ethtool_drvinfo *info)
1069{
1070 strcpy(info->driver, "ray_cs");
1071}
1072
1073static const struct ethtool_ops netdev_ethtool_ops = {
1074 .get_drvinfo = netdev_get_drvinfo,
1075};
1076
1077/*====================================================================*/ 1061/*====================================================================*/
1078 1062
1079/*------------------------------------------------------------------*/ 1063/*------------------------------------------------------------------*/
@@ -1997,12 +1981,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
1997 dev_dbg(&link->dev, 1981 dev_dbg(&link->dev,
1998 "ray_cs interrupt network \"%s\" start failed\n", 1982 "ray_cs interrupt network \"%s\" start failed\n",
1999 local->sparm.b4.a_current_ess_id); 1983 local->sparm.b4.a_current_ess_id);
2000 local->timer.function = &start_net; 1984 local->timer.function = start_net;
2001 } else { 1985 } else {
2002 dev_dbg(&link->dev, 1986 dev_dbg(&link->dev,
2003 "ray_cs interrupt network \"%s\" join failed\n", 1987 "ray_cs interrupt network \"%s\" join failed\n",
2004 local->sparm.b4.a_current_ess_id); 1988 local->sparm.b4.a_current_ess_id);
2005 local->timer.function = &join_net; 1989 local->timer.function = join_net;
2006 } 1990 }
2007 add_timer(&local->timer); 1991 add_timer(&local->timer);
2008 } 1992 }
@@ -2470,9 +2454,9 @@ static void authenticate(ray_dev_t *local)
2470 2454
2471 del_timer(&local->timer); 2455 del_timer(&local->timer);
2472 if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) { 2456 if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) {
2473 local->timer.function = &join_net; 2457 local->timer.function = join_net;
2474 } else { 2458 } else {
2475 local->timer.function = &authenticate_timeout; 2459 local->timer.function = authenticate_timeout;
2476 } 2460 }
2477 local->timer.expires = jiffies + HZ * 2; 2461 local->timer.expires = jiffies + HZ * 2;
2478 local->timer.data = (long)local; 2462 local->timer.data = (long)local;
@@ -2557,7 +2541,7 @@ static void associate(ray_dev_t *local)
2557 del_timer(&local->timer); 2541 del_timer(&local->timer);
2558 local->timer.expires = jiffies + HZ * 2; 2542 local->timer.expires = jiffies + HZ * 2;
2559 local->timer.data = (long)local; 2543 local->timer.data = (long)local;
2560 local->timer.function = &join_net; 2544 local->timer.function = join_net;
2561 add_timer(&local->timer); 2545 add_timer(&local->timer);
2562 local->card_status = CARD_ASSOC_FAILED; 2546 local->card_status = CARD_ASSOC_FAILED;
2563 return; 2547 return;
@@ -2591,7 +2575,7 @@ static void clear_interrupt(ray_dev_t *local)
2591#ifdef CONFIG_PROC_FS 2575#ifdef CONFIG_PROC_FS
2592#define MAXDATA (PAGE_SIZE - 80) 2576#define MAXDATA (PAGE_SIZE - 80)
2593 2577
2594static char *card_status[] = { 2578static const char *card_status[] = {
2595 "Card inserted - uninitialized", /* 0 */ 2579 "Card inserted - uninitialized", /* 0 */
2596 "Card not downloaded", /* 1 */ 2580 "Card not downloaded", /* 1 */
2597 "Waiting for download parameters", /* 2 */ 2581 "Waiting for download parameters", /* 2 */
@@ -2608,8 +2592,8 @@ static char *card_status[] = {
2608 "Association failed" /* 16 */ 2592 "Association failed" /* 16 */
2609}; 2593};
2610 2594
2611static char *nettype[] = { "Adhoc", "Infra " }; 2595static const char *nettype[] = { "Adhoc", "Infra " };
2612static char *framing[] = { "Encapsulation", "Translation" } 2596static const char *framing[] = { "Encapsulation", "Translation" }
2613 2597
2614; 2598;
2615/*===========================================================================*/ 2599/*===========================================================================*/
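A note on the constification in this file: `static const char *card_status[]` makes the strings themselves read-only, but the array of pointers is still writable; locking both requires a second const. A minimal sketch in plain C, separate from this patch:

    static const char *nettype[] = { "Adhoc", "Infra " };
    /* nettype[0] = "Other";  -- still compiles: the pointers are mutable */

    static const char *const framing[] = { "Encapsulation", "Translation" };
    /* framing[0] = "Other";  -- rejected: pointers and strings are const */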
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 5063e01410e5..d49e830fa1da 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -321,7 +321,8 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
321} 321}
322 322
323static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev, 323static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
324 struct rt2x00lib_erp *erp) 324 struct rt2x00lib_erp *erp,
325 u32 changed)
325{ 326{
326 int preamble_mask; 327 int preamble_mask;
327 u32 reg; 328 u32 reg;
@@ -329,59 +330,72 @@ static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
329 /* 330 /*
330 * When short preamble is enabled, we should set bit 0x08 331 * When short preamble is enabled, we should set bit 0x08
331 */ 332 */
332 preamble_mask = erp->short_preamble << 3; 333 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
333 334 preamble_mask = erp->short_preamble << 3;
334 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); 335
335 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x1ff); 336 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
336 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0x13a); 337 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x1ff);
337 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); 338 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0x13a);
338 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1); 339 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
339 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); 340 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
340 341 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
341 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 342
342 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); 343 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
343 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); 344 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
344 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 10)); 345 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
345 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); 346 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
346 347 GET_DURATION(ACK_SIZE, 10));
347 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg); 348 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg);
348 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask); 349
349 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04); 350 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg);
350 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 20)); 351 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask);
351 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); 352 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04);
352 353 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
353 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg); 354 GET_DURATION(ACK_SIZE, 20));
354 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask); 355 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg);
355 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04); 356
356 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 55)); 357 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg);
357 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); 358 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask);
358 359 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04);
359 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg); 360 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
360 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask); 361 GET_DURATION(ACK_SIZE, 55));
361 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84); 362 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg);
362 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 110)); 363
363 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); 364 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg);
364 365 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask);
365 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates); 366 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84);
367 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
368 GET_DURATION(ACK_SIZE, 110));
369 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg);
370 }
366 371
367 rt2x00pci_register_read(rt2x00dev, CSR11, &reg); 372 if (changed & BSS_CHANGED_BASIC_RATES)
368 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time); 373 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates);
369 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
370 374
371 rt2x00pci_register_read(rt2x00dev, CSR12, &reg); 375 if (changed & BSS_CHANGED_ERP_SLOT) {
372 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL, erp->beacon_int * 16); 376 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
373 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION, erp->beacon_int * 16); 377 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
374 rt2x00pci_register_write(rt2x00dev, CSR12, reg); 378 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
375 379
376 rt2x00pci_register_read(rt2x00dev, CSR18, &reg); 380 rt2x00pci_register_read(rt2x00dev, CSR18, &reg);
377 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs); 381 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
378 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs); 382 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
379 rt2x00pci_register_write(rt2x00dev, CSR18, reg); 383 rt2x00pci_register_write(rt2x00dev, CSR18, reg);
384
385 rt2x00pci_register_read(rt2x00dev, CSR19, &reg);
386 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
387 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
388 rt2x00pci_register_write(rt2x00dev, CSR19, reg);
389 }
380 390
381 rt2x00pci_register_read(rt2x00dev, CSR19, &reg); 391 if (changed & BSS_CHANGED_BEACON_INT) {
382 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs); 392 rt2x00pci_register_read(rt2x00dev, CSR12, &reg);
383 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs); 393 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL,
384 rt2x00pci_register_write(rt2x00dev, CSR19, reg); 394 erp->beacon_int * 16);
395 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION,
396 erp->beacon_int * 16);
397 rt2x00pci_register_write(rt2x00dev, CSR12, reg);
398 }
385} 399}
386 400
387static void rt2400pci_config_ant(struct rt2x00_dev *rt2x00dev, 401static void rt2400pci_config_ant(struct rt2x00_dev *rt2x00dev,
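The new `changed` argument carries the BSS_CHANGED_* bitmask that mac80211 hands to drivers via bss_info_changed(), so each register group above is reprogrammed only when its field actually changed. A hypothetical caller sketch (the plumbing is assumed, not shown in this patch):

    /* sketch: forward only ERP-relevant changes to the driver callback */
    static void example_bss_info_changed(struct rt2x00_dev *rt2x00dev,
                                         struct rt2x00lib_erp *erp, u32 changed)
    {
            u32 erp_flags = BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_SLOT |
                            BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT;

            if (changed & erp_flags)
                    rt2x00dev->ops->lib->config_erp(rt2x00dev, erp, changed);
    }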
@@ -1007,12 +1021,11 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1007/* 1021/*
1008 * TX descriptor initialization 1022 * TX descriptor initialization
1009 */ 1023 */
1010static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1024static void rt2400pci_write_tx_desc(struct queue_entry *entry,
1011 struct sk_buff *skb,
1012 struct txentry_desc *txdesc) 1025 struct txentry_desc *txdesc)
1013{ 1026{
1014 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1027 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1015 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; 1028 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1016 __le32 *txd = entry_priv->desc; 1029 __le32 *txd = entry_priv->desc;
1017 u32 word; 1030 u32 word;
1018 1031
@@ -1096,7 +1109,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
1096 /* 1109 /*
1097 * Write the TX descriptor for the beacon. 1110 * Write the TX descriptor for the beacon.
1098 */ 1111 */
1099 rt2400pci_write_tx_desc(rt2x00dev, entry->skb, txdesc); 1112 rt2400pci_write_tx_desc(entry, txdesc);
1100 1113
1101 /* 1114 /*
1102 * Dump beacon to userspace through debugfs. 1115 * Dump beacon to userspace through debugfs.
@@ -1112,24 +1125,24 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
1112 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1125 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1113} 1126}
1114 1127
1115static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1128static void rt2400pci_kick_tx_queue(struct data_queue *queue)
1116 const enum data_queue_qid queue)
1117{ 1129{
1130 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1118 u32 reg; 1131 u32 reg;
1119 1132
1120 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1133 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1121 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE)); 1134 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue->qid == QID_AC_BE));
1122 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK)); 1135 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue->qid == QID_AC_BK));
1123 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue == QID_ATIM)); 1136 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue->qid == QID_ATIM));
1124 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1137 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1125} 1138}
1126 1139
1127static void rt2400pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 1140static void rt2400pci_kill_tx_queue(struct data_queue *queue)
1128 const enum data_queue_qid qid)
1129{ 1141{
1142 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1130 u32 reg; 1143 u32 reg;
1131 1144
1132 if (qid == QID_BEACON) { 1145 if (queue->qid == QID_BEACON) {
1133 rt2x00pci_register_write(rt2x00dev, CSR14, 0); 1146 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
1134 } else { 1147 } else {
1135 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1148 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
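The single-argument signatures work because each queue carries its own identity and a back-pointer to its device. An abridged view of the fields relied on above (the full struct lives in rt2x00queue.h):

    struct data_queue {
            struct rt2x00_dev *rt2x00dev;  /* back-pointer to the owning device */
            enum data_queue_qid qid;       /* QID_AC_BE, QID_AC_BK, QID_ATIM, ... */
            /* ... many more fields ... */
    };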
@@ -1481,15 +1494,17 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1481 /* 1494 /*
1482 * Create channel information array 1495 * Create channel information array
1483 */ 1496 */
1484 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 1497 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
1485 if (!info) 1498 if (!info)
1486 return -ENOMEM; 1499 return -ENOMEM;
1487 1500
1488 spec->channels_info = info; 1501 spec->channels_info = info;
1489 1502
1490 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); 1503 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1491 for (i = 0; i < 14; i++) 1504 for (i = 0; i < 14; i++) {
1492 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 1505 info[i].max_power = TXPOWER_FROM_DEV(MAX_TXPOWER);
1506 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1507 }
1493 1508
1494 return 0; 1509 return 0;
1495} 1510}
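kcalloc() is preferred over open-coded `kzalloc(n * size, ...)` because the multiplication is overflow-checked before anything is allocated. Roughly the shape of the helper (see <linux/slab.h> for the real definition):

    static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
    {
            if (size != 0 && n > ULONG_MAX / size)
                    return NULL;               /* n * size would overflow */
            return kzalloc(n * size, flags);   /* zeroed allocation */
    }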
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index c2a555d5376b..2214c3231727 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -327,7 +327,8 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
327} 327}
328 328
329static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev, 329static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev,
330 struct rt2x00lib_erp *erp) 330 struct rt2x00lib_erp *erp,
331 u32 changed)
331{ 332{
332 int preamble_mask; 333 int preamble_mask;
333 u32 reg; 334 u32 reg;
@@ -335,59 +336,73 @@ static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev,
335 /* 336 /*
336 * When short preamble is enabled, we should set bit 0x08 337 * When short preamble is enabled, we should set bit 0x08
337 */ 338 */
338 preamble_mask = erp->short_preamble << 3; 339 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
339 340 preamble_mask = erp->short_preamble << 3;
340 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); 341
341 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x162); 342 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
342 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0xa2); 343 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x162);
343 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); 344 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0xa2);
344 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1); 345 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
345 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); 346 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
346 347 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
347 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 348
348 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); 349 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
349 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); 350 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
350 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 10)); 351 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
351 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); 352 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
352 353 GET_DURATION(ACK_SIZE, 10));
353 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg); 354 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg);
354 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask); 355
355 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04); 356 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg);
356 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 20)); 357 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask);
357 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); 358 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04);
358 359 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
359 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg); 360 GET_DURATION(ACK_SIZE, 20));
360 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask); 361 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg);
361 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04); 362
362 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 55)); 363 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg);
363 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); 364 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask);
364 365 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04);
365 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg); 366 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
366 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask); 367 GET_DURATION(ACK_SIZE, 55));
367 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84); 368 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg);
368 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 110)); 369
369 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); 370 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg);
370 371 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask);
371 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates); 372 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84);
373 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
374 GET_DURATION(ACK_SIZE, 110));
375 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg);
376 }
372 377
373 rt2x00pci_register_read(rt2x00dev, CSR11, &reg); 378 if (changed & BSS_CHANGED_BASIC_RATES)
374 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time); 379 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates);
375 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
376 380
377 rt2x00pci_register_read(rt2x00dev, CSR12, &reg); 381 if (changed & BSS_CHANGED_ERP_SLOT) {
378 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL, erp->beacon_int * 16); 382 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
379 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION, erp->beacon_int * 16); 383 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
380 rt2x00pci_register_write(rt2x00dev, CSR12, reg); 384 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
381 385
382 rt2x00pci_register_read(rt2x00dev, CSR18, &reg); 386 rt2x00pci_register_read(rt2x00dev, CSR18, &reg);
383 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs); 387 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
384 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs); 388 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
385 rt2x00pci_register_write(rt2x00dev, CSR18, reg); 389 rt2x00pci_register_write(rt2x00dev, CSR18, reg);
390
391 rt2x00pci_register_read(rt2x00dev, CSR19, &reg);
392 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
393 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
394 rt2x00pci_register_write(rt2x00dev, CSR19, reg);
395 }
396
397 if (changed & BSS_CHANGED_BEACON_INT) {
398 rt2x00pci_register_read(rt2x00dev, CSR12, &reg);
399 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL,
400 erp->beacon_int * 16);
401 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION,
402 erp->beacon_int * 16);
403 rt2x00pci_register_write(rt2x00dev, CSR12, reg);
404 }
386 405
387 rt2x00pci_register_read(rt2x00dev, CSR19, &reg);
388 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
389 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
390 rt2x00pci_register_write(rt2x00dev, CSR19, reg);
391} 406}
392 407
393static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev, 408static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
@@ -1161,12 +1176,11 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1161/* 1176/*
1162 * TX descriptor initialization 1177 * TX descriptor initialization
1163 */ 1178 */
1164static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1179static void rt2500pci_write_tx_desc(struct queue_entry *entry,
1165 struct sk_buff *skb,
1166 struct txentry_desc *txdesc) 1180 struct txentry_desc *txdesc)
1167{ 1181{
1168 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1182 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1169 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; 1183 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1170 __le32 *txd = entry_priv->desc; 1184 __le32 *txd = entry_priv->desc;
1171 u32 word; 1185 u32 word;
1172 1186
@@ -1249,7 +1263,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
1249 /* 1263 /*
1250 * Write the TX descriptor for the beacon. 1264 * Write the TX descriptor for the beacon.
1251 */ 1265 */
1252 rt2500pci_write_tx_desc(rt2x00dev, entry->skb, txdesc); 1266 rt2500pci_write_tx_desc(entry, txdesc);
1253 1267
1254 /* 1268 /*
1255 * Dump beacon to userspace through debugfs. 1269 * Dump beacon to userspace through debugfs.
@@ -1265,24 +1279,24 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
1265 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1279 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1266} 1280}
1267 1281
1268static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1282static void rt2500pci_kick_tx_queue(struct data_queue *queue)
1269 const enum data_queue_qid queue)
1270{ 1283{
1284 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1271 u32 reg; 1285 u32 reg;
1272 1286
1273 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1287 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1274 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE)); 1288 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue->qid == QID_AC_BE));
1275 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK)); 1289 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue->qid == QID_AC_BK));
1276 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue == QID_ATIM)); 1290 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue->qid == QID_ATIM));
1277 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1291 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1278} 1292}
1279 1293
1280static void rt2500pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 1294static void rt2500pci_kill_tx_queue(struct data_queue *queue)
1281 const enum data_queue_qid qid)
1282{ 1295{
1296 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1283 u32 reg; 1297 u32 reg;
1284 1298
1285 if (qid == QID_BEACON) { 1299 if (queue->qid == QID_BEACON) {
1286 rt2x00pci_register_write(rt2x00dev, CSR14, 0); 1300 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
1287 } else { 1301 } else {
1288 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1302 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
@@ -1795,19 +1809,23 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1795 /* 1809 /*
1796 * Create channel information array 1810 * Create channel information array
1797 */ 1811 */
1798 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 1812 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
1799 if (!info) 1813 if (!info)
1800 return -ENOMEM; 1814 return -ENOMEM;
1801 1815
1802 spec->channels_info = info; 1816 spec->channels_info = info;
1803 1817
1804 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); 1818 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1805 for (i = 0; i < 14; i++) 1819 for (i = 0; i < 14; i++) {
1806 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 1820 info[i].max_power = MAX_TXPOWER;
1821 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1822 }
1807 1823
1808 if (spec->num_channels > 14) { 1824 if (spec->num_channels > 14) {
1809 for (i = 14; i < spec->num_channels; i++) 1825 for (i = 14; i < spec->num_channels; i++) {
1810 info[i].tx_power1 = DEFAULT_TXPOWER; 1826 info[i].max_power = MAX_TXPOWER;
1827 info[i].default_power1 = DEFAULT_TXPOWER;
1828 }
1811 } 1829 }
1812 1830
1813 return 0; 1831 return 0;
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index cdaf93f48263..6e94356265b3 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -355,7 +355,9 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
 355 * it is known to not work on at least some hardware. 355 * it is known to not work on at least some hardware.
356 * SW crypto will be used in that case. 356 * SW crypto will be used in that case.
357 */ 357 */
358 if (key->alg == ALG_WEP && key->keyidx != 0) 358 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
359 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
360 key->keyidx != 0)
359 return -EOPNOTSUPP; 361 return -EOPNOTSUPP;
360 362
361 /* 363 /*
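This hunk follows mac80211's move from the enum-based key->alg to a key->cipher field holding WLAN_CIPHER_SUITE_* values, so WEP now matches two suite IDs instead of one ALG_WEP constant. A hypothetical helper expressing the same test (not part of the patch):

    static bool is_wep_cipher(u32 cipher)
    {
            return cipher == WLAN_CIPHER_SUITE_WEP40 ||
                   cipher == WLAN_CIPHER_SUITE_WEP104;
    }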
@@ -492,24 +494,34 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
492} 494}
493 495
494static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev, 496static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev,
495 struct rt2x00lib_erp *erp) 497 struct rt2x00lib_erp *erp,
498 u32 changed)
496{ 499{
497 u16 reg; 500 u16 reg;
498 501
499 rt2500usb_register_read(rt2x00dev, TXRX_CSR10, &reg); 502 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
500 rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE, 503 rt2500usb_register_read(rt2x00dev, TXRX_CSR10, &reg);
501 !!erp->short_preamble); 504 rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE,
502 rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg); 505 !!erp->short_preamble);
506 rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg);
507 }
503 508
504 rt2500usb_register_write(rt2x00dev, TXRX_CSR11, erp->basic_rates); 509 if (changed & BSS_CHANGED_BASIC_RATES)
510 rt2500usb_register_write(rt2x00dev, TXRX_CSR11,
511 erp->basic_rates);
505 512
506 rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg); 513 if (changed & BSS_CHANGED_BEACON_INT) {
507 rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL, erp->beacon_int * 4); 514 rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg);
508 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); 515 rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL,
516 erp->beacon_int * 4);
517 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
518 }
509 519
510 rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time); 520 if (changed & BSS_CHANGED_ERP_SLOT) {
511 rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs); 521 rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time);
512 rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs); 522 rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs);
523 rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs);
524 }
513} 525}
514 526
515static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev, 527static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
@@ -1039,12 +1051,11 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1039/* 1051/*
1040 * TX descriptor initialization 1052 * TX descriptor initialization
1041 */ 1053 */
1042static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1054static void rt2500usb_write_tx_desc(struct queue_entry *entry,
1043 struct sk_buff *skb,
1044 struct txentry_desc *txdesc) 1055 struct txentry_desc *txdesc)
1045{ 1056{
1046 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1057 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1047 __le32 *txd = (__le32 *) skb->data; 1058 __le32 *txd = (__le32 *) entry->skb->data;
1048 u32 word; 1059 u32 word;
1049 1060
1050 /* 1061 /*
@@ -1127,7 +1138,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry,
1127 /* 1138 /*
1128 * Write the TX descriptor for the beacon. 1139 * Write the TX descriptor for the beacon.
1129 */ 1140 */
1130 rt2500usb_write_tx_desc(rt2x00dev, entry->skb, txdesc); 1141 rt2500usb_write_tx_desc(entry, txdesc);
1131 1142
1132 /* 1143 /*
1133 * Dump beacon to userspace through debugfs. 1144 * Dump beacon to userspace through debugfs.
@@ -1195,6 +1206,14 @@ static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
1195 return length; 1206 return length;
1196} 1207}
1197 1208
1209static void rt2500usb_kill_tx_queue(struct data_queue *queue)
1210{
1211 if (queue->qid == QID_BEACON)
1212 rt2500usb_register_write(queue->rt2x00dev, TXRX_CSR19, 0);
1213
1214 rt2x00usb_kill_tx_queue(queue);
1215}
1216
1198/* 1217/*
1199 * RX control handlers 1218 * RX control handlers
1200 */ 1219 */
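The wrapper above lets rt2500usb stop beaconing through TXRX_CSR19 before handing the generic URB teardown to rt2x00usb_kill_tx_queue(); the override takes effect through the driver ops table, as the excerpt later in this patch shows:

    static const struct rt2x00lib_ops rt2500usb_ops_excerpt = {
            .kick_tx_queue = rt2x00usb_kick_tx_queue,  /* generic helper */
            .kill_tx_queue = rt2500usb_kill_tx_queue,  /* device wrapper */
    };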
@@ -1698,19 +1717,23 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1698 /* 1717 /*
1699 * Create channel information array 1718 * Create channel information array
1700 */ 1719 */
1701 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 1720 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
1702 if (!info) 1721 if (!info)
1703 return -ENOMEM; 1722 return -ENOMEM;
1704 1723
1705 spec->channels_info = info; 1724 spec->channels_info = info;
1706 1725
1707 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); 1726 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1708 for (i = 0; i < 14; i++) 1727 for (i = 0; i < 14; i++) {
1709 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 1728 info[i].max_power = MAX_TXPOWER;
1729 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1730 }
1710 1731
1711 if (spec->num_channels > 14) { 1732 if (spec->num_channels > 14) {
1712 for (i = 14; i < spec->num_channels; i++) 1733 for (i = 14; i < spec->num_channels; i++) {
1713 info[i].tx_power1 = DEFAULT_TXPOWER; 1734 info[i].max_power = MAX_TXPOWER;
1735 info[i].default_power1 = DEFAULT_TXPOWER;
1736 }
1714 } 1737 }
1715 1738
1716 return 0; 1739 return 0;
@@ -1789,7 +1812,7 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1789 .write_beacon = rt2500usb_write_beacon, 1812 .write_beacon = rt2500usb_write_beacon,
1790 .get_tx_data_len = rt2500usb_get_tx_data_len, 1813 .get_tx_data_len = rt2500usb_get_tx_data_len,
1791 .kick_tx_queue = rt2x00usb_kick_tx_queue, 1814 .kick_tx_queue = rt2x00usb_kick_tx_queue,
1792 .kill_tx_queue = rt2x00usb_kill_tx_queue, 1815 .kill_tx_queue = rt2500usb_kill_tx_queue,
1793 .fill_rxdone = rt2500usb_fill_rxdone, 1816 .fill_rxdone = rt2500usb_fill_rxdone,
1794 .config_shared_key = rt2500usb_config_key, 1817 .config_shared_key = rt2500usb_config_key,
1795 .config_pairwise_key = rt2500usb_config_key, 1818 .config_pairwise_key = rt2500usb_config_key,
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index ed4ebcdde7c9..2edc7742a7e9 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -639,6 +639,18 @@
639#define LED_CFG_LED_POLAR FIELD32(0x40000000) 639#define LED_CFG_LED_POLAR FIELD32(0x40000000)
640 640
641/* 641/*
642 * AMPDU_BA_WINSIZE: Force BlockAck window size
643 * FORCE_WINSIZE_ENABLE:
644 * 0: Disable forcing of BlockAck window size
 645 * 1: Enable forcing of BlockAck window size, overwriting the BlockAck
646 * window size values in the TXWI
647 * FORCE_WINSIZE: BlockAck window size
648 */
649#define AMPDU_BA_WINSIZE 0x1040
650#define AMPDU_BA_WINSIZE_FORCE_WINSIZE_ENABLE FIELD32(0x00000020)
651#define AMPDU_BA_WINSIZE_FORCE_WINSIZE FIELD32(0x0000001f)
652
653/*
642 * XIFS_TIME_CFG: MAC timing 654 * XIFS_TIME_CFG: MAC timing
643 * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX 655 * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX
644 * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX 656 * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX
@@ -1318,7 +1330,25 @@
1318#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT FIELD32(0xffff0000) 1330#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT FIELD32(0xffff0000)
1319 1331
1320/* 1332/*
1321 * TX_STA_FIFO: TX Result for specific PID status fifo register 1333 * TX_STA_FIFO: TX Result for specific PID status fifo register.
1334 *
 1335 * This register is implemented as a FIFO with 16 entries in the HW. Each
 1336 * register read fetches the next tx result. If the FIFO is full because
 1337 * it wasn't read fast enough after the corresponding interrupt (TX_FIFO_STATUS)
1338 * triggered, the hw seems to simply drop further tx results.
1339 *
1340 * VALID: 1: this tx result is valid
1341 * 0: no valid tx result -> driver should stop reading
 1342 * PID_TYPE: The PID latched from the PID field in the TXWI; it can be used
1343 * to match a frame with its tx result (even though the PID is
1344 * only 4 bits wide).
1345 * TX_SUCCESS: Indicates tx success (1) or failure (0)
1346 * TX_AGGRE: Indicates if the frame was part of an aggregate (1) or not (0)
 1347 * TX_ACK_REQUIRED: Indicates if the frame needed to be ACKed (1) or not (0)
1348 * WCID: The wireless client ID.
1349 * MCS: The tx rate used during the last transmission of this frame, be it
1350 * successful or not.
1351 * PHYMODE: The phymode used for the transmission.
1322 */ 1352 */
1323#define TX_STA_FIFO 0x1718 1353#define TX_STA_FIFO 0x1718
1324#define TX_STA_FIFO_VALID FIELD32(0x00000001) 1354#define TX_STA_FIFO_VALID FIELD32(0x00000001)
@@ -1841,6 +1871,13 @@ struct mac_iveiv_entry {
1841#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00) 1871#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
1842 1872
1843/* 1873/*
1874 * EEPROM Maximum TX power values
1875 */
1876#define EEPROM_MAX_TX_POWER 0x0027
1877#define EEPROM_MAX_TX_POWER_24GHZ FIELD16(0x00ff)
1878#define EEPROM_MAX_TX_POWER_5GHZ FIELD16(0xff00)
1879
1880/*
1844 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power. 1881 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
1845 * This is delta in 40MHZ. 1882 * This is delta in 40MHZ.
 1846 * VALUE: Tx Power delta value (MAX=4) 1883 * VALUE: Tx Power delta value (MAX=4)
@@ -1928,6 +1965,8 @@ struct mac_iveiv_entry {
1928 * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs 1965 * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs
1929 * BW: Channel bandwidth 20MHz or 40 MHz 1966 * BW: Channel bandwidth 20MHz or 40 MHz
1930 * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED 1967 * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED
 1968 * AMPDU: 1: this frame is eligible for AMPDU aggregation; the hw will
1969 * aggregate consecutive frames with the same RA and QoS TID.
1931 */ 1970 */
1932#define TXWI_W0_FRAG FIELD32(0x00000001) 1971#define TXWI_W0_FRAG FIELD32(0x00000001)
1933#define TXWI_W0_MIMO_PS FIELD32(0x00000002) 1972#define TXWI_W0_MIMO_PS FIELD32(0x00000002)
@@ -1945,6 +1984,15 @@ struct mac_iveiv_entry {
1945 1984
1946/* 1985/*
1947 * Word1 1986 * Word1
1987 * ACK: 0: No Ack needed, 1: Ack needed
1988 * NSEQ: 0: Don't assign hw sequence number, 1: Assign hw sequence number
1989 * BW_WIN_SIZE: BA windows size of the recipient
1990 * WIRELESS_CLI_ID: Client ID for WCID table access
1991 * MPDU_TOTAL_BYTE_COUNT: Length of 802.11 frame
 1992 * PACKETID: Will be latched into the TX_STA_FIFO register once the corresponding
 1993 * frame has been processed. If multiple frames are aggregated together
1994 * (AMPDU==1) the reported tx status will always contain the packet
1995 * id of the first frame. 0: Don't report tx status for this frame.
1948 */ 1996 */
1949#define TXWI_W1_ACK FIELD32(0x00000001) 1997#define TXWI_W1_ACK FIELD32(0x00000001)
1950#define TXWI_W1_NSEQ FIELD32(0x00000002) 1998#define TXWI_W1_NSEQ FIELD32(0x00000002)
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b66e0fd8f0fa..3bb67492d754 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1,4 +1,5 @@
1/* 1/*
2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
2 Copyright (C) 2010 Ivo van Doorn <IvDoorn@gmail.com> 3 Copyright (C) 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com> 4 Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
4 Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com> 5 Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com>
@@ -254,6 +255,23 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
254} 255}
255EXPORT_SYMBOL_GPL(rt2800_mcu_request); 256EXPORT_SYMBOL_GPL(rt2800_mcu_request);
256 257
258int rt2800_wait_csr_ready(struct rt2x00_dev *rt2x00dev)
259{
260 unsigned int i = 0;
261 u32 reg;
262
263 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
264 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
265 if (reg && reg != ~0)
266 return 0;
267 msleep(1);
268 }
269
270 ERROR(rt2x00dev, "Unstable hardware.\n");
271 return -EBUSY;
272}
273EXPORT_SYMBOL_GPL(rt2800_wait_csr_ready);
274
257int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev) 275int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
258{ 276{
259 unsigned int i; 277 unsigned int i;
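The `reg && reg != ~0` test in the new helper is a common readiness probe: a powered-down or detached device usually reads back all-zeros or all-ones on the register bus, so any other MAC_CSR0 value means the CSRs respond. The same condition written out explicitly (a sketch, not a proposed change):

    if (reg != 0 && reg != 0xffffffff)
            return 0;   /* CSRs are alive */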
@@ -367,19 +385,16 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
367 u32 reg; 385 u32 reg;
368 386
369 /* 387 /*
 370 * Wait for stable hardware. 388 * If the driver doesn't wake up the firmware here,
 389 * rt2800_load_firmware will hang forever when the interface is brought up again.
371 */ 390 */
372 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 391 rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);
373 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
374 if (reg && reg != ~0)
375 break;
376 msleep(1);
377 }
378 392
379 if (i == REGISTER_BUSY_COUNT) { 393 /*
380 ERROR(rt2x00dev, "Unstable hardware.\n"); 394 * Wait for stable hardware.
395 */
396 if (rt2800_wait_csr_ready(rt2x00dev))
381 return -EBUSY; 397 return -EBUSY;
382 }
383 398
384 if (rt2x00_is_pci(rt2x00dev)) 399 if (rt2x00_is_pci(rt2x00dev))
385 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); 400 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
@@ -427,8 +442,10 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
427} 442}
428EXPORT_SYMBOL_GPL(rt2800_load_firmware); 443EXPORT_SYMBOL_GPL(rt2800_load_firmware);
429 444
430void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc) 445void rt2800_write_tx_data(struct queue_entry *entry,
446 struct txentry_desc *txdesc)
431{ 447{
448 __le32 *txwi = rt2800_drv_get_txwi(entry);
432 u32 word; 449 u32 word;
433 450
434 /* 451 /*
@@ -437,7 +454,8 @@ void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc)
437 rt2x00_desc_read(txwi, 0, &word); 454 rt2x00_desc_read(txwi, 0, &word);
438 rt2x00_set_field32(&word, TXWI_W0_FRAG, 455 rt2x00_set_field32(&word, TXWI_W0_FRAG,
439 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); 456 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
440 rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0); 457 rt2x00_set_field32(&word, TXWI_W0_MIMO_PS,
458 test_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags));
441 rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0); 459 rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
442 rt2x00_set_field32(&word, TXWI_W0_TS, 460 rt2x00_set_field32(&word, TXWI_W0_TS,
443 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 461 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
@@ -465,7 +483,7 @@ void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc)
465 txdesc->key_idx : 0xff); 483 txdesc->key_idx : 0xff);
466 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, 484 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
467 txdesc->length); 485 txdesc->length);
468 rt2x00_set_field32(&word, TXWI_W1_PACKETID, txdesc->queue + 1); 486 rt2x00_set_field32(&word, TXWI_W1_PACKETID, txdesc->qid + 1);
469 rt2x00_desc_write(txwi, 1, word); 487 rt2x00_desc_write(txwi, 1, word);
470 488
471 /* 489 /*
@@ -478,7 +496,7 @@ void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc)
478 _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */); 496 _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
479 _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */); 497 _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
480} 498}
481EXPORT_SYMBOL_GPL(rt2800_write_txwi); 499EXPORT_SYMBOL_GPL(rt2800_write_tx_data);
482 500
483static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxwi_w2) 501static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxwi_w2)
484{ 502{
@@ -490,7 +508,7 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxwi_w2)
490 u8 offset1; 508 u8 offset1;
491 u8 offset2; 509 u8 offset2;
492 510
493 if (rt2x00dev->rx_status.band == IEEE80211_BAND_2GHZ) { 511 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
494 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom); 512 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
495 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0); 513 offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
496 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1); 514 offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
@@ -569,6 +587,148 @@ void rt2800_process_rxwi(struct queue_entry *entry,
569} 587}
570EXPORT_SYMBOL_GPL(rt2800_process_rxwi); 588EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
571 589
590static bool rt2800_txdone_entry_check(struct queue_entry *entry, u32 reg)
591{
592 __le32 *txwi;
593 u32 word;
594 int wcid, ack, pid;
595 int tx_wcid, tx_ack, tx_pid;
596
597 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
598 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
599 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
600
601 /*
602 * This frames has returned with an IO error,
603 * so the status report is not intended for this
604 * frame.
605 */
606 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) {
607 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
608 return false;
609 }
610
611 /*
612 * Validate if this TX status report is intended for
613 * this entry by comparing the WCID/ACK/PID fields.
614 */
615 txwi = rt2800_drv_get_txwi(entry);
616
617 rt2x00_desc_read(txwi, 1, &word);
618 tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
619 tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
620 tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
621
622 if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid)) {
623 WARNING(entry->queue->rt2x00dev,
624 "TX status report missed for queue %d entry %d\n",
625 entry->queue->qid, entry->entry_idx);
626 rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
627 return false;
628 }
629
630 return true;
631}
632
633void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
634{
635 struct data_queue *queue;
636 struct queue_entry *entry;
637 __le32 *txwi;
638 struct txdone_entry_desc txdesc;
639 u32 word;
640 u32 reg;
641 u16 mcs, real_mcs;
642 u8 pid;
643 int i;
644
645 /*
 646 * TX_STA_FIFO is a FIFO of X entries, hence read TX_STA_FIFO
647 * at most X times and also stop processing once the TX_STA_FIFO_VALID
648 * flag is not set anymore.
649 *
650 * The legacy drivers use X=TX_RING_SIZE but state in a comment
 651 * that the TX_STA_FIFO has a depth of 16. We stick to our
652 * tx ring size for now.
653 */
654 for (i = 0; i < TX_ENTRIES; i++) {
655 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
656 if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
657 break;
658
659 /*
660 * Skip this entry when it contains an invalid
 661 * queue identification number.
662 */
663 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
664 if (pid >= QID_RX)
665 continue;
666
667 queue = rt2x00queue_get_queue(rt2x00dev, pid);
668 if (unlikely(!queue))
669 continue;
670
671 /*
 672 * Inside each queue, we process each entry in chronological
673 * order. We first check that the queue is not empty.
674 */
675 entry = NULL;
676 txwi = NULL;
677 while (!rt2x00queue_empty(queue)) {
678 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
679 if (rt2800_txdone_entry_check(entry, reg))
680 break;
681 }
682
683 if (!entry || rt2x00queue_empty(queue))
684 break;
685
686
687 /*
688 * Obtain the status about this packet.
689 */
690 txdesc.flags = 0;
691 txwi = rt2800_drv_get_txwi(entry);
692 rt2x00_desc_read(txwi, 0, &word);
693 mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
694 real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
695
696 /*
697 * Ralink has a retry mechanism using a global fallback
698 * table. We setup this fallback table to try the immediate
699 * lower rate for all rates. In the TX_STA_FIFO, the MCS field
700 * always contains the MCS used for the last transmission, be
701 * it successful or not.
702 */
703 if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) {
704 /*
705 * Transmission succeeded. The number of retries is
706 * mcs - real_mcs
707 */
708 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
709 txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
710 } else {
711 /*
712 * Transmission failed. The number of retries is
713 * always 7 in this case (for a total number of 8
714 * frames sent).
715 */
716 __set_bit(TXDONE_FAILURE, &txdesc.flags);
717 txdesc.retry = rt2x00dev->long_retry;
718 }
719
720 /*
721 * the frame was retried at least once
722 * -> hw used fallback rates
723 */
724 if (txdesc.retry)
725 __set_bit(TXDONE_FALLBACK, &txdesc.flags);
726
727 rt2x00lib_txdone(entry, &txdesc);
728 }
729}
730EXPORT_SYMBOL_GPL(rt2800_txdone);
731
572void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) 732void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
573{ 733{
574 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 734 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
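The retry arithmetic in rt2800_txdone() relies on the one-step-down fallback table mentioned in its comment: every retry drops the MCS by one, so the distance between the queued MCS and the reported MCS is the retry count. A worked example under that assumption:

    u16 mcs = 7, real_mcs = 4;  /* queued at MCS 7, finally sent at MCS 4 */
    unsigned int retry = (mcs > real_mcs) ? mcs - real_mcs : 0;  /* = 3 */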
@@ -600,7 +760,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
600 /* 760 /*
601 * Add the TXWI for the beacon to the skb. 761 * Add the TXWI for the beacon to the skb.
602 */ 762 */
603 rt2800_write_txwi((__le32 *)entry->skb->data, txdesc); 763 rt2800_write_tx_data(entry, txdesc);
604 764
605 /* 765 /*
606 * Dump beacon to userspace through debugfs. 766 * Dump beacon to userspace through debugfs.
@@ -975,19 +1135,23 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
975 } 1135 }
976 1136
977 if (flags & CONFIG_UPDATE_MAC) { 1137 if (flags & CONFIG_UPDATE_MAC) {
978 reg = le32_to_cpu(conf->mac[1]); 1138 if (!is_zero_ether_addr((const u8 *)conf->mac)) {
979 rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff); 1139 reg = le32_to_cpu(conf->mac[1]);
980 conf->mac[1] = cpu_to_le32(reg); 1140 rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
1141 conf->mac[1] = cpu_to_le32(reg);
1142 }
981 1143
982 rt2800_register_multiwrite(rt2x00dev, MAC_ADDR_DW0, 1144 rt2800_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
983 conf->mac, sizeof(conf->mac)); 1145 conf->mac, sizeof(conf->mac));
984 } 1146 }
985 1147
986 if (flags & CONFIG_UPDATE_BSSID) { 1148 if (flags & CONFIG_UPDATE_BSSID) {
987 reg = le32_to_cpu(conf->bssid[1]); 1149 if (!is_zero_ether_addr((const u8 *)conf->bssid)) {
988 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3); 1150 reg = le32_to_cpu(conf->bssid[1]);
989 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 7); 1151 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
990 conf->bssid[1] = cpu_to_le32(reg); 1152 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 7);
1153 conf->bssid[1] = cpu_to_le32(reg);
1154 }
991 1155
992 rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0, 1156 rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
993 conf->bssid, sizeof(conf->bssid)); 1157 conf->bssid, sizeof(conf->bssid));
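is_zero_ether_addr() from <linux/etherdevice.h> is true only for 00:00:00:00:00:00, so the new guards keep the UNICAST_TO_ME and BSS_ID mask bits from being OR-ed into an address that is being cleared, e.g. while an interface goes down. The shape of the check in isolation (a sketch):

    #include <linux/etherdevice.h>

    static bool addr_needs_mask_bits(const __le32 *addr)
    {
            return !is_zero_ether_addr((const u8 *)addr);
    }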
@@ -995,38 +1159,50 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
995} 1159}
996EXPORT_SYMBOL_GPL(rt2800_config_intf); 1160EXPORT_SYMBOL_GPL(rt2800_config_intf);
997 1161
998void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp) 1162void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
1163 u32 changed)
999{ 1164{
1000 u32 reg; 1165 u32 reg;
1001 1166
1002 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg); 1167 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
1003 rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 1168 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
1004 !!erp->short_preamble); 1169 rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
1005 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE, 1170 !!erp->short_preamble);
1006 !!erp->short_preamble); 1171 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
1007 rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg); 1172 !!erp->short_preamble);
1173 rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
1174 }
1008 1175
1009 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg); 1176 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
1010 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 1177 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
1011 erp->cts_protection ? 2 : 0); 1178 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
1012 rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg); 1179 erp->cts_protection ? 2 : 0);
1180 rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
1181 }
1013 1182
1014 rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 1183 if (changed & BSS_CHANGED_BASIC_RATES) {
1015 erp->basic_rates); 1184 rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE,
1016 rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003); 1185 erp->basic_rates);
1186 rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
1187 }
1017 1188
1018 rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg); 1189 if (changed & BSS_CHANGED_ERP_SLOT) {
1019 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time); 1190 rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
1020 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg); 1191 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME,
1192 erp->slot_time);
1193 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
1021 1194
1022 rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg); 1195 rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
1023 rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs); 1196 rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
1024 rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg); 1197 rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
1198 }
1025 1199
1026 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 1200 if (changed & BSS_CHANGED_BEACON_INT) {
1027 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 1201 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
1028 erp->beacon_int * 16); 1202 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
1029 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 1203 erp->beacon_int * 16);
1204 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1205 }
1030} 1206}
1031EXPORT_SYMBOL_GPL(rt2800_config_erp); 1207EXPORT_SYMBOL_GPL(rt2800_config_erp);
1032 1208
@@ -1120,27 +1296,23 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
1120 * double meaning, and we should set a 7DBm boost flag. 1296 * double meaning, and we should set a 7DBm boost flag.
1121 */ 1297 */
1122 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST, 1298 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
1123 (info->tx_power1 >= 0)); 1299 (info->default_power1 >= 0));
1124 1300
1125 if (info->tx_power1 < 0) 1301 if (info->default_power1 < 0)
1126 info->tx_power1 += 7; 1302 info->default_power1 += 7;
1127 1303
1128 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A, 1304 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A, info->default_power1);
1129 TXPOWER_A_TO_DEV(info->tx_power1));
1130 1305
1131 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST, 1306 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
1132 (info->tx_power2 >= 0)); 1307 (info->default_power2 >= 0));
1133 1308
1134 if (info->tx_power2 < 0) 1309 if (info->default_power2 < 0)
1135 info->tx_power2 += 7; 1310 info->default_power2 += 7;
1136 1311
1137 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A, 1312 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A, info->default_power2);
1138 TXPOWER_A_TO_DEV(info->tx_power2));
1139 } else { 1313 } else {
1140 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G, 1314 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G, info->default_power1);
1141 TXPOWER_G_TO_DEV(info->tx_power1)); 1315 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G, info->default_power2);
1142 rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
1143 TXPOWER_G_TO_DEV(info->tx_power2));
1144 } 1316 }
1145 1317
1146 rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf)); 1318 rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));
@@ -1180,13 +1352,11 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
1180 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); 1352 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
1181 1353
1182 rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr); 1354 rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
1183 rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, 1355 rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, info->default_power1);
1184 TXPOWER_G_TO_DEV(info->tx_power1));
1185 rt2800_rfcsr_write(rt2x00dev, 12, rfcsr); 1356 rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
1186 1357
1187 rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr); 1358 rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
1188 rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, 1359 rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2);
1189 TXPOWER_G_TO_DEV(info->tx_power2));
1190 rt2800_rfcsr_write(rt2x00dev, 13, rfcsr); 1360 rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
1191 1361
1192 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); 1362 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
@@ -1210,10 +1380,19 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1210 unsigned int tx_pin; 1380 unsigned int tx_pin;
1211 u8 bbp; 1381 u8 bbp;
1212 1382
1383 if (rf->channel <= 14) {
1384 info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1);
1385 info->default_power2 = TXPOWER_G_TO_DEV(info->default_power2);
1386 } else {
1387 info->default_power1 = TXPOWER_A_TO_DEV(info->default_power1);
1388 info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2);
1389 }
1390
1213 if (rt2x00_rf(rt2x00dev, RF2020) || 1391 if (rt2x00_rf(rt2x00dev, RF2020) ||
1214 rt2x00_rf(rt2x00dev, RF3020) || 1392 rt2x00_rf(rt2x00dev, RF3020) ||
1215 rt2x00_rf(rt2x00dev, RF3021) || 1393 rt2x00_rf(rt2x00dev, RF3021) ||
1216 rt2x00_rf(rt2x00dev, RF3022)) 1394 rt2x00_rf(rt2x00dev, RF3022) ||
1395 rt2x00_rf(rt2x00dev, RF3052))
1217 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); 1396 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
1218 else 1397 else
1219 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 1398 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
@@ -1536,7 +1715,7 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner);
1536/* 1715/*
1537 * Initialization functions. 1716 * Initialization functions.
1538 */ 1717 */
1539int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) 1718static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1540{ 1719{
1541 u32 reg; 1720 u32 reg;
1542 u16 eeprom; 1721 u16 eeprom;
@@ -1886,6 +2065,14 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1886 rt2800_register_write(rt2x00dev, LG_FBK_CFG1, reg); 2065 rt2800_register_write(rt2x00dev, LG_FBK_CFG1, reg);
1887 2066
1888 /* 2067 /*
2068 * Do not force the BA window size, we use the TXWI to set it
2069 */
2070 rt2800_register_read(rt2x00dev, AMPDU_BA_WINSIZE, &reg);
2071 rt2x00_set_field32(&reg, AMPDU_BA_WINSIZE_FORCE_WINSIZE_ENABLE, 0);
2072 rt2x00_set_field32(&reg, AMPDU_BA_WINSIZE_FORCE_WINSIZE, 0);
2073 rt2800_register_write(rt2x00dev, AMPDU_BA_WINSIZE, reg);
2074
2075 /*
1889 * We must clear the error counters. 2076 * We must clear the error counters.
1890 * These registers are cleared on read, 2077 * These registers are cleared on read,
1891 * so we may pass a useless variable to store the value. 2078 * so we may pass a useless variable to store the value.
@@ -1906,7 +2093,6 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1906 2093
1907 return 0; 2094 return 0;
1908} 2095}
1909EXPORT_SYMBOL_GPL(rt2800_init_registers);
1910 2096
1911static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev) 2097static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
1912{ 2098{
@@ -1949,7 +2135,7 @@ static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
1949 return -EACCES; 2135 return -EACCES;
1950} 2136}
1951 2137
1952int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) 2138static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1953{ 2139{
1954 unsigned int i; 2140 unsigned int i;
1955 u16 eeprom; 2141 u16 eeprom;
@@ -2044,7 +2230,6 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2044 2230
2045 return 0; 2231 return 0;
2046} 2232}
2047EXPORT_SYMBOL_GPL(rt2800_init_bbp);
2048 2233
2049static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev, 2234static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
2050 bool bw40, u8 rfcsr24, u8 filter_target) 2235 bool bw40, u8 rfcsr24, u8 filter_target)
@@ -2106,7 +2291,7 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
2106 return rfcsr24; 2291 return rfcsr24;
2107} 2292}
2108 2293
2109int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) 2294static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2110{ 2295{
2111 u8 rfcsr; 2296 u8 rfcsr;
2112 u8 bbp; 2297 u8 bbp;
@@ -2360,7 +2545,100 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2360 2545
2361 return 0; 2546 return 0;
2362} 2547}
2363EXPORT_SYMBOL_GPL(rt2800_init_rfcsr); 2548
2549int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
2550{
2551 u32 reg;
2552 u16 word;
2553
2554 /*
2555 * Initialize all registers.
2556 */
2557 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
2558 rt2800_init_registers(rt2x00dev) ||
2559 rt2800_init_bbp(rt2x00dev) ||
2560 rt2800_init_rfcsr(rt2x00dev)))
2561 return -EIO;
2562
2563 /*
2564 * Send signal to firmware during boot time.
2565 */
2566 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
2567
2568 if (rt2x00_is_usb(rt2x00dev) &&
2569 (rt2x00_rt(rt2x00dev, RT3070) ||
2570 rt2x00_rt(rt2x00dev, RT3071) ||
2571 rt2x00_rt(rt2x00dev, RT3572))) {
2572 udelay(200);
2573 rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
2574 udelay(10);
2575 }
2576
2577 /*
2578 * Enable RX.
2579 */
2580 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
2581 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
2582 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
2583 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
2584
2585 udelay(50);
2586
2587 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
2588 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
2589 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
2590 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
2591 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
2592 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
2593
2594 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
2595 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
2596 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
2597 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
2598
2599 /*
2600 * Initialize LED control
2601 */
2602 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
2603 rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
2604 word & 0xff, (word >> 8) & 0xff);
2605
2606 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
2607 rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
2608 word & 0xff, (word >> 8) & 0xff);
2609
2610 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
2611 rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
2612 word & 0xff, (word >> 8) & 0xff);
2613
2614 return 0;
2615}
2616EXPORT_SYMBOL_GPL(rt2800_enable_radio);
2617
2618void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
2619{
2620 u32 reg;
2621
2622 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
2623 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
2624 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
2625 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
2626 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
2627 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
2628 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
2629
2630 /* Wait for DMA, ignore error */
2631 rt2800_wait_wpdma_ready(rt2x00dev);
2632
2633 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
2634 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
2635 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
2636 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
2637
2638 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
2639 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
2640}
2641EXPORT_SYMBOL_GPL(rt2800_disable_radio);
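rt2800_enable_radio() and rt2800_disable_radio() fold the formerly duplicated PCI and USB bring-up/teardown sequences (WPDMA wait, register/BBP/RFCSR init, MCU boot signal, TX/RX enable, LED setup) into rt2800lib. Each bus driver shrinks to its bus-specific preparation plus a delegation call, as the rt2800pci hunk later in this patch shows:

    static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
    {
            /* Bus-specific work first: DMA engine and descriptor rings. */
            if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
                         rt2800pci_init_queues(rt2x00dev)))
                    return -EIO;

            /* Everything chip-generic now lives in rt2800lib. */
            return rt2800_enable_radio(rt2x00dev);
    }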
2364 2642
2365int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev) 2643int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev)
2366{ 2644{
@@ -2516,6 +2794,13 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2516 default_lna_gain); 2794 default_lna_gain);
2517 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); 2795 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
2518 2796
2797 rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word);
2798 if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff)
2799 rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER);
2800 if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff)
2801 rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER);
2802 rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word);
2803
2519 return 0; 2804 return 0;
2520} 2805}
2521EXPORT_SYMBOL_GPL(rt2800_validate_eeprom); 2806EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
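The added validation treats 0xff in EEPROM_MAX_TX_POWER as "unprogrammed" (an erased EEPROM reads back all-ones) and substitutes the driver ceilings MAX_G_TXPOWER and MAX_A_TXPOWER for the 2.4 GHz and 5 GHz fields respectively. A minimal sketch of the rule, with the rt2x00_get_field16()/rt2x00_set_field16() plumbing elided:

    /* 0xff can never be a real power ceiling, so fall back. */
    static unsigned int sanitize_max_power(unsigned int field,
                                           unsigned int fallback)
    {
            return (field == 0xff) ? fallback : field;
    }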
@@ -2755,9 +3040,10 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2755{ 3040{
2756 struct hw_mode_spec *spec = &rt2x00dev->spec; 3041 struct hw_mode_spec *spec = &rt2x00dev->spec;
2757 struct channel_info *info; 3042 struct channel_info *info;
2758 char *tx_power1; 3043 char *default_power1;
2759 char *tx_power2; 3044 char *default_power2;
2760 unsigned int i; 3045 unsigned int i;
3046 unsigned short max_power;
2761 u16 eeprom; 3047 u16 eeprom;
2762 3048
2763 /* 3049 /*
@@ -2865,27 +3151,32 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2865 /* 3151 /*
2866 * Create channel information array 3152 * Create channel information array
2867 */ 3153 */
2868 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 3154 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
2869 if (!info) 3155 if (!info)
2870 return -ENOMEM; 3156 return -ENOMEM;
2871 3157
2872 spec->channels_info = info; 3158 spec->channels_info = info;
2873 3159
2874 tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); 3160 rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom);
2875 tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); 3161 max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ);
3162 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
3163 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
2876 3164
2877 for (i = 0; i < 14; i++) { 3165 for (i = 0; i < 14; i++) {
2878 info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]); 3166 info[i].max_power = max_power;
2879 info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]); 3167 info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]);
3168 info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]);
2880 } 3169 }
2881 3170
2882 if (spec->num_channels > 14) { 3171 if (spec->num_channels > 14) {
2883 tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1); 3172 max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ);
2884 tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); 3173 default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
3174 default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
2885 3175
2886 for (i = 14; i < spec->num_channels; i++) { 3176 for (i = 14; i < spec->num_channels; i++) {
2887 info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]); 3177 info[i].max_power = max_power;
2888 info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]); 3178 info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]);
3179 info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]);
2889 } 3180 }
2890 } 3181 }
2891 3182
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 091641e3c5e2..600c5eb25c41 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -1,4 +1,6 @@
1/* 1/*
2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2010 Ivo van Doorn <IvDoorn@gmail.com>
2 Copyright (C) 2009 Bartlomiej Zolnierkiewicz 4 Copyright (C) 2009 Bartlomiej Zolnierkiewicz
3 5
4 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
@@ -44,6 +46,7 @@ struct rt2800_ops {
44 int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev, 46 int (*drv_write_firmware)(struct rt2x00_dev *rt2x00dev,
45 const u8 *data, const size_t len); 47 const u8 *data, const size_t len);
46 int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev); 48 int (*drv_init_registers)(struct rt2x00_dev *rt2x00dev);
49 __le32 *(*drv_get_txwi)(struct queue_entry *entry);
47}; 50};
48 51
49static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev, 52static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev,
@@ -126,18 +129,31 @@ static inline int rt2800_drv_init_registers(struct rt2x00_dev *rt2x00dev)
126 return rt2800ops->drv_init_registers(rt2x00dev); 129 return rt2800ops->drv_init_registers(rt2x00dev);
127} 130}
128 131
132static inline __le32 *rt2800_drv_get_txwi(struct queue_entry *entry)
133{
134 const struct rt2800_ops *rt2800ops = entry->queue->rt2x00dev->ops->drv;
135
136 return rt2800ops->drv_get_txwi(entry);
137}
138
129void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, 139void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
130 const u8 command, const u8 token, 140 const u8 command, const u8 token,
131 const u8 arg0, const u8 arg1); 141 const u8 arg0, const u8 arg1);
132 142
143int rt2800_wait_csr_ready(struct rt2x00_dev *rt2x00dev);
144int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
145
133int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev, 146int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev,
134 const u8 *data, const size_t len); 147 const u8 *data, const size_t len);
135int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, 148int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
136 const u8 *data, const size_t len); 149 const u8 *data, const size_t len);
137 150
138void rt2800_write_txwi(__le32 *txwi, struct txentry_desc *txdesc); 151void rt2800_write_tx_data(struct queue_entry *entry,
152 struct txentry_desc *txdesc);
139void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc); 153void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc);
140 154
155void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
156
141void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc); 157void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
142 158
143extern const struct rt2x00debug rt2800_rt2x00debug; 159extern const struct rt2x00debug rt2800_rt2x00debug;
@@ -153,7 +169,8 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
153 const unsigned int filter_flags); 169 const unsigned int filter_flags);
154void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, 170void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
155 struct rt2x00intf_conf *conf, const unsigned int flags); 171 struct rt2x00intf_conf *conf, const unsigned int flags);
156void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp); 172void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
173 u32 changed);
157void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant); 174void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant);
158void rt2800_config(struct rt2x00_dev *rt2x00dev, 175void rt2800_config(struct rt2x00_dev *rt2x00dev,
159 struct rt2x00lib_conf *libconf, 176 struct rt2x00lib_conf *libconf,
@@ -163,10 +180,8 @@ void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
163void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, 180void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
164 const u32 count); 181 const u32 count);
165 182
166int rt2800_init_registers(struct rt2x00_dev *rt2x00dev); 183int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev);
167int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev); 184void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev);
168int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
169int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
170 185
171int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev); 186int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
172void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev); 187void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
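The new drv_get_txwi hook lets shared rt2800lib code locate the TXWI without knowing the bus framing: the PCI driver returns skb->data directly, while the USB driver skips its TXINFO header except for beacons, as the two get_txwi hunks later in this patch show. A standalone model of the indirection (types simplified; le32 stands in for the kernel's __le32):

    #include <stdint.h>
    typedef uint32_t le32;            /* stand-in for the kernel __le32 */

    struct entry;                     /* opaque queue entry */
    struct bus_ops {
            le32 *(*get_txwi)(struct entry *e);  /* bus-specific offset */
    };

    static le32 *txwi_of(const struct bus_ops *ops, struct entry *e)
    {
            return ops->get_txwi(e);  /* shared code stays bus-agnostic */
    }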
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 39b3846fa340..005ee153e0cc 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com> 3 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> 4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
5 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com> 5 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
@@ -196,8 +196,6 @@ static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
196{ 196{
197 u32 reg; 197 u32 reg;
198 198
199 rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);
200
201 /* 199 /*
202 * enable Host program ram write selection 200 * enable Host program ram write selection
203 */ 201 */
@@ -344,24 +342,24 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
344 } 342 }
345 343
346 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg); 344 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
347 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, mask); 345 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
348 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, mask); 346 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
349 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask); 347 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
350 rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, mask); 348 rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
351 rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, mask); 349 rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
352 rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, mask); 350 rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
353 rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, mask); 351 rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
354 rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, mask); 352 rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
355 rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, mask); 353 rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
356 rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, mask); 354 rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
357 rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, mask); 355 rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
358 rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask); 356 rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
359 rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask); 357 rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
360 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask); 358 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
361 rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask); 359 rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
362 rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, mask); 360 rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
363 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, mask); 361 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
364 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, mask); 362 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
365 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 363 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
366} 364}
367 365
@@ -399,78 +397,18 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
399 397
400static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) 398static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
401{ 399{
402 u32 reg;
403 u16 word;
404
405 /*
406 * Initialize all registers.
407 */
408 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) || 400 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
409 rt2800pci_init_queues(rt2x00dev) || 401 rt2800pci_init_queues(rt2x00dev)))
410 rt2800_init_registers(rt2x00dev) ||
411 rt2800_wait_wpdma_ready(rt2x00dev) ||
412 rt2800_init_bbp(rt2x00dev) ||
413 rt2800_init_rfcsr(rt2x00dev)))
414 return -EIO; 402 return -EIO;
415 403
416 /* 404 return rt2800_enable_radio(rt2x00dev);
417 * Send signal to firmware during boot time.
418 */
419 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
420
421 /*
422 * Enable RX.
423 */
424 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
425 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
426 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
427 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
428
429 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
430 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
431 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
432 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
433 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
434 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
435
436 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
437 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
438 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
439 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
440
441 /*
442 * Initialize LED control
443 */
444 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
445 rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
446 word & 0xff, (word >> 8) & 0xff);
447
448 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
449 rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
450 word & 0xff, (word >> 8) & 0xff);
451
452 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
453 rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
454 word & 0xff, (word >> 8) & 0xff);
455
456 return 0;
457} 405}
458 406
459static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev) 407static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
460{ 408{
461 u32 reg; 409 u32 reg;
462 410
463 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 411 rt2800_disable_radio(rt2x00dev);
464 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
465 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
466 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
467 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
468 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
469 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
470
471 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
472 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
473 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
474 412
475 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280); 413 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);
476 414
@@ -486,9 +424,6 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
486 424
487 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); 425 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
488 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); 426 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
489
490 /* Wait for DMA, ignore error */
491 rt2800_wait_wpdma_ready(rt2x00dev);
492} 427}
493 428
494static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, 429static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -566,21 +501,16 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
566/* 501/*
567 * TX descriptor initialization 502 * TX descriptor initialization
568 */ 503 */
569static void rt2800pci_write_tx_data(struct queue_entry* entry, 504static __le32 *rt2800pci_get_txwi(struct queue_entry *entry)
570 struct txentry_desc *txdesc)
571{ 505{
572 __le32 *txwi = (__le32 *) entry->skb->data; 506 return (__le32 *) entry->skb->data;
573
574 rt2800_write_txwi(txwi, txdesc);
575} 507}
576 508
577 509static void rt2800pci_write_tx_desc(struct queue_entry *entry,
578static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
579 struct sk_buff *skb,
580 struct txentry_desc *txdesc) 510 struct txentry_desc *txdesc)
581{ 511{
582 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 512 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
583 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; 513 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
584 __le32 *txd = entry_priv->desc; 514 __le32 *txd = entry_priv->desc;
585 u32 word; 515 u32 word;
586 516
@@ -600,7 +530,7 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
600 rt2x00_desc_write(txd, 0, word); 530 rt2x00_desc_write(txd, 0, word);
601 531
602 rt2x00_desc_read(txd, 1, &word); 532 rt2x00_desc_read(txd, 1, &word);
603 rt2x00_set_field32(&word, TXD_W1_SD_LEN1, skb->len); 533 rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
604 rt2x00_set_field32(&word, TXD_W1_LAST_SEC1, 534 rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
605 !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); 535 !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
606 rt2x00_set_field32(&word, TXD_W1_BURST, 536 rt2x00_set_field32(&word, TXD_W1_BURST,
@@ -631,41 +561,35 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
631/* 561/*
632 * TX data initialization 562 * TX data initialization
633 */ 563 */
634static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 564static void rt2800pci_kick_tx_queue(struct data_queue *queue)
635 const enum data_queue_qid queue_idx)
636{ 565{
637 struct data_queue *queue; 566 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
638 unsigned int idx, qidx = 0; 567 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
639 568 unsigned int qidx = 0;
640 if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
641 return;
642
643 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
644 idx = queue->index[Q_INDEX];
645 569
646 if (queue_idx == QID_MGMT) 570 if (queue->qid == QID_MGMT)
647 qidx = 5; 571 qidx = 5;
648 else 572 else
649 qidx = queue_idx; 573 qidx = queue->qid;
650 574
651 rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), idx); 575 rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), entry->entry_idx);
652} 576}
653 577
654static void rt2800pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 578static void rt2800pci_kill_tx_queue(struct data_queue *queue)
655 const enum data_queue_qid qid)
656{ 579{
580 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
657 u32 reg; 581 u32 reg;
658 582
659 if (qid == QID_BEACON) { 583 if (queue->qid == QID_BEACON) {
660 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, 0); 584 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, 0);
661 return; 585 return;
662 } 586 }
663 587
664 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg); 588 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
665 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (qid == QID_AC_BE)); 589 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (queue->qid == QID_AC_BE));
666 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (qid == QID_AC_BK)); 590 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (queue->qid == QID_AC_BK));
667 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (qid == QID_AC_VI)); 591 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (queue->qid == QID_AC_VI));
668 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (qid == QID_AC_VO)); 592 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (queue->qid == QID_AC_VO));
669 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); 593 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
670} 594}
671 595
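The kick/kill handlers now take the struct data_queue itself rather than a (device, qid) pair: queue->qid and queue->rt2x00dev carry everything the handler needs, so both the rt2x00queue_get_queue() lookup and the qid validity check disappear. The kick path above reduces to mapping the queue onto its hardware index and publishing the current entry index:

    /* From the hunk: MGMT frames use hardware queue 5, data queues map 1:1. */
    unsigned int qidx = (queue->qid == QID_MGMT) ? 5 : queue->qid;
    rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), entry->entry_idx);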
@@ -728,110 +652,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
728/* 652/*
729 * Interrupt functions. 653 * Interrupt functions.
730 */ 654 */
731static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
732{
733 struct data_queue *queue;
734 struct queue_entry *entry;
735 __le32 *txwi;
736 struct txdone_entry_desc txdesc;
737 u32 word;
738 u32 reg;
739 int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
740 u16 mcs, real_mcs;
741 int i;
742
743 /*
744 * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO
745 * at most X times and also stop processing once the TX_STA_FIFO_VALID
746 * flag is not set anymore.
747 *
748 * The legacy drivers use X=TX_RING_SIZE but state in a comment
749 * that the TX_STA_FIFO stack has a size of 16. We stick to our
750 * tx ring size for now.
751 */
752 for (i = 0; i < TX_ENTRIES; i++) {
753 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
754 if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
755 break;
756
757 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
758 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
759 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
760
761 /*
762 * Skip this entry when it contains an invalid
763 * queue identification number.
764 */
765 if (pid <= 0 || pid > QID_RX)
766 continue;
767
768 queue = rt2x00queue_get_queue(rt2x00dev, pid - 1);
769 if (unlikely(!queue))
770 continue;
771
772 /*
773 * Inside each queue, we process each entry in a chronological
774 * order. We first check that the queue is not empty.
775 */
776 if (rt2x00queue_empty(queue))
777 continue;
778 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
779
780 /* Check if we got a match by looking at WCID/ACK/PID
781 * fields */
782 txwi = (__le32 *) entry->skb->data;
783
784 rt2x00_desc_read(txwi, 1, &word);
785 tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
786 tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
787 tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
788
789 if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid))
790 WARNING(rt2x00dev, "invalid TX_STA_FIFO content\n");
791
792 /*
793 * Obtain the status about this packet.
794 */
795 txdesc.flags = 0;
796 rt2x00_desc_read(txwi, 0, &word);
797 mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
798 real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
799
800 /*
801 * Ralink has a retry mechanism using a global fallback
802 * table. We setup this fallback table to try the immediate
803 * lower rate for all rates. In the TX_STA_FIFO, the MCS field
804 * always contains the MCS used for the last transmission, be
805 * it successful or not.
806 */
807 if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) {
808 /*
809 * Transmission succeeded. The number of retries is
810 * mcs - real_mcs
811 */
812 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
813 txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
814 } else {
815 /*
816 * Transmission failed. The number of retries is
817 * always 7 in this case (for a total number of 8
818 * frames sent).
819 */
820 __set_bit(TXDONE_FAILURE, &txdesc.flags);
821 txdesc.retry = 7;
822 }
823
824 /*
825 * the frame was retried at least once
826 * -> hw used fallback rates
827 */
828 if (txdesc.retry)
829 __set_bit(TXDONE_FALLBACK, &txdesc.flags);
830
831 rt2x00lib_txdone(entry, &txdesc);
832 }
833}
834
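The removed handler's logic moves into the shared rt2800_txdone() (declared in the rt2800lib.h hunk above), and its retry arithmetic is worth spelling out: the global fallback table steps down one MCS per retry, and TX_STA_FIFO reports the MCS of the final attempt, so on success the retry count is the queued MCS minus the reported MCS, while on failure the hardware always made 8 attempts, i.e. 7 retries. A worked example:

    /* Frame queued at MCS 7; TX_STA_FIFO reports success at MCS 5. */
    static int tx_retries(int mcs, int real_mcs, int success)
    {
            if (!success)
                    return 7;     /* 8 transmissions in total */
            return (mcs > real_mcs) ? mcs - real_mcs : 0;  /* here: 2 */
    }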
835static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev) 655static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
836{ 656{
837 struct ieee80211_conf conf = { .flags = 0 }; 657 struct ieee80211_conf conf = { .flags = 0 };
@@ -867,7 +687,7 @@ static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
867 * 4 - Tx done interrupt. 687 * 4 - Tx done interrupt.
868 */ 688 */
869 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) 689 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
870 rt2800pci_txdone(rt2x00dev); 690 rt2800_txdone(rt2x00dev);
871 691
872 /* 692 /*
873 * 5 - Auto wakeup interrupt. 693 * 5 - Auto wakeup interrupt.
@@ -1011,6 +831,7 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
1011 .regbusy_read = rt2x00pci_regbusy_read, 831 .regbusy_read = rt2x00pci_regbusy_read,
1012 .drv_write_firmware = rt2800pci_write_firmware, 832 .drv_write_firmware = rt2800pci_write_firmware,
1013 .drv_init_registers = rt2800pci_init_registers, 833 .drv_init_registers = rt2800pci_init_registers,
834 .drv_get_txwi = rt2800pci_get_txwi,
1014}; 835};
1015 836
1016static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { 837static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
@@ -1030,7 +851,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1030 .reset_tuner = rt2800_reset_tuner, 851 .reset_tuner = rt2800_reset_tuner,
1031 .link_tuner = rt2800_link_tuner, 852 .link_tuner = rt2800_link_tuner,
1032 .write_tx_desc = rt2800pci_write_tx_desc, 853 .write_tx_desc = rt2800pci_write_tx_desc,
1033 .write_tx_data = rt2800pci_write_tx_data, 854 .write_tx_data = rt2800_write_tx_data,
1034 .write_beacon = rt2800_write_beacon, 855 .write_beacon = rt2800_write_beacon,
1035 .kick_tx_queue = rt2800pci_kick_tx_queue, 856 .kick_tx_queue = rt2800pci_kick_tx_queue,
1036 .kill_tx_queue = rt2800pci_kill_tx_queue, 857 .kill_tx_queue = rt2800pci_kill_tx_queue,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 5a2dfe87c6b6..3dff56ec195a 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de> 4 Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> 5 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
5 Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com> 6 Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
@@ -100,19 +101,6 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
100 msleep(10); 101 msleep(10);
101 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 102 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
102 103
103 /*
104 * Send signal to firmware during boot time.
105 */
106 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
107
108 if (rt2x00_rt(rt2x00dev, RT3070) ||
109 rt2x00_rt(rt2x00dev, RT3071) ||
110 rt2x00_rt(rt2x00dev, RT3572)) {
111 udelay(200);
112 rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
113 udelay(10);
114 }
115
116 return 0; 104 return 0;
117} 105}
118 106
@@ -134,26 +122,18 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
134static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev) 122static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
135{ 123{
136 u32 reg; 124 u32 reg;
137 int i;
138 125
139 /* 126 /*
140 * Wait until BBP and RF are ready. 127 * Wait until BBP and RF are ready.
141 */ 128 */
142 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 129 if (rt2800_wait_csr_ready(rt2x00dev))
143 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
144 if (reg && reg != ~0)
145 break;
146 msleep(1);
147 }
148
149 if (i == REGISTER_BUSY_COUNT) {
150 ERROR(rt2x00dev, "Unstable hardware.\n");
151 return -EBUSY; 130 return -EBUSY;
152 }
153 131
154 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg); 132 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
155 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000); 133 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
156 134
135 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
136
157 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 137 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
158 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); 138 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
159 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1); 139 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
@@ -172,30 +152,10 @@ static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
172static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) 152static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
173{ 153{
174 u32 reg; 154 u32 reg;
175 u16 word;
176 155
177 /* 156 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev)))
178 * Initialize all registers.
179 */
180 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
181 rt2800_init_registers(rt2x00dev) ||
182 rt2800_init_bbp(rt2x00dev) ||
183 rt2800_init_rfcsr(rt2x00dev)))
184 return -EIO; 157 return -EIO;
185 158
186 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
187 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
188 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
189
190 udelay(50);
191
192 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
193 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
194 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
195 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
196 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
197
198
199 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg); 159 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
200 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0); 160 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
201 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0); 161 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
@@ -210,45 +170,12 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
210 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1); 170 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
211 rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg); 171 rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg);
212 172
213 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 173 return rt2800_enable_radio(rt2x00dev);
214 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
215 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
216 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
217
218 /*
219 * Initialize LED control
220 */
221 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
222 rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
223 word & 0xff, (word >> 8) & 0xff);
224
225 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
226 rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
227 word & 0xff, (word >> 8) & 0xff);
228
229 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
230 rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
231 word & 0xff, (word >> 8) & 0xff);
232
233 return 0;
234} 174}
235 175
236static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev) 176static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
237{ 177{
238 u32 reg; 178 rt2800_disable_radio(rt2x00dev);
239
240 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
241 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
242 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
243 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
244
245 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
246 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
247 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
248
249 /* Wait for DMA, ignore error */
250 rt2800_wait_wpdma_ready(rt2x00dev);
251
252 rt2x00usb_disable_radio(rt2x00dev); 179 rt2x00usb_disable_radio(rt2x00dev);
253} 180}
254 181
@@ -320,21 +247,19 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
320/* 247/*
321 * TX descriptor initialization 248 * TX descriptor initialization
322 */ 249 */
323static void rt2800usb_write_tx_data(struct queue_entry* entry, 250static __le32 *rt2800usb_get_txwi(struct queue_entry *entry)
324 struct txentry_desc *txdesc)
325{ 251{
326 __le32 *txwi = (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE); 252 if (entry->queue->qid == QID_BEACON)
327 253 return (__le32 *) (entry->skb->data);
328 rt2800_write_txwi(txwi, txdesc); 254 else
255 return (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE);
329} 256}
330 257
331 258static void rt2800usb_write_tx_desc(struct queue_entry *entry,
332static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
333 struct sk_buff *skb,
334 struct txentry_desc *txdesc) 259 struct txentry_desc *txdesc)
335{ 260{
336 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 261 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
337 __le32 *txi = (__le32 *) skb->data; 262 __le32 *txi = (__le32 *) entry->skb->data;
338 u32 word; 263 u32 word;
339 264
340 /* 265 /*
@@ -342,7 +267,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
342 */ 267 */
343 rt2x00_desc_read(txi, 0, &word); 268 rt2x00_desc_read(txi, 0, &word);
344 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, 269 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
345 skb->len - TXINFO_DESC_SIZE); 270 entry->skb->len - TXINFO_DESC_SIZE);
346 rt2x00_set_field32(&word, TXINFO_W0_WIV, 271 rt2x00_set_field32(&word, TXINFO_W0_WIV,
347 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); 272 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
348 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); 273 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -379,6 +304,46 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
379} 304}
380 305
381/* 306/*
307 * TX control handlers
308 */
309static void rt2800usb_work_txdone(struct work_struct *work)
310{
311 struct rt2x00_dev *rt2x00dev =
312 container_of(work, struct rt2x00_dev, txdone_work);
313 struct data_queue *queue;
314 struct queue_entry *entry;
315
316 rt2800_txdone(rt2x00dev);
317
318 /*
319 * Process any trailing TX status reports for IO failures;
320 * we loop until we find the first non-IO error entry. This
321 * can either be a frame which is free, is being uploaded,
322 * or has completed the upload but didn't have an entry
323 * in the TX_STA_FIFO register yet.
324 */
325 tx_queue_for_each(rt2x00dev, queue) {
326 while (!rt2x00queue_empty(queue)) {
327 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
328
329 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
330 !test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
331 break;
332
333 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
334 }
335 }
336}
337
338static void rt2800usb_kill_tx_queue(struct data_queue *queue)
339{
340 if (queue->qid == QID_BEACON)
341 rt2x00usb_register_write(queue->rt2x00dev, BCN_TIME_CFG, 0);
342
343 rt2x00usb_kill_tx_queue(queue);
344}
345
346/*
382 * RX control handlers 347 * RX control handlers
383 */ 348 */
384static void rt2800usb_fill_rxdone(struct queue_entry *entry, 349static void rt2800usb_fill_rxdone(struct queue_entry *entry,
@@ -514,6 +479,11 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
514 */ 479 */
515 rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; 480 rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;
516 481
482 /*
483 * Overwrite TX done handler
484 */
485 PREPARE_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
486
517 return 0; 487 return 0;
518} 488}
519 489
@@ -549,6 +519,7 @@ static const struct rt2800_ops rt2800usb_rt2800_ops = {
549 .regbusy_read = rt2x00usb_regbusy_read, 519 .regbusy_read = rt2x00usb_regbusy_read,
550 .drv_write_firmware = rt2800usb_write_firmware, 520 .drv_write_firmware = rt2800usb_write_firmware,
551 .drv_init_registers = rt2800usb_init_registers, 521 .drv_init_registers = rt2800usb_init_registers,
522 .drv_get_txwi = rt2800usb_get_txwi,
552}; 523};
553 524
554static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { 525static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
@@ -566,11 +537,11 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
566 .link_tuner = rt2800_link_tuner, 537 .link_tuner = rt2800_link_tuner,
567 .watchdog = rt2x00usb_watchdog, 538 .watchdog = rt2x00usb_watchdog,
568 .write_tx_desc = rt2800usb_write_tx_desc, 539 .write_tx_desc = rt2800usb_write_tx_desc,
569 .write_tx_data = rt2800usb_write_tx_data, 540 .write_tx_data = rt2800_write_tx_data,
570 .write_beacon = rt2800_write_beacon, 541 .write_beacon = rt2800_write_beacon,
571 .get_tx_data_len = rt2800usb_get_tx_data_len, 542 .get_tx_data_len = rt2800usb_get_tx_data_len,
572 .kick_tx_queue = rt2x00usb_kick_tx_queue, 543 .kick_tx_queue = rt2x00usb_kick_tx_queue,
573 .kill_tx_queue = rt2x00usb_kill_tx_queue, 544 .kill_tx_queue = rt2800usb_kill_tx_queue,
574 .fill_rxdone = rt2800usb_fill_rxdone, 545 .fill_rxdone = rt2800usb_fill_rxdone,
575 .config_shared_key = rt2800_config_shared_key, 546 .config_shared_key = rt2800_config_shared_key,
576 .config_pairwise_key = rt2800_config_pairwise_key, 547 .config_pairwise_key = rt2800_config_pairwise_key,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index c21af38cc5af..7832a5996a8c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com> 4 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
4 <http://rt2x00.serialmonkey.com> 5 <http://rt2x00.serialmonkey.com>
5 6
@@ -212,8 +213,9 @@ struct channel_info {
212 unsigned int flags; 213 unsigned int flags;
213#define GEOGRAPHY_ALLOWED 0x00000001 214#define GEOGRAPHY_ALLOWED 0x00000001
214 215
215 short tx_power1; 216 short max_power;
216 short tx_power2; 217 short default_power1;
218 short default_power2;
217}; 219};
218 220
219/* 221/*
@@ -558,18 +560,15 @@ struct rt2x00lib_ops {
558 /* 560 /*
559 * TX control handlers 561 * TX control handlers
560 */ 562 */
561 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, 563 void (*write_tx_desc) (struct queue_entry *entry,
562 struct sk_buff *skb,
563 struct txentry_desc *txdesc); 564 struct txentry_desc *txdesc);
564 void (*write_tx_data) (struct queue_entry *entry, 565 void (*write_tx_data) (struct queue_entry *entry,
565 struct txentry_desc *txdesc); 566 struct txentry_desc *txdesc);
566 void (*write_beacon) (struct queue_entry *entry, 567 void (*write_beacon) (struct queue_entry *entry,
567 struct txentry_desc *txdesc); 568 struct txentry_desc *txdesc);
568 int (*get_tx_data_len) (struct queue_entry *entry); 569 int (*get_tx_data_len) (struct queue_entry *entry);
569 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, 570 void (*kick_tx_queue) (struct data_queue *queue);
570 const enum data_queue_qid queue); 571 void (*kill_tx_queue) (struct data_queue *queue);
571 void (*kill_tx_queue) (struct rt2x00_dev *rt2x00dev,
572 const enum data_queue_qid queue);
573 572
574 /* 573 /*
575 * RX control handlers 574 * RX control handlers
@@ -597,7 +596,8 @@ struct rt2x00lib_ops {
597#define CONFIG_UPDATE_BSSID ( 1 << 3 ) 596#define CONFIG_UPDATE_BSSID ( 1 << 3 )
598 597
599 void (*config_erp) (struct rt2x00_dev *rt2x00dev, 598 void (*config_erp) (struct rt2x00_dev *rt2x00dev,
600 struct rt2x00lib_erp *erp); 599 struct rt2x00lib_erp *erp,
600 u32 changed);
601 void (*config_ant) (struct rt2x00_dev *rt2x00dev, 601 void (*config_ant) (struct rt2x00_dev *rt2x00dev,
602 struct antenna_setup *ant); 602 struct antenna_setup *ant);
603 void (*config) (struct rt2x00_dev *rt2x00dev, 603 void (*config) (struct rt2x00_dev *rt2x00dev,
@@ -698,6 +698,7 @@ struct rt2x00_dev {
698 struct ieee80211_hw *hw; 698 struct ieee80211_hw *hw;
699 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 699 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
700 enum ieee80211_band curr_band; 700 enum ieee80211_band curr_band;
701 int curr_freq;
701 702
702 /* 703 /*
703 * If enabled, the debugfs interface structures 704 * If enabled, the debugfs interface structures
@@ -850,11 +851,6 @@ struct rt2x00_dev {
850 struct ieee80211_low_level_stats low_level_stats; 851 struct ieee80211_low_level_stats low_level_stats;
851 852
852 /* 853 /*
853 * RX configuration information.
854 */
855 struct ieee80211_rx_status rx_status;
856
857 /*
858 * Scheduled work. 854 * Scheduled work.
859 * NOTE: intf_work will use ieee80211_iterate_active_interfaces() 855 * NOTE: intf_work will use ieee80211_iterate_active_interfaces()
860 * which means it cannot be placed on the hw->workqueue 856 * which means it cannot be placed on the hw->workqueue
@@ -862,6 +858,12 @@ struct rt2x00_dev {
862 */ 858 */
863 struct work_struct intf_work; 859 struct work_struct intf_work;
864 860
861 /**
862 * Scheduled work for TX/RX done handling (USB devices)
863 */
864 struct work_struct rxdone_work;
865 struct work_struct txdone_work;
866
865 /* 867 /*
866 * Data queue arrays for RX, TX and Beacon. 868 * Data queue arrays for RX, TX and Beacon.
867 * The Beacon array also contains the Atim queue 869 * The Beacon array also contains the Atim queue
@@ -1069,8 +1071,10 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
1069 */ 1071 */
1070void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev); 1072void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
1071void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev); 1073void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev);
1074void rt2x00lib_dmadone(struct queue_entry *entry);
1072void rt2x00lib_txdone(struct queue_entry *entry, 1075void rt2x00lib_txdone(struct queue_entry *entry,
1073 struct txdone_entry_desc *txdesc); 1076 struct txdone_entry_desc *txdesc);
1077void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status);
1074void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev, 1078void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
1075 struct queue_entry *entry); 1079 struct queue_entry *entry);
1076 1080
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 953dc4f2c6af..4c7ff765a8bf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -81,7 +81,8 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
81 81
82void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev, 82void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
83 struct rt2x00_intf *intf, 83 struct rt2x00_intf *intf,
84 struct ieee80211_bss_conf *bss_conf) 84 struct ieee80211_bss_conf *bss_conf,
85 u32 changed)
85{ 86{
86 struct rt2x00lib_erp erp; 87 struct rt2x00lib_erp erp;
87 88
@@ -102,7 +103,7 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
102 /* Update global beacon interval time, this is needed for PS support */ 103 /* Update global beacon interval time, this is needed for PS support */
103 rt2x00dev->beacon_int = bss_conf->beacon_int; 104 rt2x00dev->beacon_int = bss_conf->beacon_int;
104 105
105 rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp); 106 rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp, changed);
106} 107}
107 108
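Passing mac80211's changed bitmask through to config_erp() lets a driver reprogram only the ERP state that actually changed instead of rewriting every register on each bss_info_changed() call. A hedged sketch of the intended use; the BSS_CHANGED_* names are standard mac80211 flags, but which bits a given rt2x00 driver checks is up to that driver:

    if (changed & BSS_CHANGED_ERP_PREAMBLE)
            ;  /* reprogram short-preamble auto-response only */
    if (changed & BSS_CHANGED_BEACON_INT)
            ;  /* reprogram the beacon interval only */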
108static inline 109static inline
@@ -126,25 +127,17 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
126 * ANTENNA_SW_DIVERSITY state to the driver. 127 * ANTENNA_SW_DIVERSITY state to the driver.
127 * If that happens, fallback to hardware defaults, 128 * If that happens, fallback to hardware defaults,
128 * or our own default. 129 * or our own default.
129 * If diversity handling is active for a particular antenna,
130 * we shouldn't overwrite that antenna.
131 * The calls to rt2x00lib_config_antenna_check()
132 * might have caused that we restore back to the already
133 * active setting. If that has happened we can quit.
134 */ 130 */
135 if (!(ant->flags & ANTENNA_RX_DIVERSITY)) 131 if (!(ant->flags & ANTENNA_RX_DIVERSITY))
136 config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx); 132 config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx);
137 else 133 else if (config.rx == ANTENNA_SW_DIVERSITY)
138 config.rx = active->rx; 134 config.rx = active->rx;
139 135
140 if (!(ant->flags & ANTENNA_TX_DIVERSITY)) 136 if (!(ant->flags & ANTENNA_TX_DIVERSITY))
141 config.tx = rt2x00lib_config_antenna_check(config.tx, def->tx); 137 config.tx = rt2x00lib_config_antenna_check(config.tx, def->tx);
142 else 138 else if (config.tx == ANTENNA_SW_DIVERSITY)
143 config.tx = active->tx; 139 config.tx = active->tx;
144 140
145 if (config.rx == active->rx && config.tx == active->tx)
146 return;
147
148 /* 141 /*
149 * Antenna setup changes require the RX to be disabled, 142 * Antenna setup changes require the RX to be disabled,
150 * else the changes will be ignored by the device. 143 * else the changes will be ignored by the device.
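The diversity fix narrows the fallback: the currently active antenna is restored only while the requested configuration is still ANTENNA_SW_DIVERSITY, so an explicitly requested antenna now wins even when diversity handling is active. Removing the early "nothing changed" return also forces the setup to be reprogrammed around the RX-disable window the comment above describes. The per-side decision, RX shown (TX is symmetric):

    if (!(ant->flags & ANTENNA_RX_DIVERSITY))
            config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx);
    else if (config.rx == ANTENNA_SW_DIVERSITY)
            config.rx = active->rx;   /* keep the diversity pick */
    /* else: honor the explicitly requested antenna */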
@@ -209,10 +202,8 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
209 rt2x00link_reset_tuner(rt2x00dev, false); 202 rt2x00link_reset_tuner(rt2x00dev, false);
210 203
211 rt2x00dev->curr_band = conf->channel->band; 204 rt2x00dev->curr_band = conf->channel->band;
205 rt2x00dev->curr_freq = conf->channel->center_freq;
212 rt2x00dev->tx_power = conf->power_level; 206 rt2x00dev->tx_power = conf->power_level;
213 rt2x00dev->short_retry = conf->short_frame_max_tx_count; 207 rt2x00dev->short_retry = conf->short_frame_max_tx_count;
214 rt2x00dev->long_retry = conf->long_frame_max_tx_count; 208 rt2x00dev->long_retry = conf->long_frame_max_tx_count;
215
216 rt2x00dev->rx_status.band = conf->channel->band;
217 rt2x00dev->rx_status.freq = conf->channel->center_freq;
218} 209}
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 583dacd8d241..5e9074bf2b8e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -31,15 +31,14 @@
31 31
32enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key) 32enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
33{ 33{
34 switch (key->alg) { 34 switch (key->cipher) {
35 case ALG_WEP: 35 case WLAN_CIPHER_SUITE_WEP40:
36 if (key->keylen == WLAN_KEY_LEN_WEP40) 36 return CIPHER_WEP64;
37 return CIPHER_WEP64; 37 case WLAN_CIPHER_SUITE_WEP104:
38 else 38 return CIPHER_WEP128;
39 return CIPHER_WEP128; 39 case WLAN_CIPHER_SUITE_TKIP:
40 case ALG_TKIP:
41 return CIPHER_TKIP; 40 return CIPHER_TKIP;
42 case ALG_CCMP: 41 case WLAN_CIPHER_SUITE_CCMP:
43 return CIPHER_AES; 42 return CIPHER_AES;
44 default: 43 default:
45 return CIPHER_NONE; 44 return CIPHER_NONE;
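mac80211 replaced the alg/keylen pair with per-suite cipher identifiers, so the WEP key-length check disappears and the conversion becomes a direct table:

    /* mac80211 cipher suite         rt2x00 hardware cipher
     * WLAN_CIPHER_SUITE_WEP40   ->  CIPHER_WEP64
     * WLAN_CIPHER_SUITE_WEP104  ->  CIPHER_WEP128
     * WLAN_CIPHER_SUITE_TKIP    ->  CIPHER_TKIP
     * WLAN_CIPHER_SUITE_CCMP    ->  CIPHER_AES
     * anything else             ->  CIPHER_NONE
     */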
@@ -95,7 +94,7 @@ unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
95 overhead += key->iv_len; 94 overhead += key->iv_len;
96 95
97 if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { 96 if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
98 if (key->alg == ALG_TKIP) 97 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
99 overhead += 8; 98 overhead += 8;
100 } 99 }
101 100
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index b0498e7e7aae..c1710b27ba70 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -333,12 +333,12 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
333 if (*offset) 333 if (*offset)
334 return 0; 334 return 0;
335 335
336 data = kzalloc(lines * MAX_LINE_LENGTH, GFP_KERNEL); 336 data = kcalloc(lines, MAX_LINE_LENGTH, GFP_KERNEL);
337 if (!data) 337 if (!data)
338 return -ENOMEM; 338 return -ENOMEM;
339 339
340 temp = data + 340 temp = data +
341 sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdone\tcrypto\n"); 341 sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdma done\tdone\n");
342 342
343 queue_for_each(intf->rt2x00dev, queue) { 343 queue_for_each(intf->rt2x00dev, queue) {
344 spin_lock_irqsave(&queue->lock, irqflags); 344 spin_lock_irqsave(&queue->lock, irqflags);
@@ -346,8 +346,8 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
346 temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid, 346 temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid,
347 queue->count, queue->limit, queue->length, 347 queue->count, queue->limit, queue->length,
348 queue->index[Q_INDEX], 348 queue->index[Q_INDEX],
349 queue->index[Q_INDEX_DONE], 349 queue->index[Q_INDEX_DMA_DONE],
350 queue->index[Q_INDEX_CRYPTO]); 350 queue->index[Q_INDEX_DONE]);
351 351
352 spin_unlock_irqrestore(&queue->lock, irqflags); 352 spin_unlock_irqrestore(&queue->lock, irqflags);
353 } 353 }
@@ -380,7 +380,7 @@ static ssize_t rt2x00debug_read_crypto_stats(struct file *file,
380 loff_t *offset) 380 loff_t *offset)
381{ 381{
382 struct rt2x00debug_intf *intf = file->private_data; 382 struct rt2x00debug_intf *intf = file->private_data;
383 char *name[] = { "WEP64", "WEP128", "TKIP", "AES" }; 383 static const char * const name[] = { "WEP64", "WEP128", "TKIP", "AES" };
384 char *data; 384 char *data;
385 char *temp; 385 char *temp;
386 size_t size; 386 size_t size;
@@ -481,6 +481,9 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \
481 if (index >= debug->__name.word_count) \ 481 if (index >= debug->__name.word_count) \
482 return -EINVAL; \ 482 return -EINVAL; \
483 \ 483 \
484 if (length > sizeof(line)) \
485 return -EINVAL; \
486 \
484 if (copy_from_user(line, buf, length)) \ 487 if (copy_from_user(line, buf, length)) \
485 return -EFAULT; \ 488 return -EFAULT; \
486 \ 489 \
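The added guard closes a stack overflow in the debugfs register write path: line is a fixed on-stack buffer while length is caller-controlled from user space, so it must be validated before copy_from_user() touches the buffer:

    if (length > sizeof(line))              /* reject oversized writes */
            return -EINVAL;
    if (copy_from_user(line, buf, length))
            return -EFAULT;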
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 585e8166f22a..053fdd3bd720 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 4 <http://rt2x00.serialmonkey.com>
4 5
5 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
@@ -250,6 +251,12 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
250} 251}
251EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt); 252EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
252 253
254void rt2x00lib_dmadone(struct queue_entry *entry)
255{
256 rt2x00queue_index_inc(entry->queue, Q_INDEX_DMA_DONE);
257}
258EXPORT_SYMBOL_GPL(rt2x00lib_dmadone);
259
253void rt2x00lib_txdone(struct queue_entry *entry, 260void rt2x00lib_txdone(struct queue_entry *entry,
254 struct txdone_entry_desc *txdesc) 261 struct txdone_entry_desc *txdesc)
255{ 262{
@@ -383,15 +390,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
383 * send the status report back. 390 * send the status report back.
384 */ 391 */
385 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) 392 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211))
386 /* 393 ieee80211_tx_status(rt2x00dev->hw, entry->skb);
387 * Only PCI and SOC devices process the tx status in process
388 * context. Hence use ieee80211_tx_status for PCI and SOC
389 * devices and stick to ieee80211_tx_status_irqsafe for USB.
390 */
391 if (rt2x00_is_usb(rt2x00dev))
392 ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb);
393 else
394 ieee80211_tx_status(rt2x00dev->hw, entry->skb);
395 else 394 else
396 dev_kfree_skb_any(entry->skb); 395 dev_kfree_skb_any(entry->skb);
397 396
@@ -403,7 +402,6 @@ void rt2x00lib_txdone(struct queue_entry *entry,
403 402
404 rt2x00dev->ops->lib->clear_entry(entry); 403 rt2x00dev->ops->lib->clear_entry(entry);
405 404
406 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
407 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); 405 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
408 406
409 /* 407 /*
@@ -416,6 +414,18 @@ void rt2x00lib_txdone(struct queue_entry *entry,
416} 414}
417EXPORT_SYMBOL_GPL(rt2x00lib_txdone); 415EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
418 416
417void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status)
418{
419 struct txdone_entry_desc txdesc;
420
421 txdesc.flags = 0;
422 __set_bit(status, &txdesc.flags);
423 txdesc.retry = 0;
424
425 rt2x00lib_txdone(entry, &txdesc);
426}
427EXPORT_SYMBOL_GPL(rt2x00lib_txdone_noinfo);
428
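rt2x00lib_txdone_noinfo() is a convenience wrapper for completion paths that have no hardware status report: it fabricates a txdone descriptor with zero retries and a single caller-chosen TXDONE_* flag, then funnels the entry through the normal rt2x00lib_txdone() path. Typical use, as in the rt2800usb hunk earlier in this patch:

    /* URB failed at the USB layer; no TX_STA_FIFO entry will follow. */
    rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);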
419static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, 429static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
420 struct rxdone_entry_desc *rxdesc) 430 struct rxdone_entry_desc *rxdesc)
421{ 431{
@@ -460,9 +470,13 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
460{ 470{
461 struct rxdone_entry_desc rxdesc; 471 struct rxdone_entry_desc rxdesc;
462 struct sk_buff *skb; 472 struct sk_buff *skb;
463 struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status; 473 struct ieee80211_rx_status *rx_status;
464 unsigned int header_length; 474 unsigned int header_length;
465 int rate_idx; 475 int rate_idx;
476
477 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
478 goto submit_entry;
479
466 /* 480 /*
467 * Allocate a new sk_buffer. If no new buffer available, drop the 481 * Allocate a new sk_buffer. If no new buffer available, drop the
468 * received frame and reuse the existing buffer. 482 * received frame and reuse the existing buffer.
@@ -527,39 +541,32 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
527 */ 541 */
528 rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc); 542 rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
529 rt2x00debug_update_crypto(rt2x00dev, &rxdesc); 543 rt2x00debug_update_crypto(rt2x00dev, &rxdesc);
544 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
530 545
546 /*
547 * Initialize RX status information, and send frame
548 * to mac80211.
549 */
550 rx_status = IEEE80211_SKB_RXCB(entry->skb);
531 rx_status->mactime = rxdesc.timestamp; 551 rx_status->mactime = rxdesc.timestamp;
552 rx_status->band = rt2x00dev->curr_band;
553 rx_status->freq = rt2x00dev->curr_freq;
532 rx_status->rate_idx = rate_idx; 554 rx_status->rate_idx = rate_idx;
533 rx_status->signal = rxdesc.rssi; 555 rx_status->signal = rxdesc.rssi;
534 rx_status->flag = rxdesc.flags; 556 rx_status->flag = rxdesc.flags;
535 rx_status->antenna = rt2x00dev->link.ant.active.rx; 557 rx_status->antenna = rt2x00dev->link.ant.active.rx;
536 558
537 /* 559 ieee80211_rx_ni(rt2x00dev->hw, entry->skb);
538 * Send frame to mac80211 & debugfs.
539 * mac80211 will clean up the skb structure.
540 */
541 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
542 memcpy(IEEE80211_SKB_RXCB(entry->skb), rx_status, sizeof(*rx_status));
543
544 /*
545 * Currently only PCI and SOC devices handle rx interrupts in process
546 * context. Hence, use ieee80211_rx_irqsafe for USB and ieee80211_rx_ni
547 * for PCI and SOC devices.
548 */
549 if (rt2x00_is_usb(rt2x00dev))
550 ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb);
551 else
552 ieee80211_rx_ni(rt2x00dev->hw, entry->skb);
553 560
554 /* 561 /*
555 * Replace the skb with the freshly allocated one. 562 * Replace the skb with the freshly allocated one.
556 */ 563 */
557 entry->skb = skb; 564 entry->skb = skb;
558 entry->flags = 0;
559 565
566submit_entry:
560 rt2x00dev->ops->lib->clear_entry(entry); 567 rt2x00dev->ops->lib->clear_entry(entry);
561
562 rt2x00queue_index_inc(entry->queue, Q_INDEX); 568 rt2x00queue_index_inc(entry->queue, Q_INDEX);
569 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
563} 570}
564EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); 571EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
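/*
 * Rationale sketch, inferred from the comments deleted above: with the
 * USB completion handling moved to work items later in this diff, both
 * RX and TX completion always run in process context -- as they already
 * did for PCI and SOC -- so the ieee80211_rx_irqsafe() and
 * ieee80211_tx_status_irqsafe() variants are no longer needed here.
 */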
565 572
@@ -710,7 +717,7 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
710 for (i = 0; i < spec->num_channels; i++) { 717 for (i = 0; i < spec->num_channels; i++) {
711 rt2x00lib_channel(&channels[i], 718 rt2x00lib_channel(&channels[i],
712 spec->channels[i].channel, 719 spec->channels[i].channel,
713 spec->channels_info[i].tx_power1, i); 720 spec->channels_info[i].max_power, i);
714 } 721 }
715 722
716 /* 723 /*
@@ -1017,6 +1024,8 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1017 * Stop all work. 1024 * Stop all work.
1018 */ 1025 */
1019 cancel_work_sync(&rt2x00dev->intf_work); 1026 cancel_work_sync(&rt2x00dev->intf_work);
1027 cancel_work_sync(&rt2x00dev->rxdone_work);
1028 cancel_work_sync(&rt2x00dev->txdone_work);
1020 1029
1021 /* 1030 /*
1022 * Uninitialize device. 1031 * Uninitialize device.
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index b818a43c4672..f0e1eb72befc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -63,6 +63,9 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
63 63
64 INFO(rt2x00dev, "Firmware detected - version: %d.%d.\n", 64 INFO(rt2x00dev, "Firmware detected - version: %d.%d.\n",
65 fw->data[fw->size - 4], fw->data[fw->size - 3]); 65 fw->data[fw->size - 4], fw->data[fw->size - 3]);
66 snprintf(rt2x00dev->hw->wiphy->fw_version,
67 sizeof(rt2x00dev->hw->wiphy->fw_version), "%d.%d",
68 fw->data[fw->size - 4], fw->data[fw->size - 3]);
66 69
67 retval = rt2x00dev->ops->lib->check_firmware(rt2x00dev, fw->data, fw->size); 70 retval = rt2x00dev->ops->lib->check_firmware(rt2x00dev, fw->data, fw->size);
68 switch (retval) { 71 switch (retval) {
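/*
 * Effect sketch, assuming standard wiphy/ethtool behaviour: mac80211
 * exposes wiphy->fw_version through the ethtool driver info, so after
 * this change a query such as `ethtool -i wlan0` would report e.g.
 * "firmware-version: 1.8" for firmware whose trailing bytes decode to
 * major 1, minor 8.
 */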
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index c004cd3a8847..ad3c7ff4837b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -54,6 +54,16 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
54 */ 54 */
55 if (txrate->flags & IEEE80211_TX_RC_MCS) { 55 if (txrate->flags & IEEE80211_TX_RC_MCS) {
56 txdesc->mcs = txrate->idx; 56 txdesc->mcs = txrate->idx;
57
58 /*
59 * MIMO PS should be set to 1 for STAs using dynamic SM PS
60 * when using more than one TX stream (>MCS7).
61 */
62 if (tx_info->control.sta && txdesc->mcs > 7 &&
63 (tx_info->control.sta->ht_cap.cap &
64 (WLAN_HT_CAP_SM_PS_DYNAMIC <<
65 IEEE80211_HT_CAP_SM_PS_SHIFT)))
66 __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
57 } else { 67 } else {
58 txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs); 68 txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
59 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 69 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
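/*
 * Equivalent form of the SM PS test above, as a sketch (the helper
 * name is hypothetical): extracting the two SM PS bits first may read
 * more clearly than the inline mask/shift combination.
 */
static bool example_sta_uses_dynamic_smps(struct ieee80211_sta *sta)
{
	u8 smps = (sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS)
		  >> IEEE80211_HT_CAP_SM_PS_SHIFT;

	return smps == WLAN_HT_CAP_SM_PS_DYNAMIC;
}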
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index dc5c6574aaf4..70c85ac2e53e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -86,7 +86,8 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
86 const u8 *mac, const u8 *bssid); 86 const u8 *mac, const u8 *bssid);
87void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev, 87void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
88 struct rt2x00_intf *intf, 88 struct rt2x00_intf *intf,
89 struct ieee80211_bss_conf *conf); 89 struct ieee80211_bss_conf *conf,
90 u32 changed);
90void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, 91void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
91 struct antenna_setup ant); 92 struct antenna_setup ant);
92void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, 93void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 666cef3f8472..4d534e9dc628 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -188,7 +188,6 @@ static void rt2x00lib_antenna_diversity_eval(struct rt2x00_dev *rt2x00dev)
188static bool rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev) 188static bool rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev)
189{ 189{
190 struct link_ant *ant = &rt2x00dev->link.ant; 190 struct link_ant *ant = &rt2x00dev->link.ant;
191 unsigned int flags = ant->flags;
192 191
193 /* 192 /*
194 * Determine if software diversity is enabled for 193 * Determine if software diversity is enabled for
@@ -196,13 +195,13 @@ static bool rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev)
196 * Always perform this check since within the link 195 * Always perform this check since within the link
197 * tuner interval the configuration might have changed. 196 * tuner interval the configuration might have changed.
198 */ 197 */
199 flags &= ~ANTENNA_RX_DIVERSITY; 198 ant->flags &= ~ANTENNA_RX_DIVERSITY;
200 flags &= ~ANTENNA_TX_DIVERSITY; 199 ant->flags &= ~ANTENNA_TX_DIVERSITY;
201 200
202 if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY) 201 if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
203 flags |= ANTENNA_RX_DIVERSITY; 202 ant->flags |= ANTENNA_RX_DIVERSITY;
204 if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY) 203 if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
205 flags |= ANTENNA_TX_DIVERSITY; 204 ant->flags |= ANTENNA_TX_DIVERSITY;
206 205
207 if (!(ant->flags & ANTENNA_RX_DIVERSITY) && 206 if (!(ant->flags & ANTENNA_RX_DIVERSITY) &&
208 !(ant->flags & ANTENNA_TX_DIVERSITY)) { 207 !(ant->flags & ANTENNA_TX_DIVERSITY)) {
@@ -210,9 +209,6 @@ static bool rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev)
210 return true; 209 return true;
211 } 210 }
212 211
213 /* Update flags */
214 ant->flags = flags;
215
216 /* 212 /*
217 * If we have only sampled the data over the last period 213 * If we have only sampled the data over the last period
218 * we should now harvest the data. Otherwise just evaluate 214 * we should now harvest the data. Otherwise just evaluate
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 235e037e6509..7862a840984a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -669,8 +669,10 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
669 * When the erp information has changed, we should perform 669 * When the erp information has changed, we should perform
670 * additional configuration steps. For all other changes we are done. 670 * additional configuration steps. For all other changes we are done.
671 */ 671 */
672 if (changes & ~(BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) 672 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE |
673 rt2x00lib_config_erp(rt2x00dev, intf, bss_conf); 673 BSS_CHANGED_ERP_SLOT | BSS_CHANGED_BASIC_RATES |
674 BSS_CHANGED_BEACON_INT))
675 rt2x00lib_config_erp(rt2x00dev, intf, bss_conf, changes);
674} 676}
675EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed); 677EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed);
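/*
 * Driver-side sketch of this contract (the callback is hypothetical):
 * since 'changes' is forwarded, a config_erp implementation can gate
 * each register write on the exact BSS property that changed, as the
 * rt61pci and rt73usb hunks later in this diff do.
 */
static void example_config_erp(struct rt2x00_dev *rt2x00dev,
			       struct rt2x00lib_erp *erp, u32 changed)
{
	if (changed & BSS_CHANGED_ERP_SLOT) {
		/* reprogram slot time, SIFS and EIFS registers only */
	}
	if (changed & BSS_CHANGED_BASIC_RATES) {
		/* reprogram the basic rate mask register only */
	}
}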
676 678
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a3401d301058..eede99939db9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com> 4 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
4 <http://rt2x00.serialmonkey.com> 5 <http://rt2x00.serialmonkey.com>
5 6
@@ -311,7 +312,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
311 /* 312 /*
312 * Initialize information from queue 313 * Initialize information from queue
313 */ 314 */
314 txdesc->queue = entry->queue->qid; 315 txdesc->qid = entry->queue->qid;
315 txdesc->cw_min = entry->queue->cw_min; 316 txdesc->cw_min = entry->queue->cw_min;
316 txdesc->cw_max = entry->queue->cw_max; 317 txdesc->cw_max = entry->queue->cw_max;
317 txdesc->aifs = entry->queue->aifs; 318 txdesc->aifs = entry->queue->aifs;
@@ -448,15 +449,14 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
448 struct txentry_desc *txdesc) 449 struct txentry_desc *txdesc)
449{ 450{
450 struct data_queue *queue = entry->queue; 451 struct data_queue *queue = entry->queue;
451 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
452 452
453 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc); 453 queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);
454 454
455 /* 455 /*
456 * All processing on the frame has been completed, this means 456 * All processing on the frame has been completed, this means
457 * it is now ready to be dumped to userspace through debugfs. 457 * it is now ready to be dumped to userspace through debugfs.
458 */ 458 */
459 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb); 459 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
460} 460}
461 461
462static void rt2x00queue_kick_tx_queue(struct queue_entry *entry, 462static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
@@ -476,7 +476,7 @@ static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
476 */ 476 */
477 if (rt2x00queue_threshold(queue) || 477 if (rt2x00queue_threshold(queue) ||
478 !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) 478 !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
479 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid); 479 rt2x00dev->ops->lib->kick_tx_queue(queue);
480} 480}
481 481
482int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, 482int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
@@ -590,7 +590,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
590 intf->beacon->skb = NULL; 590 intf->beacon->skb = NULL;
591 591
592 if (!enable_beacon) { 592 if (!enable_beacon) {
593 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON); 593 rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
594 mutex_unlock(&intf->beacon_skb_mutex); 594 mutex_unlock(&intf->beacon_skb_mutex);
595 return 0; 595 return 0;
596 } 596 }
@@ -625,6 +625,51 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
625 return 0; 625 return 0;
626} 626}
627 627
628void rt2x00queue_for_each_entry(struct data_queue *queue,
629 enum queue_index start,
630 enum queue_index end,
631 void (*fn)(struct queue_entry *entry))
632{
633 unsigned long irqflags;
634 unsigned int index_start;
635 unsigned int index_end;
636 unsigned int i;
637
638 if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
639 ERROR(queue->rt2x00dev,
640 "Entry requested from invalid index range (%d - %d)\n",
641 start, end);
642 return;
643 }
644
645 /*
646 * Only protect the range we are going to loop over;
647 * if during our loop an extra entry is set to pending
648 * it should not be kicked during this run, since it
649 * is part of another TX operation.
650 */
651 spin_lock_irqsave(&queue->lock, irqflags);
652 index_start = queue->index[start];
653 index_end = queue->index[end];
654 spin_unlock_irqrestore(&queue->lock, irqflags);
655
656 /*
657 * Start from the TX done pointer; this guarantees that we will
658 * send out all frames in the correct order.
659 */
660 if (index_start < index_end) {
661 for (i = index_start; i < index_end; i++)
662 fn(&queue->entries[i]);
663 } else {
664 for (i = index_start; i < queue->limit; i++)
665 fn(&queue->entries[i]);
666
667 for (i = 0; i < index_end; i++)
668 fn(&queue->entries[i]);
669 }
670}
671EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
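/*
 * Usage sketch for the iterator (callback names are hypothetical):
 * a per-bus queue walk now reduces to a callback plus a single call,
 * in the same shape the USB kick/kill handlers take later in this diff.
 */
static void example_handle_entry(struct queue_entry *entry)
{
	/* per-entry work goes here */
}

static void example_walk_pending(struct data_queue *queue)
{
	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
				   example_handle_entry);
}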
672
628struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, 673struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
629 const enum data_queue_qid queue) 674 const enum data_queue_qid queue)
630{ 675{
@@ -686,13 +731,13 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
686 if (queue->index[index] >= queue->limit) 731 if (queue->index[index] >= queue->limit)
687 queue->index[index] = 0; 732 queue->index[index] = 0;
688 733
734 queue->last_action[index] = jiffies;
735
689 if (index == Q_INDEX) { 736 if (index == Q_INDEX) {
690 queue->length++; 737 queue->length++;
691 queue->last_index = jiffies;
692 } else if (index == Q_INDEX_DONE) { 738 } else if (index == Q_INDEX_DONE) {
693 queue->length--; 739 queue->length--;
694 queue->count++; 740 queue->count++;
695 queue->last_index_done = jiffies;
696 } 741 }
697 742
698 spin_unlock_irqrestore(&queue->lock, irqflags); 743 spin_unlock_irqrestore(&queue->lock, irqflags);
@@ -701,14 +746,17 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
701static void rt2x00queue_reset(struct data_queue *queue) 746static void rt2x00queue_reset(struct data_queue *queue)
702{ 747{
703 unsigned long irqflags; 748 unsigned long irqflags;
749 unsigned int i;
704 750
705 spin_lock_irqsave(&queue->lock, irqflags); 751 spin_lock_irqsave(&queue->lock, irqflags);
706 752
707 queue->count = 0; 753 queue->count = 0;
708 queue->length = 0; 754 queue->length = 0;
709 queue->last_index = jiffies; 755
710 queue->last_index_done = jiffies; 756 for (i = 0; i < Q_INDEX_MAX; i++) {
711 memset(queue->index, 0, sizeof(queue->index)); 757 queue->index[i] = 0;
758 queue->last_action[i] = jiffies;
759 }
712 760
713 spin_unlock_irqrestore(&queue->lock, irqflags); 761 spin_unlock_irqrestore(&queue->lock, irqflags);
714} 762}
@@ -718,7 +766,7 @@ void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
718 struct data_queue *queue; 766 struct data_queue *queue;
719 767
720 txall_queue_for_each(rt2x00dev, queue) 768 txall_queue_for_each(rt2x00dev, queue)
721 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid); 769 rt2x00dev->ops->lib->kill_tx_queue(queue);
722} 770}
723 771
724void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) 772void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
@@ -730,9 +778,9 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
730 rt2x00queue_reset(queue); 778 rt2x00queue_reset(queue);
731 779
732 for (i = 0; i < queue->limit; i++) { 780 for (i = 0; i < queue->limit; i++) {
733 queue->entries[i].flags = 0;
734
735 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); 781 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
782 if (queue->qid == QID_RX)
783 rt2x00queue_index_inc(queue, Q_INDEX);
736 } 784 }
737 } 785 }
738} 786}
@@ -755,7 +803,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
755 * Allocate all queue entries. 803 * Allocate all queue entries.
756 */ 804 */
757 entry_size = sizeof(*entries) + qdesc->priv_size; 805 entry_size = sizeof(*entries) + qdesc->priv_size;
758 entries = kzalloc(queue->limit * entry_size, GFP_KERNEL); 806 entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
759 if (!entries) 807 if (!entries)
760 return -ENOMEM; 808 return -ENOMEM;
761 809
@@ -891,7 +939,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
891 */ 939 */
892 rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim; 940 rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;
893 941
894 queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL); 942 queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
895 if (!queue) { 943 if (!queue) {
896 ERROR(rt2x00dev, "Queue allocation failed.\n"); 944 ERROR(rt2x00dev, "Queue allocation failed.\n");
897 return -ENOMEM; 945 return -ENOMEM;
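/*
 * Side note on the kzalloc -> kcalloc conversions in this hunk and the
 * one above, as a general kernel-API sketch: both zero the allocation,
 * but kcalloc(n, size, gfp) returns NULL if n * size would overflow,
 * where the open-coded multiplication could silently yield a buffer
 * that is too small.
 */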
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 191e7775a9c0..d81d85f34866 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -268,6 +268,7 @@ struct txdone_entry_desc {
268 * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU. 268 * @ENTRY_TXD_HT_AMPDU: This frame is part of an AMPDU.
269 * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth. 269 * @ENTRY_TXD_HT_BW_40: Use 40MHz Bandwidth.
270 * @ENTRY_TXD_HT_SHORT_GI: Use short GI. 270 * @ENTRY_TXD_HT_SHORT_GI: Use short GI.
271 * @ENTRY_TXD_HT_MIMO_PS: The receiving STA is in dynamic SM PS mode.
271 */ 272 */
272enum txentry_desc_flags { 273enum txentry_desc_flags {
273 ENTRY_TXD_RTS_FRAME, 274 ENTRY_TXD_RTS_FRAME,
@@ -286,6 +287,7 @@ enum txentry_desc_flags {
286 ENTRY_TXD_HT_AMPDU, 287 ENTRY_TXD_HT_AMPDU,
287 ENTRY_TXD_HT_BW_40, 288 ENTRY_TXD_HT_BW_40,
288 ENTRY_TXD_HT_SHORT_GI, 289 ENTRY_TXD_HT_SHORT_GI,
290 ENTRY_TXD_HT_MIMO_PS,
289}; 291};
290 292
291/** 293/**
@@ -294,7 +296,7 @@ enum txentry_desc_flags {
294 * Summary of information for the frame descriptor before sending a TX frame. 296 * Summary of information for the frame descriptor before sending a TX frame.
295 * 297 *
296 * @flags: Descriptor flags (See &enum queue_entry_flags). 298 * @flags: Descriptor flags (See &enum queue_entry_flags).
297 * @queue: Queue identification (See &enum data_queue_qid). 299 * @qid: Queue identification (See &enum data_queue_qid).
298 * @length: Length of the entire frame. 300 * @length: Length of the entire frame.
299 * @header_length: Length of 802.11 header. 301 * @header_length: Length of 802.11 header.
300 * @length_high: PLCP length high word. 302 * @length_high: PLCP length high word.
@@ -320,7 +322,7 @@ enum txentry_desc_flags {
320struct txentry_desc { 322struct txentry_desc {
321 unsigned long flags; 323 unsigned long flags;
322 324
323 enum data_queue_qid queue; 325 enum data_queue_qid qid;
324 326
325 u16 length; 327 u16 length;
326 u16 header_length; 328 u16 header_length;
@@ -358,17 +360,17 @@ struct txentry_desc {
358 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data 360 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
359 * transfer (either TX or RX depending on the queue). The entry should 361 * transfer (either TX or RX depending on the queue). The entry should
360 * only be touched after the device has signaled it is done with it. 362 * only be touched after the device has signaled it is done with it.
361 * @ENTRY_OWNER_DEVICE_CRYPTO: This entry is owned by the device for data
362 * encryption or decryption. The entry should only be touched after
363 * the device has signaled it is done with it.
364 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting 363 * @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
365 * for the signal to start sending. 364 * for the signal to start sending.
365 * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
366 * while transferring the data to the hardware. No TX status report will
367 * be expected from the hardware.
366 */ 368 */
367enum queue_entry_flags { 369enum queue_entry_flags {
368 ENTRY_BCN_ASSIGNED, 370 ENTRY_BCN_ASSIGNED,
369 ENTRY_OWNER_DEVICE_DATA, 371 ENTRY_OWNER_DEVICE_DATA,
370 ENTRY_OWNER_DEVICE_CRYPTO,
371 ENTRY_DATA_PENDING, 372 ENTRY_DATA_PENDING,
373 ENTRY_DATA_IO_FAILED
372}; 374};
373 375
374/** 376/**
@@ -399,18 +401,18 @@ struct queue_entry {
399 * 401 *
400 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is 402 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
401 * owned by the hardware then the queue is considered to be full. 403 * owned by the hardware then the queue is considered to be full.
404 * @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
405 * transferred to the hardware.
402 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by 406 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
403 * the hardware and for which we need to run the txdone handler. If this 407 * the hardware and for which we need to run the txdone handler. If this
404 * entry is not owned by the hardware the queue is considered to be empty. 408 * entry is not owned by the hardware the queue is considered to be empty.
405 * @Q_INDEX_CRYPTO: Index pointer to the next entry which encryption/decription
406 * will be completed by the hardware next.
407 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size 409 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
408 * of the index array. 410 * of the index array.
409 */ 411 */
410enum queue_index { 412enum queue_index {
411 Q_INDEX, 413 Q_INDEX,
414 Q_INDEX_DMA_DONE,
412 Q_INDEX_DONE, 415 Q_INDEX_DONE,
413 Q_INDEX_CRYPTO,
414 Q_INDEX_MAX, 416 Q_INDEX_MAX,
415}; 417};
416 418
@@ -446,13 +448,12 @@ struct data_queue {
446 enum data_queue_qid qid; 448 enum data_queue_qid qid;
447 449
448 spinlock_t lock; 450 spinlock_t lock;
449 unsigned long last_index;
450 unsigned long last_index_done;
451 unsigned int count; 451 unsigned int count;
452 unsigned short limit; 452 unsigned short limit;
453 unsigned short threshold; 453 unsigned short threshold;
454 unsigned short length; 454 unsigned short length;
455 unsigned short index[Q_INDEX_MAX]; 455 unsigned short index[Q_INDEX_MAX];
456 unsigned long last_action[Q_INDEX_MAX];
456 457
457 unsigned short txop; 458 unsigned short txop;
458 unsigned short aifs; 459 unsigned short aifs;
@@ -565,6 +566,22 @@ struct data_queue_desc {
565 queue_loop(__entry, (__dev)->tx, queue_end(__dev)) 566 queue_loop(__entry, (__dev)->tx, queue_end(__dev))
566 567
567/** 568/**
569 * rt2x00queue_for_each_entry - Loop through all entries in the queue
570 * @queue: Pointer to &struct data_queue
571 * @start: &enum queue_index to start from
572 * @end: &enum queue_index to end at
573 * @fn: The function to call for each &struct queue_entry
574 *
575 * This will walk through all entries in the queue, in chronological
576 * order. This means it will start at the current @start pointer
577 * and will walk through the queue until it reaches the @end pointer.
578 */
579void rt2x00queue_for_each_entry(struct data_queue *queue,
580 enum queue_index start,
581 enum queue_index end,
582 void (*fn)(struct queue_entry *entry));
583
584/**
568 * rt2x00queue_empty - Check if the queue is empty. 585 * rt2x00queue_empty - Check if the queue is empty.
569 * @queue: Queue to check if empty. 586 * @queue: Queue to check if empty.
570 */ 587 */
@@ -601,12 +618,23 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
601} 618}
602 619
603/** 620/**
604 * rt2x00queue_timeout - Check if a timeout occured for this queue 621 * rt2x00queue_timeout - Check if a timeout occurred for status reports
605 * @queue: Queue to check. 622 * @queue: Queue to check.
606 */ 623 */
607static inline int rt2x00queue_timeout(struct data_queue *queue) 624static inline int rt2x00queue_timeout(struct data_queue *queue)
608{ 625{
609 return time_after(queue->last_index, queue->last_index_done + (HZ / 10)); 626 return time_after(queue->last_action[Q_INDEX_DMA_DONE],
627 queue->last_action[Q_INDEX_DONE] + (HZ / 10));
628}
629
630/**
631 * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
632 * @queue: Queue to check.
633 */
634static inline int rt2x00queue_dma_timeout(struct data_queue *queue)
635{
636 return time_after(queue->last_action[Q_INDEX],
637 queue->last_action[Q_INDEX_DMA_DONE] + (HZ / 10));
610} 638}
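/*
 * Watchdog sketch built on the two helpers above, assuming the
 * timestamp flow Q_INDEX -> Q_INDEX_DMA_DONE -> Q_INDEX_DONE
 * (the helper name is hypothetical):
 */
static inline bool example_queue_stalled(struct data_queue *queue)
{
	/* Kicked to the device, but the DMA transfer never completed. */
	if (rt2x00queue_dma_timeout(queue))
		return true;

	/* DMA completed, but no TX status report arrived afterwards. */
	return rt2x00queue_timeout(queue);
}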
611 639
612/** 640/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index ff3a36622d1b..4c5ae3d45625 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 4 <http://rt2x00.serialmonkey.com>
4 5
5 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
@@ -167,137 +168,142 @@ EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);
167/* 168/*
168 * TX data handlers. 169 * TX data handlers.
169 */ 170 */
170static void rt2x00usb_interrupt_txdone(struct urb *urb) 171static void rt2x00usb_work_txdone_entry(struct queue_entry *entry)
171{ 172{
172 struct queue_entry *entry = (struct queue_entry *)urb->context;
173 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
174 struct txdone_entry_desc txdesc;
175
176 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
177 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
178 return;
179
180 /* 173 /*
181 * Obtain the status about this packet. 174 * If the transfer to the hardware succeeded, it does not mean the
182 * Note that when the status is 0 it does not mean the
183 * frame was sent out correctly. It only means the frame 175 * frame was sent out correctly. It only means the frame
184 * was successfully pushed to the hardware; we have no 176 * was successfully pushed to the hardware; we have no
185 * way to determine the transmission status right now. 177 * way to determine the transmission status right now.
186 * (Only indirectly by looking at the failed TX counters 178 * (Only indirectly by looking at the failed TX counters
187 * in the register). 179 * in the register).
188 */ 180 */
189 txdesc.flags = 0; 181 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
190 if (!urb->status) 182 rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
191 __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
192 else 183 else
193 __set_bit(TXDONE_FAILURE, &txdesc.flags); 184 rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
194 txdesc.retry = 0;
195
196 rt2x00lib_txdone(entry, &txdesc);
197} 185}
198 186
199static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry) 187static void rt2x00usb_work_txdone(struct work_struct *work)
200{ 188{
201 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 189 struct rt2x00_dev *rt2x00dev =
202 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 190 container_of(work, struct rt2x00_dev, txdone_work);
203 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 191 struct data_queue *queue;
204 u32 length; 192 struct queue_entry *entry;
205 193
206 if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) { 194 tx_queue_for_each(rt2x00dev, queue) {
207 /* 195 while (!rt2x00queue_empty(queue)) {
208 * USB devices cannot blindly pass the skb->len as the 196 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
209 * length of the data to usb_fill_bulk_urb. Pass the skb
210 * to the driver to determine what the length should be.
211 */
212 length = rt2x00dev->ops->lib->get_tx_data_len(entry);
213 197
214 usb_fill_bulk_urb(entry_priv->urb, usb_dev, 198 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
215 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), 199 break;
216 entry->skb->data, length,
217 rt2x00usb_interrupt_txdone, entry);
218 200
219 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 201 rt2x00usb_work_txdone_entry(entry);
202 }
220 } 203 }
221} 204}
222 205
223void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 206static void rt2x00usb_interrupt_txdone(struct urb *urb)
224 const enum data_queue_qid qid)
225{ 207{
226 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid); 208 struct queue_entry *entry = (struct queue_entry *)urb->context;
227 unsigned long irqflags; 209 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
228 unsigned int index; 210
229 unsigned int index_done; 211 if (!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
230 unsigned int i; 212 return;
231 213
232 /* 214 /*
233 * Only protect the range we are going to loop over, 215 * Report the frame as DMA done
234 * if during our loop a extra entry is set to pending
235 * it should not be kicked during this run, since it
236 * is part of another TX operation.
237 */ 216 */
238 spin_lock_irqsave(&queue->lock, irqflags); 217 rt2x00lib_dmadone(entry);
239 index = queue->index[Q_INDEX];
240 index_done = queue->index[Q_INDEX_DONE];
241 spin_unlock_irqrestore(&queue->lock, irqflags);
242 218
243 /* 219 /*
244 * Start from the TX done pointer, this guarentees that we will 220 * Check if the frame was correctly uploaded
245 * send out all frames in the correct order.
246 */ 221 */
247 if (index_done < index) { 222 if (urb->status)
248 for (i = index_done; i < index; i++) 223 __set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
249 rt2x00usb_kick_tx_entry(&queue->entries[i]);
250 } else {
251 for (i = index_done; i < queue->limit; i++)
252 rt2x00usb_kick_tx_entry(&queue->entries[i]);
253 224
254 for (i = 0; i < index; i++) 225 /*
255 rt2x00usb_kick_tx_entry(&queue->entries[i]); 226 * Schedule the delayed work for reading the TX status
256 } 227 * from the device.
228 */
229 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
230 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
231 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
232}
233
234static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
235{
236 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
237 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
238 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
239 u32 length;
240
241 if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
242 return;
243
244 /*
245 * USB devices cannot blindly pass the skb->len as the
246 * length of the data to usb_fill_bulk_urb. Pass the skb
247 * to the driver to determine what the length should be.
248 */
249 length = rt2x00dev->ops->lib->get_tx_data_len(entry);
250
251 usb_fill_bulk_urb(entry_priv->urb, usb_dev,
252 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
253 entry->skb->data, length,
254 rt2x00usb_interrupt_txdone, entry);
255
256 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
257}
258
259void rt2x00usb_kick_tx_queue(struct data_queue *queue)
260{
261 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
262 rt2x00usb_kick_tx_entry);
257} 263}
258EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue); 264EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);
259 265
260void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 266static void rt2x00usb_kill_tx_entry(struct queue_entry *entry)
261 const enum data_queue_qid qid)
262{ 267{
263 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid); 268 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
264 struct queue_entry_priv_usb *entry_priv; 269 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
265 struct queue_entry_priv_usb_bcn *bcn_priv; 270 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
266 unsigned int i; 271
267 bool kill_guard; 272 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
273 return;
274
275 usb_kill_urb(entry_priv->urb);
268 276
269 /* 277 /*
270 * When killing the beacon queue, we must also kill 278 * Kill guardian urb (if required by driver).
271 * the beacon guard byte.
272 */ 279 */
273 kill_guard = 280 if ((entry->queue->qid == QID_BEACON) &&
274 (qid == QID_BEACON) && 281 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
275 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)); 282 usb_kill_urb(bcn_priv->guardian_urb);
276 283
277 /* 284 /*
278 * Cancel all entries. 285 * We need a short delay here to wait for
286 * the URB to be canceled
279 */ 287 */
280 for (i = 0; i < queue->limit; i++) { 288 do {
281 entry_priv = queue->entries[i].priv_data; 289 udelay(100);
282 usb_kill_urb(entry_priv->urb); 290 } while (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags));
291}
283 292
284 /* 293void rt2x00usb_kill_tx_queue(struct data_queue *queue)
285 * Kill guardian urb (if required by driver). 294{
286 */ 295 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
287 if (kill_guard) { 296 rt2x00usb_kill_tx_entry);
288 bcn_priv = queue->entries[i].priv_data;
289 usb_kill_urb(bcn_priv->guardian_urb);
290 }
291 }
292} 297}
293EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue); 298EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);
294 299
295static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue) 300static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
296{ 301{
297 struct queue_entry_priv_usb *entry_priv; 302 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
298 unsigned short threshold = queue->threshold; 303 unsigned short threshold = queue->threshold;
299 304
300 WARNING(queue->rt2x00dev, "TX queue %d timed out, invoke reset", queue->qid); 305 WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
306 " invoke forced forced reset", queue->qid);
301 307
302 /* 308 /*
303 * Temporarily disable the TX queue, this will force mac80211 309 * Temporarily disable the TX queue, this will force mac80211
@@ -307,20 +313,33 @@ static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
307 * queue from being enabled during the txdone handler. 313 * queue from being enabled during the txdone handler.
308 */ 314 */
309 queue->threshold = queue->limit; 315 queue->threshold = queue->limit;
310 ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid); 316 ieee80211_stop_queue(rt2x00dev->hw, queue->qid);
311 317
312 /* 318 /*
313 * Reset all currently uploaded TX frames. 319 * Kill all entries in the queue; afterwards we need to
320 * wait a bit for all URBs to be cancelled.
314 */ 321 */
315 while (!rt2x00queue_empty(queue)) { 322 rt2x00usb_kill_tx_queue(queue);
316 entry_priv = rt2x00queue_get_entry(queue, Q_INDEX_DONE)->priv_data;
317 usb_kill_urb(entry_priv->urb);
318 323
319 /* 324 /*
320 * We need a short delay here to wait for 325 * In case a driver has overridden the txdone_work
321 * the URB to be canceled and invoked the tx_done handler. 326 * function, we invoke the TX done through there.
322 */ 327 */
323 udelay(200); 328 rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);
329
330 /*
331 * Security measure: if the driver did override the
332 * txdone_work function, and the hardware did arrive
333 * in a state which causes it to malfunction, it is
334 * possible that the driver couldn't handle the txdone
335 * event correctly. So after giving the driver the
336 * chance to cleanup, we now force a cleanup of any
337 * leftovers.
338 */
339 if (!rt2x00queue_empty(queue)) {
340 WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
341 " status handling failed, invoke hard reset", queue->qid);
342 rt2x00usb_work_txdone(&rt2x00dev->txdone_work);
324 } 343 }
325 344
326 /* 345 /*
@@ -328,7 +347,15 @@ static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
328 * queue again. 347 * queue again.
329 */ 348 */
330 queue->threshold = threshold; 349 queue->threshold = threshold;
331 ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid); 350 ieee80211_wake_queue(rt2x00dev->hw, queue->qid);
351}
352
353static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
354{
355 WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
356 " invoke forced tx handler", queue->qid);
357
358 ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
332} 359}
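/*
 * Note on the forced path above (assumed intent): calling
 * rt2x00dev->txdone_work.func() runs the txdone handler synchronously
 * in watchdog context rather than waiting for the workqueue, so the
 * queue can be drained before it is woken up again.
 */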
333 360
334void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev) 361void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -336,8 +363,10 @@ void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
336 struct data_queue *queue; 363 struct data_queue *queue;
337 364
338 tx_queue_for_each(rt2x00dev, queue) { 365 tx_queue_for_each(rt2x00dev, queue) {
366 if (rt2x00queue_dma_timeout(queue))
367 rt2x00usb_watchdog_tx_dma(queue);
339 if (rt2x00queue_timeout(queue)) 368 if (rt2x00queue_timeout(queue))
340 rt2x00usb_watchdog_reset_tx(queue); 369 rt2x00usb_watchdog_tx_status(queue);
341 } 370 }
342} 371}
343EXPORT_SYMBOL_GPL(rt2x00usb_watchdog); 372EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
@@ -345,38 +374,62 @@ EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
345/* 374/*
346 * RX data handlers. 375 * RX data handlers.
347 */ 376 */
377static void rt2x00usb_work_rxdone(struct work_struct *work)
378{
379 struct rt2x00_dev *rt2x00dev =
380 container_of(work, struct rt2x00_dev, rxdone_work);
381 struct queue_entry *entry;
382 struct skb_frame_desc *skbdesc;
383 u8 rxd[32];
384
385 while (!rt2x00queue_empty(rt2x00dev->rx)) {
386 entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);
387
388 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
389 break;
390
391 /*
392 * Fill in desc fields of the skb descriptor
393 */
394 skbdesc = get_skb_frame_desc(entry->skb);
395 skbdesc->desc = rxd;
396 skbdesc->desc_len = entry->queue->desc_size;
397
398 /*
399 * Send the frame to rt2x00lib for further processing.
400 */
401 rt2x00lib_rxdone(rt2x00dev, entry);
402 }
403}
404
348static void rt2x00usb_interrupt_rxdone(struct urb *urb) 405static void rt2x00usb_interrupt_rxdone(struct urb *urb)
349{ 406{
350 struct queue_entry *entry = (struct queue_entry *)urb->context; 407 struct queue_entry *entry = (struct queue_entry *)urb->context;
351 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 408 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
352 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
353 u8 rxd[32];
354 409
355 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) || 410 if (!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
356 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
357 return; 411 return;
358 412
359 /* 413 /*
360 * Check if the received data is simply too small 414 * Report the frame as DMA done
361 * to be actually valid, or if the urb is signaling
362 * a problem.
363 */ 415 */
364 if (urb->actual_length < entry->queue->desc_size || urb->status) { 416 rt2x00lib_dmadone(entry);
365 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
366 usb_submit_urb(urb, GFP_ATOMIC);
367 return;
368 }
369 417
370 /* 418 /*
371 * Fill in desc fields of the skb descriptor 419 * Check if the received data is simply too small
420 * to be actually valid, or if the urb is signaling
421 * a problem.
372 */ 422 */
373 skbdesc->desc = rxd; 423 if (urb->actual_length < entry->queue->desc_size || urb->status)
374 skbdesc->desc_len = entry->queue->desc_size; 424 __set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
375 425
376 /* 426 /*
377 * Send the frame to rt2x00lib for further processing. 427 * Schedule the delayed work for reading the RX status
428 * from the device.
378 */ 429 */
379 rt2x00lib_rxdone(rt2x00dev, entry); 430 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
431 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
432 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
380} 433}
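/*
 * Summary sketch of the completion split introduced here, assuming the
 * work items registered in rt2x00usb_probe() below:
 *
 *	URB complete (atomic context)      work item (process context)
 *	-----------------------------      ---------------------------
 *	clear ENTRY_OWNER_DEVICE_DATA      loop while queue not empty
 *	rt2x00lib_dmadone(entry)           rt2x00lib_rxdone()/_txdone*()
 *	set ENTRY_DATA_IO_FAILED on error  hand completed frames to mac80211
 *	ieee80211_queue_work(...)
 */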
381 434
382/* 435/*
@@ -391,7 +444,7 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
391 * The USB version of kill_tx_queue also works 444 * The USB version of kill_tx_queue also works
392 * on the RX queue. 445 * on the RX queue.
393 */ 446 */
394 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_RX); 447 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev->rx);
395} 448}
396EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); 449EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
397 450
@@ -405,6 +458,8 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
405 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 458 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
406 int pipe; 459 int pipe;
407 460
461 entry->flags = 0;
462
408 if (entry->queue->qid == QID_RX) { 463 if (entry->queue->qid == QID_RX) {
409 pipe = usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint); 464 pipe = usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint);
410 usb_fill_bulk_urb(entry_priv->urb, usb_dev, pipe, 465 usb_fill_bulk_urb(entry_priv->urb, usb_dev, pipe,
@@ -413,8 +468,6 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
413 468
414 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 469 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
415 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 470 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
416 } else {
417 entry->flags = 0;
418 } 471 }
419} 472}
420EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry); 473EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
@@ -659,6 +712,9 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
659 712
660 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB); 713 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
661 714
715 INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
716 INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);
717
662 retval = rt2x00usb_alloc_reg(rt2x00dev); 718 retval = rt2x00usb_alloc_reg(rt2x00dev);
663 if (retval) 719 if (retval)
664 goto exit_free_device; 720 goto exit_free_device;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index d3d3ddc40875..c2d997f67b3e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -379,25 +379,21 @@ struct queue_entry_priv_usb_bcn {
379 379
380/** 380/**
381 * rt2x00usb_kick_tx_queue - Kick data queue 381 * rt2x00usb_kick_tx_queue - Kick data queue
382 * @rt2x00dev: Pointer to &struct rt2x00_dev 382 * @queue: Data queue to kick
383 * @qid: Data queue to kick
384 * 383 *
385 * This will walk through all entries of the queue and push all pending 384 * This will walk through all entries of the queue and push all pending
386 * frames to the hardware as a single burst. 385 * frames to the hardware as a single burst.
387 */ 386 */
388void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 387void rt2x00usb_kick_tx_queue(struct data_queue *queue);
389 const enum data_queue_qid qid);
390 388
391/** 389/**
392 * rt2x00usb_kill_tx_queue - Kill data queue 390 * rt2x00usb_kill_tx_queue - Kill data queue
393 * @rt2x00dev: Pointer to &struct rt2x00_dev 391 * @queue: Data queue to kill
394 * @qid: Data queue to kill
395 * 392 *
396 * This will walk through all entries of the queue and kill all 393 * This will walk through all entries of the queue and kill all
397 * previously kicked frames before they can be send. 394 * previously kicked frames before they can be send.
398 */ 395 */
399void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 396void rt2x00usb_kill_tx_queue(struct data_queue *queue);
400 const enum data_queue_qid qid);
401 397
402/** 398/**
403 * rt2x00usb_watchdog - Watchdog for USB communication 399 * rt2x00usb_watchdog - Watchdog for USB communication
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index e539c6cb636f..97b3935f615b 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -594,7 +594,8 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
594} 594}
595 595
596static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev, 596static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev,
597 struct rt2x00lib_erp *erp) 597 struct rt2x00lib_erp *erp,
598 u32 changed)
598{ 599{
599 u32 reg; 600 u32 reg;
600 601
@@ -603,28 +604,36 @@ static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev,
603 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); 604 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
604 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); 605 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
605 606
606 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg); 607 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
607 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1); 608 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
608 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, 609 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
609 !!erp->short_preamble); 610 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
610 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); 611 !!erp->short_preamble);
612 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
613 }
611 614
612 rt2x00pci_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates); 615 if (changed & BSS_CHANGED_BASIC_RATES)
616 rt2x00pci_register_write(rt2x00dev, TXRX_CSR5,
617 erp->basic_rates);
613 618
614 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 619 if (changed & BSS_CHANGED_BEACON_INT) {
615 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 620 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
616 erp->beacon_int * 16); 621 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
617 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 622 erp->beacon_int * 16);
623 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
624 }
618 625
619 rt2x00pci_register_read(rt2x00dev, MAC_CSR9, &reg); 626 if (changed & BSS_CHANGED_ERP_SLOT) {
620 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time); 627 rt2x00pci_register_read(rt2x00dev, MAC_CSR9, &reg);
621 rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg); 628 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
629 rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg);
622 630
623 rt2x00pci_register_read(rt2x00dev, MAC_CSR8, &reg); 631 rt2x00pci_register_read(rt2x00dev, MAC_CSR8, &reg);
624 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs); 632 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs);
625 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); 633 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3);
626 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs); 634 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs);
627 rt2x00pci_register_write(rt2x00dev, MAC_CSR8, reg); 635 rt2x00pci_register_write(rt2x00dev, MAC_CSR8, reg);
636 }
628} 637}
629 638
630static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev, 639static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
@@ -1050,7 +1059,7 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
1050 /* 1059 /*
1051 * Determine r17 bounds. 1060 * Determine r17 bounds.
1052 */ 1061 */
1053 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { 1062 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
1054 low_bound = 0x28; 1063 low_bound = 0x28;
1055 up_bound = 0x48; 1064 up_bound = 0x48;
1056 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { 1065 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) {
@@ -1645,6 +1654,7 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1645 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); 1654 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
1646 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask); 1655 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask);
1647 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask); 1656 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask);
1657 rt2x00_set_field32(&reg, INT_MASK_CSR_BEACON_DONE, mask);
1648 rt2x00_set_field32(&reg, INT_MASK_CSR_ENABLE_MITIGATION, mask); 1658 rt2x00_set_field32(&reg, INT_MASK_CSR_ENABLE_MITIGATION, mask);
1649 rt2x00_set_field32(&reg, INT_MASK_CSR_MITIGATION_PERIOD, 0xff); 1659 rt2x00_set_field32(&reg, INT_MASK_CSR_MITIGATION_PERIOD, 0xff);
1650 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); 1660 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
@@ -1658,6 +1668,7 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1658 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_5, mask); 1668 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_5, mask);
1659 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_6, mask); 1669 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_6, mask);
1660 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask); 1670 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask);
1671 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask);
1661 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); 1672 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
1662} 1673}
1663 1674
@@ -1766,12 +1777,11 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1766/* 1777/*
1767 * TX descriptor initialization 1778 * TX descriptor initialization
1768 */ 1779 */
1769static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1780static void rt61pci_write_tx_desc(struct queue_entry *entry,
1770 struct sk_buff *skb,
1771 struct txentry_desc *txdesc) 1781 struct txentry_desc *txdesc)
1772{ 1782{
1773 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1783 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1774 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; 1784 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1775 __le32 *txd = entry_priv->desc; 1785 __le32 *txd = entry_priv->desc;
1776 u32 word; 1786 u32 word;
1777 1787
@@ -1779,7 +1789,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1779 * Start writing the descriptor words. 1789 * Start writing the descriptor words.
1780 */ 1790 */
1781 rt2x00_desc_read(txd, 1, &word); 1791 rt2x00_desc_read(txd, 1, &word);
1782 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue); 1792 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid);
1783 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1793 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
1784 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1794 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1785 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1795 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
@@ -1802,15 +1812,15 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1802 } 1812 }
1803 1813
1804 rt2x00_desc_read(txd, 5, &word); 1814 rt2x00_desc_read(txd, 5, &word);
1805 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, skbdesc->entry->queue->qid); 1815 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
1806 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, 1816 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE,
1807 skbdesc->entry->entry_idx); 1817 skbdesc->entry->entry_idx);
1808 rt2x00_set_field32(&word, TXD_W5_TX_POWER, 1818 rt2x00_set_field32(&word, TXD_W5_TX_POWER,
1809 TXPOWER_TO_DEV(rt2x00dev->tx_power)); 1819 TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
1810 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1820 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1811 rt2x00_desc_write(txd, 5, word); 1821 rt2x00_desc_write(txd, 5, word);
1812 1822
1813 if (txdesc->queue != QID_BEACON) { 1823 if (txdesc->qid != QID_BEACON) {
1814 rt2x00_desc_read(txd, 6, &word); 1824 rt2x00_desc_read(txd, 6, &word);
1815 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS, 1825 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
1816 skbdesc->skb_dma); 1826 skbdesc->skb_dma);
@@ -1857,7 +1867,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1857 */ 1867 */
1858 skbdesc->desc = txd; 1868 skbdesc->desc = txd;
1859 skbdesc->desc_len = 1869 skbdesc->desc_len =
1860 (txdesc->queue == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE; 1870 (txdesc->qid == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE;
1861} 1871}
1862 1872
1863/* 1873/*
@@ -1882,7 +1892,7 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1882 /* 1892 /*
1883 * Write the TX descriptor for the beacon. 1893 * Write the TX descriptor for the beacon.
1884 */ 1894 */
1885 rt61pci_write_tx_desc(rt2x00dev, entry->skb, txdesc); 1895 rt61pci_write_tx_desc(entry, txdesc);
1886 1896
1887 /* 1897 /*
1888 * Dump beacon to userspace through debugfs. 1898 * Dump beacon to userspace through debugfs.
@@ -1918,34 +1928,34 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1918 entry->skb = NULL; 1928 entry->skb = NULL;
1919} 1929}
1920 1930
1921static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1931static void rt61pci_kick_tx_queue(struct data_queue *queue)
1922 const enum data_queue_qid queue)
1923{ 1932{
1933 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1924 u32 reg; 1934 u32 reg;
1925 1935
1926 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1936 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1927 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue == QID_AC_BE)); 1937 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue->qid == QID_AC_BE));
1928 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue == QID_AC_BK)); 1938 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue->qid == QID_AC_BK));
1929 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, (queue == QID_AC_VI)); 1939 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, (queue->qid == QID_AC_VI));
1930 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, (queue == QID_AC_VO)); 1940 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, (queue->qid == QID_AC_VO));
1931 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1941 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1932} 1942}
1933 1943
1934static void rt61pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev, 1944static void rt61pci_kill_tx_queue(struct data_queue *queue)
1935 const enum data_queue_qid qid)
1936{ 1945{
1946 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1937 u32 reg; 1947 u32 reg;
1938 1948
1939 if (qid == QID_BEACON) { 1949 if (queue->qid == QID_BEACON) {
1940 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0); 1950 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0);
1941 return; 1951 return;
1942 } 1952 }
1943 1953
1944 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1954 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1945 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, (qid == QID_AC_BE)); 1955 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, (queue->qid == QID_AC_BE));
1946 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, (qid == QID_AC_BK)); 1956 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, (queue->qid == QID_AC_BK));
1947 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, (qid == QID_AC_VI)); 1957 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, (queue->qid == QID_AC_VI));
1948 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, (qid == QID_AC_VO)); 1958 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, (queue->qid == QID_AC_VO));
1949 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1959 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1950} 1960}
1951 1961
@@ -1972,7 +1982,7 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1972 return 0; 1982 return 0;
1973 } 1983 }
1974 1984
1975 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { 1985 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
1976 if (lna == 3 || lna == 2) 1986 if (lna == 3 || lna == 2)
1977 offset += 10; 1987 offset += 10;
1978 } 1988 }
@@ -2107,11 +2117,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2107 "TX status report missed for entry %d\n", 2117 "TX status report missed for entry %d\n",
2108 entry_done->entry_idx); 2118 entry_done->entry_idx);
2109 2119
2110 txdesc.flags = 0; 2120 rt2x00lib_txdone_noinfo(entry_done, TXDONE_UNKNOWN);
2111 __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
2112 txdesc.retry = 0;
2113
2114 rt2x00lib_txdone(entry_done, &txdesc);
2115 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 2121 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
2116 } 2122 }
2117 2123
@@ -2654,20 +2660,24 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2654 /* 2660 /*
2655 * Create channel information array 2661 * Create channel information array
2656 */ 2662 */
2657 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 2663 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
2658 if (!info) 2664 if (!info)
2659 return -ENOMEM; 2665 return -ENOMEM;
2660 2666
2661 spec->channels_info = info; 2667 spec->channels_info = info;
2662 2668
2663 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); 2669 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
2664 for (i = 0; i < 14; i++) 2670 for (i = 0; i < 14; i++) {
2665 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 2671 info[i].max_power = MAX_TXPOWER;
2672 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2673 }
2666 2674
2667 if (spec->num_channels > 14) { 2675 if (spec->num_channels > 14) {
2668 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 2676 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2669 for (i = 14; i < spec->num_channels; i++) 2677 for (i = 14; i < spec->num_channels; i++) {
2670 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 2678 info[i].max_power = MAX_TXPOWER;
2679 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2680 }
2671 } 2681 }
2672 2682
2673 return 0; 2683 return 0;
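
kzalloc(n * size, ...) performs the multiplication before the allocator ever sees it, so a large num_channels could wrap on 32-bit and yield an undersized buffer; kcalloc() does the same zeroed allocation but fails with NULL on overflow. The two forms side by side:

	/* overflow-prone: the product can wrap before kzalloc() runs */
	info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);

	/* checked: kcalloc() returns NULL if the product would overflow */
	info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
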
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index aa9de18fd410..e22f01c1818e 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -545,7 +545,8 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
545} 545}
546 546
547static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev, 547static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev,
548 struct rt2x00lib_erp *erp) 548 struct rt2x00lib_erp *erp,
549 u32 changed)
549{ 550{
550 u32 reg; 551 u32 reg;
551 552
@@ -554,28 +555,36 @@ static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev,
554 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); 555 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
555 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); 556 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
556 557
557 rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg); 558 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
558 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1); 559 rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
559 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, 560 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
560 !!erp->short_preamble); 561 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
561 rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg); 562 !!erp->short_preamble);
563 rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg);
564 }
562 565
563 rt2x00usb_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates); 566 if (changed & BSS_CHANGED_BASIC_RATES)
567 rt2x00usb_register_write(rt2x00dev, TXRX_CSR5,
568 erp->basic_rates);
564 569
565 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 570 if (changed & BSS_CHANGED_BEACON_INT) {
566 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 571 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
567 erp->beacon_int * 16); 572 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
568 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 573 erp->beacon_int * 16);
574 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
575 }
569 576
570 rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg); 577 if (changed & BSS_CHANGED_ERP_SLOT) {
571 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time); 578 rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg);
572 rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg); 579 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
580 rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg);
573 581
574 rt2x00usb_register_read(rt2x00dev, MAC_CSR8, &reg); 582 rt2x00usb_register_read(rt2x00dev, MAC_CSR8, &reg);
575 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs); 583 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs);
576 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); 584 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3);
577 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs); 585 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs);
578 rt2x00usb_register_write(rt2x00dev, MAC_CSR8, reg); 586 rt2x00usb_register_write(rt2x00dev, MAC_CSR8, reg);
587 }
579} 588}
580 589
581static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev, 590static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
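
Each register block in rt73usb_config_erp() is now guarded by the matching BSS_CHANGED_* bit, so an ERP update touches only the registers whose backing property actually changed; on USB every register access is a synchronous control transfer, so skipped writes are real savings. The callback pattern reduced to one guard (body illustrative; the write helper is hypothetical):

	static void example_config_erp(struct rt2x00_dev *rt2x00dev,
				       struct rt2x00lib_erp *erp, u32 changed)
	{
		/* touch the slot-time register only when that property changed */
		if (changed & BSS_CHANGED_ERP_SLOT)
			write_slot_time(rt2x00dev, erp->slot_time); /* hypothetical */
	}
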
@@ -929,7 +938,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
929 /* 938 /*
930 * Determine r17 bounds. 939 * Determine r17 bounds.
931 */ 940 */
932 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { 941 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
933 low_bound = 0x28; 942 low_bound = 0x28;
934 up_bound = 0x48; 943 up_bound = 0x48;
935 944
@@ -1426,12 +1435,11 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1426/* 1435/*
1427 * TX descriptor initialization 1436 * TX descriptor initialization
1428 */ 1437 */
1429static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1438static void rt73usb_write_tx_desc(struct queue_entry *entry,
1430 struct sk_buff *skb,
1431 struct txentry_desc *txdesc) 1439 struct txentry_desc *txdesc)
1432{ 1440{
1433 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1441 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1434 __le32 *txd = (__le32 *) skb->data; 1442 __le32 *txd = (__le32 *) entry->skb->data;
1435 u32 word; 1443 u32 word;
1436 1444
1437 /* 1445 /*
@@ -1464,7 +1472,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1464 rt2x00_desc_write(txd, 0, word); 1472 rt2x00_desc_write(txd, 0, word);
1465 1473
1466 rt2x00_desc_read(txd, 1, &word); 1474 rt2x00_desc_read(txd, 1, &word);
1467 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue); 1475 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid);
1468 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1476 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
1469 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1477 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1470 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1478 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
@@ -1487,7 +1495,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1487 1495
1488 rt2x00_desc_read(txd, 5, &word); 1496 rt2x00_desc_read(txd, 5, &word);
1489 rt2x00_set_field32(&word, TXD_W5_TX_POWER, 1497 rt2x00_set_field32(&word, TXD_W5_TX_POWER,
1490 TXPOWER_TO_DEV(rt2x00dev->tx_power)); 1498 TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
1491 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1499 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1492 rt2x00_desc_write(txd, 5, word); 1500 rt2x00_desc_write(txd, 5, word);
1493 1501
@@ -1526,7 +1534,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1526 /* 1534 /*
1527 * Write the TX descriptor for the beacon. 1535 * Write the TX descriptor for the beacon.
1528 */ 1536 */
1529 rt73usb_write_tx_desc(rt2x00dev, entry->skb, txdesc); 1537 rt73usb_write_tx_desc(entry, txdesc);
1530 1538
1531 /* 1539 /*
1532 * Dump beacon to userspace through debugfs. 1540 * Dump beacon to userspace through debugfs.
@@ -1574,6 +1582,14 @@ static int rt73usb_get_tx_data_len(struct queue_entry *entry)
1574 return length; 1582 return length;
1575} 1583}
1576 1584
1585static void rt73usb_kill_tx_queue(struct data_queue *queue)
1586{
1587 if (queue->qid == QID_BEACON)
1588 rt2x00usb_register_write(queue->rt2x00dev, TXRX_CSR9, 0);
1589
1590 rt2x00usb_kill_tx_queue(queue);
1591}
1592
1577/* 1593/*
1578 * RX control handlers 1594 * RX control handlers
1579 */ 1595 */
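
Beacons on this hardware come from a timer armed via TXRX_CSR9, not from queued URBs, so killing the beacon queue means disarming that timer first and then letting the generic USB helper cancel whatever transfers remain. The resulting call path, annotated (a sketch; the bcn member name follows rt2x00's usual device layout):

	rt73usb_kill_tx_queue(rt2x00dev->bcn);
		/* 1. queue->qid == QID_BEACON: write TXRX_CSR9 = 0  */
		/* 2. rt2x00usb_kill_tx_queue(): cancel pending URBs */
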
@@ -1597,7 +1613,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1597 return 0; 1613 return 0;
1598 } 1614 }
1599 1615
1600 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { 1616 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
1601 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { 1617 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) {
1602 if (lna == 3 || lna == 2) 1618 if (lna == 3 || lna == 2)
1603 offset += 10; 1619 offset += 10;
@@ -2084,20 +2100,24 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2084 /* 2100 /*
2085 * Create channel information array 2101 * Create channel information array
2086 */ 2102 */
2087 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL); 2103 info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL);
2088 if (!info) 2104 if (!info)
2089 return -ENOMEM; 2105 return -ENOMEM;
2090 2106
2091 spec->channels_info = info; 2107 spec->channels_info = info;
2092 2108
2093 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); 2109 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
2094 for (i = 0; i < 14; i++) 2110 for (i = 0; i < 14; i++) {
2095 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 2111 info[i].max_power = MAX_TXPOWER;
2112 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2113 }
2096 2114
2097 if (spec->num_channels > 14) { 2115 if (spec->num_channels > 14) {
2098 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 2116 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2099 for (i = 14; i < spec->num_channels; i++) 2117 for (i = 14; i < spec->num_channels; i++) {
2100 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); 2118 info[i].max_power = MAX_TXPOWER;
2119 info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2120 }
2101 } 2121 }
2102 2122
2103 return 0; 2123 return 0;
@@ -2259,7 +2279,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2259 .write_beacon = rt73usb_write_beacon, 2279 .write_beacon = rt73usb_write_beacon,
2260 .get_tx_data_len = rt73usb_get_tx_data_len, 2280 .get_tx_data_len = rt73usb_get_tx_data_len,
2261 .kick_tx_queue = rt2x00usb_kick_tx_queue, 2281 .kick_tx_queue = rt2x00usb_kick_tx_queue,
2262 .kill_tx_queue = rt2x00usb_kill_tx_queue, 2282 .kill_tx_queue = rt73usb_kill_tx_queue,
2263 .fill_rxdone = rt73usb_fill_rxdone, 2283 .fill_rxdone = rt73usb_fill_rxdone,
2264 .config_shared_key = rt73usb_config_shared_key, 2284 .config_shared_key = rt73usb_config_shared_key,
2265 .config_pairwise_key = rt73usb_config_pairwise_key, 2285 .config_pairwise_key = rt73usb_config_pairwise_key,
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 30107ce78dfb..05c6badbe201 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -99,19 +99,66 @@ void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
99 } 99 }
100} 100}
101 101
102static void rtl8180_handle_rx(struct ieee80211_hw *dev) 102static void rtl8180_handle_tx(struct ieee80211_hw *dev)
103{ 103{
104 struct rtl8180_priv *priv = dev->priv; 104 struct rtl8180_priv *priv = dev->priv;
105 unsigned int count = 32; 105 struct rtl8180_tx_ring *ring;
106 int prio;
107
108 spin_lock(&priv->lock);
109
110 for (prio = 3; prio >= 0; prio--) {
111 ring = &priv->tx_ring[prio];
112
113 while (skb_queue_len(&ring->queue)) {
114 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
115 struct sk_buff *skb;
116 struct ieee80211_tx_info *info;
117 u32 flags = le32_to_cpu(entry->flags);
118
119 if (flags & RTL818X_TX_DESC_FLAG_OWN)
120 break;
121
122 ring->idx = (ring->idx + 1) % ring->entries;
123 skb = __skb_dequeue(&ring->queue);
124 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
125 skb->len, PCI_DMA_TODEVICE);
126
127 info = IEEE80211_SKB_CB(skb);
128 ieee80211_tx_info_clear_status(info);
129
130 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
131 (flags & RTL818X_TX_DESC_FLAG_TX_OK))
132 info->flags |= IEEE80211_TX_STAT_ACK;
133
134 info->status.rates[0].count = (flags & 0xFF) + 1;
135 info->status.rates[1].idx = -1;
136
137 ieee80211_tx_status(dev, skb);
138 if (ring->entries - skb_queue_len(&ring->queue) == 2)
139 ieee80211_wake_queue(dev, prio);
140 }
141 }
142
143 spin_unlock(&priv->lock);
144}
145
146static int rtl8180_poll(struct ieee80211_hw *dev, int budget)
147{
148 struct rtl8180_priv *priv = dev->priv;
149 unsigned int count = 0;
106 u8 signal, agc, sq; 150 u8 signal, agc, sq;
107 151
108 while (count--) { 152 /* handle pending Tx queue cleanup */
153 rtl8180_handle_tx(dev);
154
155 while (count++ < budget) {
109 struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx]; 156 struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
110 struct sk_buff *skb = priv->rx_buf[priv->rx_idx]; 157 struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
111 u32 flags = le32_to_cpu(entry->flags); 158 u32 flags = le32_to_cpu(entry->flags);
112 159
113 if (flags & RTL818X_RX_DESC_FLAG_OWN) 160 if (flags & RTL818X_RX_DESC_FLAG_OWN)
114 return; 161 break;
115 162
116 if (unlikely(flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL | 163 if (unlikely(flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL |
117 RTL818X_RX_DESC_FLAG_FOF | 164 RTL818X_RX_DESC_FLAG_FOF |
@@ -151,7 +198,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
151 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 198 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
152 199
153 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 200 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
154 ieee80211_rx_irqsafe(dev, skb); 201 ieee80211_rx(dev, skb);
155 202
156 skb = new_skb; 203 skb = new_skb;
157 priv->rx_buf[priv->rx_idx] = skb; 204 priv->rx_buf[priv->rx_idx] = skb;
@@ -168,41 +215,16 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
168 entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR); 215 entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
169 priv->rx_idx = (priv->rx_idx + 1) % 32; 216 priv->rx_idx = (priv->rx_idx + 1) % 32;
170 } 217 }
171}
172 218
173static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio) 219 if (count < budget) {
174{ 220 /* disable polling */
175 struct rtl8180_priv *priv = dev->priv; 221 ieee80211_napi_complete(dev);
176 struct rtl8180_tx_ring *ring = &priv->tx_ring[prio];
177 222
178 while (skb_queue_len(&ring->queue)) { 223 /* enable interrupts */
179 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx]; 224 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
180 struct sk_buff *skb;
181 struct ieee80211_tx_info *info;
182 u32 flags = le32_to_cpu(entry->flags);
183
184 if (flags & RTL818X_TX_DESC_FLAG_OWN)
185 return;
186
187 ring->idx = (ring->idx + 1) % ring->entries;
188 skb = __skb_dequeue(&ring->queue);
189 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
190 skb->len, PCI_DMA_TODEVICE);
191
192 info = IEEE80211_SKB_CB(skb);
193 ieee80211_tx_info_clear_status(info);
194
195 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
196 (flags & RTL818X_TX_DESC_FLAG_TX_OK))
197 info->flags |= IEEE80211_TX_STAT_ACK;
198
199 info->status.rates[0].count = (flags & 0xFF) + 1;
200 info->status.rates[1].idx = -1;
201
202 ieee80211_tx_status_irqsafe(dev, skb);
203 if (ring->entries - skb_queue_len(&ring->queue) == 2)
204 ieee80211_wake_queue(dev, prio);
205 } 225 }
226
227 return count;
206} 228}
207 229
208static irqreturn_t rtl8180_interrupt(int irq, void *dev_id) 230static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
@@ -211,31 +233,17 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
211 struct rtl8180_priv *priv = dev->priv; 233 struct rtl8180_priv *priv = dev->priv;
212 u16 reg; 234 u16 reg;
213 235
214 spin_lock(&priv->lock);
215 reg = rtl818x_ioread16(priv, &priv->map->INT_STATUS); 236 reg = rtl818x_ioread16(priv, &priv->map->INT_STATUS);
216 if (unlikely(reg == 0xFFFF)) { 237 if (unlikely(reg == 0xFFFF))
217 spin_unlock(&priv->lock);
218 return IRQ_HANDLED; 238 return IRQ_HANDLED;
219 }
220 239
221 rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg); 240 rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg);
222 241
223 if (reg & (RTL818X_INT_TXB_OK | RTL818X_INT_TXB_ERR)) 242 /* disable interrupts */
224 rtl8180_handle_tx(dev, 3); 243 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
225
226 if (reg & (RTL818X_INT_TXH_OK | RTL818X_INT_TXH_ERR))
227 rtl8180_handle_tx(dev, 2);
228
229 if (reg & (RTL818X_INT_TXN_OK | RTL818X_INT_TXN_ERR))
230 rtl8180_handle_tx(dev, 1);
231
232 if (reg & (RTL818X_INT_TXL_OK | RTL818X_INT_TXL_ERR))
233 rtl8180_handle_tx(dev, 0);
234
235 if (reg & (RTL818X_INT_RX_OK | RTL818X_INT_RX_ERR))
236 rtl8180_handle_rx(dev);
237 244
238 spin_unlock(&priv->lock); 245 /* enable polling */
246 ieee80211_napi_schedule(dev);
239 247
240 return IRQ_HANDLED; 248 return IRQ_HANDLED;
241} 249}
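
The interrupt handler and rtl8180_poll() together form the standard NAPI contract: the hard IRQ masks the device and schedules the poller; the poller consumes at most budget frames in softirq context and, only when it drains the ring early, completes and unmasks. The skeleton of that split, with the device-specific pieces stubbed out (mask/unmask/process helpers hypothetical):

	static irqreturn_t example_irq(int irq, void *dev_id)
	{
		struct ieee80211_hw *dev = dev_id;

		mask_hw_interrupts(dev);	/* hypothetical register write */
		ieee80211_napi_schedule(dev);	/* defer the work to softirq */
		return IRQ_HANDLED;
	}

	static int example_poll(struct ieee80211_hw *dev, int budget)
	{
		int done = process_rx_ring(dev, budget);	/* hypothetical */

		if (done < budget) {		/* ring drained early */
			ieee80211_napi_complete(dev);
			unmask_hw_interrupts(dev);	/* hypothetical */
		}
		return done;
	}

This is also why the RX path above can switch from ieee80211_rx_irqsafe() to plain ieee80211_rx(): the poll runs in softirq, not hard-IRQ, context.
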
@@ -247,7 +255,6 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
247 struct rtl8180_priv *priv = dev->priv; 255 struct rtl8180_priv *priv = dev->priv;
248 struct rtl8180_tx_ring *ring; 256 struct rtl8180_tx_ring *ring;
249 struct rtl8180_tx_desc *entry; 257 struct rtl8180_tx_desc *entry;
250 unsigned long flags;
251 unsigned int idx, prio; 258 unsigned int idx, prio;
252 dma_addr_t mapping; 259 dma_addr_t mapping;
253 u32 tx_flags; 260 u32 tx_flags;
@@ -294,7 +301,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
294 plcp_len |= 1 << 15; 301 plcp_len |= 1 << 15;
295 } 302 }
296 303
297 spin_lock_irqsave(&priv->lock, flags); 304 spin_lock(&priv->lock);
298 305
299 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 306 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
300 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 307 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
@@ -318,7 +325,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
318 if (ring->entries - skb_queue_len(&ring->queue) < 2) 325 if (ring->entries - skb_queue_len(&ring->queue) < 2)
319 ieee80211_stop_queue(dev, prio); 326 ieee80211_stop_queue(dev, prio);
320 327
321 spin_unlock_irqrestore(&priv->lock, flags); 328 spin_unlock(&priv->lock);
322 329
323 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); 330 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
324 331
@@ -783,6 +790,7 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
783 struct rtl8180_priv *priv = dev->priv; 790 struct rtl8180_priv *priv = dev->priv;
784 struct rtl8180_vif *vif_priv; 791 struct rtl8180_vif *vif_priv;
785 int i; 792 int i;
793 u8 reg;
786 794
787 vif_priv = (struct rtl8180_vif *)&vif->drv_priv; 795 vif_priv = (struct rtl8180_vif *)&vif->drv_priv;
788 796
@@ -791,12 +799,14 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
791 rtl818x_iowrite8(priv, &priv->map->BSSID[i], 799 rtl818x_iowrite8(priv, &priv->map->BSSID[i],
792 info->bssid[i]); 800 info->bssid[i]);
793 801
794 if (is_valid_ether_addr(info->bssid)) 802 if (is_valid_ether_addr(info->bssid)) {
795 rtl818x_iowrite8(priv, &priv->map->MSR, 803 if (vif->type == NL80211_IFTYPE_ADHOC)
796 RTL818X_MSR_INFRA); 804 reg = RTL818X_MSR_ADHOC;
797 else 805 else
798 rtl818x_iowrite8(priv, &priv->map->MSR, 806 reg = RTL818X_MSR_INFRA;
799 RTL818X_MSR_NO_LINK); 807 } else
808 reg = RTL818X_MSR_NO_LINK;
809 rtl818x_iowrite8(priv, &priv->map->MSR, reg);
800 } 810 }
801 811
802 if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp) 812 if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp)
@@ -861,6 +871,7 @@ static const struct ieee80211_ops rtl8180_ops = {
861 .prepare_multicast = rtl8180_prepare_multicast, 871 .prepare_multicast = rtl8180_prepare_multicast,
862 .configure_filter = rtl8180_configure_filter, 872 .configure_filter = rtl8180_configure_filter,
863 .get_tsf = rtl8180_get_tsf, 873 .get_tsf = rtl8180_get_tsf,
874 .napi_poll = rtl8180_poll,
864}; 875};
865 876
866static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom) 877static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
@@ -992,6 +1003,8 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
992 dev->queues = 1; 1003 dev->queues = 1;
993 dev->max_signal = 65; 1004 dev->max_signal = 65;
994 1005
1006 dev->napi_weight = 64;
1007
995 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); 1008 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
996 reg &= RTL818X_TX_CONF_HWVER_MASK; 1009 reg &= RTL818X_TX_CONF_HWVER_MASK;
997 switch (reg) { 1010 switch (reg) {
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 98e0351c1dd6..38fa8244cc96 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1176,13 +1176,12 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
1176 else 1176 else
1177 reg = 0; 1177 reg = 0;
1178 1178
1179 if (is_valid_ether_addr(info->bssid)) { 1179 if (is_valid_ether_addr(info->bssid))
1180 reg |= RTL818X_MSR_INFRA; 1180 reg |= RTL818X_MSR_INFRA;
1181 rtl818x_iowrite8(priv, &priv->map->MSR, reg); 1181 else
1182 } else {
1183 reg |= RTL818X_MSR_NO_LINK; 1182 reg |= RTL818X_MSR_NO_LINK;
1184 rtl818x_iowrite8(priv, &priv->map->MSR, reg); 1183
1185 } 1184 rtl818x_iowrite8(priv, &priv->map->MSR, reg);
1186 1185
1187 mutex_unlock(&priv->conf_mutex); 1186 mutex_unlock(&priv->conf_mutex);
1188 } 1187 }
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 2f98058be451..4a8bb25c1739 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -74,4 +74,7 @@ config WL1271_SDIO
74 If you choose to build a module, it'll be called 74 If you choose to build a module, it'll be called
75 wl1271_sdio. Say N if unsure. 75 wl1271_sdio. Say N if unsure.
76 76
77 77config WL12XX_PLATFORM_DATA
78 bool
79 depends on WL1271_SDIO != n
80 default y
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 6b942a28e6a5..e113d4c1fb35 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008-2009 Nokia Corporation 5 * Copyright (C) 2008-2009 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -274,6 +272,8 @@ struct wl1251 {
274 int irq; 272 int irq;
275 bool use_eeprom; 273 bool use_eeprom;
276 274
275 spinlock_t wl_lock;
276
277 enum wl1251_state state; 277 enum wl1251_state state;
278 struct mutex mutex; 278 struct mutex mutex;
279 279
@@ -401,7 +401,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
401 401
402#define WL1251_DEFAULT_POWER_LEVEL 20 402#define WL1251_DEFAULT_POWER_LEVEL 20
403 403
404#define WL1251_TX_QUEUE_MAX_LENGTH 20 404#define WL1251_TX_QUEUE_LOW_WATERMARK 10
405#define WL1251_TX_QUEUE_HIGH_WATERMARK 25
405 406
406#define WL1251_DEFAULT_BEACON_INT 100 407#define WL1251_DEFAULT_BEACON_INT 100
407#define WL1251_DEFAULT_DTIM_PERIOD 1 408#define WL1251_DEFAULT_DTIM_PERIOD 1
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
index 91891f928070..2f8a2ba744dc 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -380,7 +380,7 @@ int wl1251_acx_pd_threshold(struct wl1251 *wl)
380 380
381out: 381out:
382 kfree(pd); 382 kfree(pd);
383 return 0; 383 return ret;
384} 384}
385 385
386int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time) 386int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time)
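
wl1251_acx_pd_threshold() ended in a hard-coded "return 0", so a failing wl1251_cmd_configure() was silently reported as success; the identical one-line fix recurs below for wl1251_cmd_vbm() and wl1271_acx_pd_threshold(). The corrected goto-out tail, abbreviated from the function above:

	ret = wl1251_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
	if (ret < 0)
		goto out;

out:
	kfree(pd);
	return ret;	/* was 'return 0', which swallowed the error */
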
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 842df310d92a..c7cc5c1e8a75 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -37,7 +35,7 @@ struct acx_header {
37 35
 38 /* payload length (not including headers) */ 36 /* payload length (not including headers) */
39 u16 len; 37 u16 len;
40}; 38} __packed;
41 39
42struct acx_error_counter { 40struct acx_error_counter {
43 struct acx_header header; 41 struct acx_header header;
@@ -459,8 +457,8 @@ struct acx_beacon_filter_ie_table {
459 struct acx_header header; 457 struct acx_header header;
460 458
461 u8 num_ie; 459 u8 num_ie;
462 u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
463 u8 pad[3]; 460 u8 pad[3];
461 u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
464} __packed; 462} __packed;
465 463
466#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */ 464#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */
@@ -471,7 +469,7 @@ struct acx_conn_monit_params {
471 469
472 u32 synch_fail_thold; /* number of beacons missed */ 470 u32 synch_fail_thold; /* number of beacons missed */
473 u32 bss_lose_timeout; /* number of TU's from synch fail */ 471 u32 bss_lose_timeout; /* number of TU's from synch fail */
474}; 472} __packed;
475 473
476enum { 474enum {
477 SG_ENABLE = 0, 475 SG_ENABLE = 0,
@@ -1056,7 +1054,7 @@ struct acx_rate_class {
1056 u8 long_retry_limit; 1054 u8 long_retry_limit;
1057 u8 aflags; 1055 u8 aflags;
1058 u8 reserved; 1056 u8 reserved;
1059}; 1057} __packed;
1060 1058
1061struct acx_rate_policy { 1059struct acx_rate_policy {
1062 struct acx_header header; 1060 struct acx_header header;
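
These structures cross the host/firmware boundary verbatim, so their in-memory layout must match the wire format byte for byte; __packed tells the compiler not to insert alignment padding. A self-contained illustration (not the real acx layout):

	struct example_hdr {
		u8  flag;
		u32 value;
	} __packed;
	/* sizeof == 5 on the wire; without __packed the compiler would
	 * typically align 'value' to 4 bytes, giving sizeof == 8 */
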
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index 65e0416be5b6..468b47b0328a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
@@ -302,7 +300,7 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
302 ROAMING_TRIGGER_LOW_RSSI_EVENT_ID | 300 ROAMING_TRIGGER_LOW_RSSI_EVENT_ID |
303 ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID | 301 ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID |
304 REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID | 302 REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID |
305 BT_PTA_PREDICTION_EVENT_ID; 303 BT_PTA_PREDICTION_EVENT_ID | JOIN_EVENT_COMPLETE_ID;
306 304
307 ret = wl1251_event_unmask(wl); 305 ret = wl1251_event_unmask(wl);
308 if (ret < 0) { 306 if (ret < 0) {
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.h b/drivers/net/wireless/wl12xx/wl1251_boot.h
index 90063697e8f2..7661bc5e4662 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.h
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
index ce3722f4c3e3..15fb68c6b542 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -200,7 +200,7 @@ int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity,
200 200
201out: 201out:
202 kfree(vbm); 202 kfree(vbm);
203 return 0; 203 return ret;
204} 204}
205 205
206int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable) 206int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable)
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index a9e4991369be..e5c74c631374 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -111,7 +109,7 @@ struct wl1251_cmd_header {
111struct wl1251_command { 109struct wl1251_command {
112 struct wl1251_cmd_header header; 110 struct wl1251_cmd_header header;
113 u8 parameters[MAX_CMD_PARAMS]; 111 u8 parameters[MAX_CMD_PARAMS];
114}; 112} __packed;
115 113
116enum { 114enum {
117 CMD_MAILBOX_IDLE = 0, 115 CMD_MAILBOX_IDLE = 0,
@@ -164,7 +162,7 @@ struct cmd_read_write_memory {
 164 of this field is the Host in WRITE command or the WiLink in READ 162 of this field is the Host in WRITE command or the WiLink in READ
165 command. */ 163 command. */
166 u8 value[MAX_READ_SIZE]; 164 u8 value[MAX_READ_SIZE];
167}; 165} __packed;
168 166
169#define CMDMBOX_HEADER_LEN 4 167#define CMDMBOX_HEADER_LEN 4
170#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 168#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
@@ -339,7 +337,7 @@ struct wl1251_cmd_trigger_scan_to {
339 struct wl1251_cmd_header header; 337 struct wl1251_cmd_header header;
340 338
341 u32 timeout; 339 u32 timeout;
342}; 340} __packed;
343 341
344/* HW encryption keys */ 342/* HW encryption keys */
345#define NUM_ACCESS_CATEGORIES_COPY 4 343#define NUM_ACCESS_CATEGORIES_COPY 4
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
index 5e4465ac08fa..6ffe4cd58561 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.h b/drivers/net/wireless/wl12xx/wl1251_debugfs.h
index 6dc3d080853c..b3417c02a218 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.h
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.h
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.c b/drivers/net/wireless/wl12xx/wl1251_event.c
index 020d764f9c13..54223556b308 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.c
+++ b/drivers/net/wireless/wl12xx/wl1251_event.c
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -36,9 +34,7 @@ static int wl1251_event_scan_complete(struct wl1251 *wl,
36 mbox->scheduled_scan_channels); 34 mbox->scheduled_scan_channels);
37 35
38 if (wl->scanning) { 36 if (wl->scanning) {
39 mutex_unlock(&wl->mutex);
40 ieee80211_scan_completed(wl->hw, false); 37 ieee80211_scan_completed(wl->hw, false);
41 mutex_lock(&wl->mutex);
42 wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed"); 38 wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed");
43 wl->scanning = false; 39 wl->scanning = false;
44 } 40 }
@@ -97,6 +93,35 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
97 return 0; 93 return 0;
98} 94}
99 95
96/*
97 * Poll the mailbox event field until any of the bits in the mask is set or a
 98 * timeout occurs (timeout_ms, in milliseconds)
99 */
100int wl1251_event_wait(struct wl1251 *wl, u32 mask, int timeout_ms)
101{
102 u32 events_vector, event;
103 unsigned long timeout;
104
105 timeout = jiffies + msecs_to_jiffies(timeout_ms);
106
107 do {
108 if (time_after(jiffies, timeout))
109 return -ETIMEDOUT;
110
111 msleep(1);
112
113 /* read from both event fields */
114 wl1251_mem_read(wl, wl->mbox_ptr[0], &events_vector,
115 sizeof(events_vector));
116 event = events_vector & mask;
117 wl1251_mem_read(wl, wl->mbox_ptr[1], &events_vector,
118 sizeof(events_vector));
119 event |= events_vector & mask;
120 } while (!event);
121
122 return 0;
123}
124
100int wl1251_event_unmask(struct wl1251 *wl) 125int wl1251_event_unmask(struct wl1251 *wl)
101{ 126{
102 int ret; 127 int ret;
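
The first user is wl1251_join(), changed further down, where a blind msleep(10) becomes a bounded wait on the join-complete event:

	ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
	if (ret < 0)
		wl1251_warning("join timeout");	/* ret == -ETIMEDOUT */
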
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.h b/drivers/net/wireless/wl12xx/wl1251_event.h
index f48a2b66bc5a..30eb5d150bf7 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.h
+++ b/drivers/net/wireless/wl12xx/wl1251_event.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -117,5 +115,6 @@ struct event_mailbox {
117int wl1251_event_unmask(struct wl1251 *wl); 115int wl1251_event_unmask(struct wl1251 *wl);
118void wl1251_event_mbox_config(struct wl1251 *wl); 116void wl1251_event_mbox_config(struct wl1251 *wl);
119int wl1251_event_handle(struct wl1251 *wl, u8 mbox); 117int wl1251_event_handle(struct wl1251 *wl, u8 mbox);
118int wl1251_event_wait(struct wl1251 *wl, u32 mask, int timeout_ms);
120 119
121#endif 120#endif
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c
index b538bdd7b320..c5daec05d9ee 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl12xx/wl1251_init.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.h b/drivers/net/wireless/wl12xx/wl1251_init.h
index 269cefb3e7d4..543f17582ead 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.h
+++ b/drivers/net/wireless/wl12xx/wl1251_init.h
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_io.c b/drivers/net/wireless/wl12xx/wl1251_io.c
index f1c232e0887f..ad6ca68b303f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_io.c
+++ b/drivers/net/wireless/wl12xx/wl1251_io.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 861a5f33761e..faf221ca3f41 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008-2009 Nokia Corporation 4 * Copyright (C) 2008-2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
@@ -293,14 +291,14 @@ static void wl1251_irq_work(struct work_struct *work)
293 wl1251_tx_complete(wl); 291 wl1251_tx_complete(wl);
294 } 292 }
295 293
296 if (intr & (WL1251_ACX_INTR_EVENT_A | 294 if (intr & WL1251_ACX_INTR_EVENT_A) {
297 WL1251_ACX_INTR_EVENT_B)) { 295 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT_A");
298 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT (0x%x)", 296 wl1251_event_handle(wl, 0);
299 intr); 297 }
300 if (intr & WL1251_ACX_INTR_EVENT_A) 298
301 wl1251_event_handle(wl, 0); 299 if (intr & WL1251_ACX_INTR_EVENT_B) {
302 else 300 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT_B");
303 wl1251_event_handle(wl, 1); 301 wl1251_event_handle(wl, 1);
304 } 302 }
305 303
306 if (intr & WL1251_ACX_INTR_INIT_COMPLETE) 304 if (intr & WL1251_ACX_INTR_INIT_COMPLETE)
@@ -339,11 +337,9 @@ static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
339 if (ret < 0) 337 if (ret < 0)
340 goto out; 338 goto out;
341 339
342 /* 340 ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
343 * FIXME: we should wait for JOIN_EVENT_COMPLETE_ID but to simplify 341 if (ret < 0)
344 * locking we just sleep instead, for now 342 wl1251_warning("join timeout");
345 */
346 msleep(10);
347 343
348out: 344out:
349 return ret; 345 return ret;
@@ -379,6 +375,7 @@ out:
379static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 375static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
380{ 376{
381 struct wl1251 *wl = hw->priv; 377 struct wl1251 *wl = hw->priv;
378 unsigned long flags;
382 379
383 skb_queue_tail(&wl->tx_queue, skb); 380 skb_queue_tail(&wl->tx_queue, skb);
384 381
@@ -393,16 +390,13 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 393 * The workqueue is slow to process the tx_queue and we need to stop 390 * The workqueue is slow to process the tx_queue and we need to stop
394 * the queue here, otherwise the queue will get too long. 391 * the queue here, otherwise the queue will get too long.
395 */ 392 */
396 if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) { 393 if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_HIGH_WATERMARK) {
397 wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues"); 394 wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
398 ieee80211_stop_queues(wl->hw);
399 395
400 /* 396 spin_lock_irqsave(&wl->wl_lock, flags);
401 * FIXME: this is racy, the variable is not properly 397 ieee80211_stop_queues(wl->hw);
402 * protected. Maybe fix this by removing the stupid
403 * variable altogether and checking the real queue state?
404 */
405 wl->tx_queue_stopped = true; 398 wl->tx_queue_stopped = true;
399 spin_unlock_irqrestore(&wl->wl_lock, flags);
406 } 400 }
407 401
408 return NETDEV_TX_OK; 402 return NETDEV_TX_OK;
@@ -471,9 +465,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
471 WARN_ON(wl->state != WL1251_STATE_ON); 465 WARN_ON(wl->state != WL1251_STATE_ON);
472 466
473 if (wl->scanning) { 467 if (wl->scanning) {
474 mutex_unlock(&wl->mutex);
475 ieee80211_scan_completed(wl->hw, true); 468 ieee80211_scan_completed(wl->hw, true);
476 mutex_lock(&wl->mutex);
477 wl->scanning = false; 469 wl->scanning = false;
478 } 470 }
479 471
@@ -725,8 +717,9 @@ static int wl1251_set_key_type(struct wl1251 *wl,
725 struct ieee80211_key_conf *mac80211_key, 717 struct ieee80211_key_conf *mac80211_key,
726 const u8 *addr) 718 const u8 *addr)
727{ 719{
728 switch (mac80211_key->alg) { 720 switch (mac80211_key->cipher) {
729 case ALG_WEP: 721 case WLAN_CIPHER_SUITE_WEP40:
722 case WLAN_CIPHER_SUITE_WEP104:
730 if (is_broadcast_ether_addr(addr)) 723 if (is_broadcast_ether_addr(addr))
731 key->key_type = KEY_WEP_DEFAULT; 724 key->key_type = KEY_WEP_DEFAULT;
732 else 725 else
@@ -734,7 +727,7 @@ static int wl1251_set_key_type(struct wl1251 *wl,
734 727
735 mac80211_key->hw_key_idx = mac80211_key->keyidx; 728 mac80211_key->hw_key_idx = mac80211_key->keyidx;
736 break; 729 break;
737 case ALG_TKIP: 730 case WLAN_CIPHER_SUITE_TKIP:
738 if (is_broadcast_ether_addr(addr)) 731 if (is_broadcast_ether_addr(addr))
739 key->key_type = KEY_TKIP_MIC_GROUP; 732 key->key_type = KEY_TKIP_MIC_GROUP;
740 else 733 else
@@ -742,7 +735,7 @@ static int wl1251_set_key_type(struct wl1251 *wl,
742 735
743 mac80211_key->hw_key_idx = mac80211_key->keyidx; 736 mac80211_key->hw_key_idx = mac80211_key->keyidx;
744 break; 737 break;
745 case ALG_CCMP: 738 case WLAN_CIPHER_SUITE_CCMP:
746 if (is_broadcast_ether_addr(addr)) 739 if (is_broadcast_ether_addr(addr))
747 key->key_type = KEY_AES_GROUP; 740 key->key_type = KEY_AES_GROUP;
748 else 741 else
@@ -750,7 +743,7 @@ static int wl1251_set_key_type(struct wl1251 *wl,
750 mac80211_key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 743 mac80211_key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
751 break; 744 break;
752 default: 745 default:
753 wl1251_error("Unknown key algo 0x%x", mac80211_key->alg); 746 wl1251_error("Unknown key cipher 0x%x", mac80211_key->cipher);
754 return -EOPNOTSUPP; 747 return -EOPNOTSUPP;
755 } 748 }
756 749
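
mac80211 replaced the enum-valued key->alg (ALG_WEP/ALG_TKIP/ALG_CCMP) with 32-bit IEEE cipher suite selectors in key->cipher, which is why WEP now needs two case labels: WEP40 and WEP104 were indistinguishable under ALG_WEP. The dispatch shape, condensed from the hunks above (the broadcast/unicast split is omitted here):

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:	/* both mapped to ALG_WEP before */
	case WLAN_CIPHER_SUITE_WEP104:
		key_type = KEY_WEP_DEFAULT;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_type = KEY_TKIP_MIC_GROUP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_type = KEY_AES_GROUP;
		break;
	default:
		return -EOPNOTSUPP;	/* unknown suite */
	}
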
@@ -783,7 +776,7 @@ static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
783 wl1251_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd); 776 wl1251_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
784 wl1251_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN); 777 wl1251_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
785 wl1251_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", 778 wl1251_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
786 key->alg, key->keyidx, key->keylen, key->flags); 779 key->cipher, key->keyidx, key->keylen, key->flags);
787 wl1251_dump(DEBUG_CRYPT, "KEY: ", key->key, key->keylen); 780 wl1251_dump(DEBUG_CRYPT, "KEY: ", key->key, key->keylen);
788 781
789 if (is_zero_ether_addr(addr)) { 782 if (is_zero_ether_addr(addr)) {
@@ -1438,5 +1431,5 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw);
1438 1431
1439MODULE_DESCRIPTION("TI wl1251 Wireless LAN Driver Core"); 1432MODULE_DESCRIPTION("TI wl1251 Wireless LAN Driver Core");
1440MODULE_LICENSE("GPL"); 1433MODULE_LICENSE("GPL");
1441MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 1434MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
1442MODULE_FIRMWARE(WL1251_FW_NAME); 1435MODULE_FIRMWARE(WL1251_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index b55cb2bd459a..0b997bdfec09 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.h b/drivers/net/wireless/wl12xx/wl1251_ps.h
index c688ac57aee4..e5db81fc1dfc 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.h
@@ -1,14 +1,9 @@
1#ifndef __WL1251_PS_H__
2#define __WL1251_PS_H__
3
4/* 1/*
5 * This file is part of wl1251 2 * This file is part of wl1251
6 * 3 *
7 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
8 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
9 * 6 *
10 * Contact: Kalle Valo <kalle.valo@nokia.com>
11 *
12 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
14 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -25,6 +20,9 @@
25 * 20 *
26 */ 21 */
27 22
23#ifndef __WL1251_PS_H__
24#define __WL1251_PS_H__
25
28#include "wl1251.h" 26#include "wl1251.h"
29#include "wl1251_acx.h" 27#include "wl1251_acx.h"
30 28
diff --git a/drivers/net/wireless/wl12xx/wl1251_reg.h b/drivers/net/wireless/wl12xx/wl1251_reg.h
index d16edd9bf06c..a5809019c5c1 100644
--- a/drivers/net/wireless/wl12xx/wl1251_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1251_reg.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 1b6294b3b996..25764592a596 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.h b/drivers/net/wireless/wl12xx/wl1251_rx.h
index da4e53406a0e..4448f635a4d8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index b901b6135654..74ba9ced5393 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -24,7 +24,7 @@
24#include <linux/mmc/sdio_func.h> 24#include <linux/mmc/sdio_func.h>
25#include <linux/mmc/sdio_ids.h> 25#include <linux/mmc/sdio_ids.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/spi/wl12xx.h> 27#include <linux/wl12xx.h>
28#include <linux/irq.h> 28#include <linux/irq.h>
29 29
30#include "wl1251.h" 30#include "wl1251.h"
@@ -339,4 +339,4 @@ module_init(wl1251_sdio_init);
339module_exit(wl1251_sdio_exit); 339module_exit(wl1251_sdio_exit);
340 340
341MODULE_LICENSE("GPL"); 341MODULE_LICENSE("GPL");
342MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 342MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 27fdfaaeb074..320de79667a6 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2008 Nokia Corporation 4 * Copyright (C) 2008 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 8 * version 2 as published by the Free Software Foundation.
@@ -26,7 +24,7 @@
26#include <linux/slab.h> 24#include <linux/slab.h>
27#include <linux/crc7.h> 25#include <linux/crc7.h>
28#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
29#include <linux/spi/wl12xx.h> 27#include <linux/wl12xx.h>
30 28
31#include "wl1251.h" 29#include "wl1251.h"
32#include "wl1251_reg.h" 30#include "wl1251_reg.h"
@@ -344,5 +342,5 @@ module_init(wl1251_spi_init);
344module_exit(wl1251_spi_exit); 342module_exit(wl1251_spi_exit);
345 343
346MODULE_LICENSE("GPL"); 344MODULE_LICENSE("GPL");
347MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 345MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
348MODULE_ALIAS("spi:wl1251"); 346MODULE_ALIAS("spi:wl1251");
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.h b/drivers/net/wireless/wl12xx/wl1251_spi.h
index 2e273a97e7f3..7dcf3cf7ae40 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.h
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c
index a38ec199187a..388492a7f41f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.c
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
@@ -189,7 +187,7 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
189 tx_hdr = (struct tx_double_buffer_desc *) skb->data; 187 tx_hdr = (struct tx_double_buffer_desc *) skb->data;
190 188
191 if (control->control.hw_key && 189 if (control->control.hw_key &&
192 control->control.hw_key->alg == ALG_TKIP) { 190 control->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
193 int hdrlen; 191 int hdrlen;
194 __le16 fc; 192 __le16 fc;
195 u16 length; 193 u16 length;
@@ -322,11 +320,6 @@ void wl1251_tx_work(struct work_struct *work)
322 320
323 ret = wl1251_tx_frame(wl, skb); 321 ret = wl1251_tx_frame(wl, skb);
324 if (ret == -EBUSY) { 322 if (ret == -EBUSY) {
325 /* firmware buffer is full, stop queues */
326 wl1251_debug(DEBUG_TX, "tx_work: fw buffer full, "
327 "stop queues");
328 ieee80211_stop_queues(wl->hw);
329 wl->tx_queue_stopped = true;
330 skb_queue_head(&wl->tx_queue, skb); 323 skb_queue_head(&wl->tx_queue, skb);
331 goto out; 324 goto out;
332 } else if (ret < 0) { 325 } else if (ret < 0) {
@@ -399,7 +392,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
399 */ 392 */
400 frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc)); 393 frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
401 if (info->control.hw_key && 394 if (info->control.hw_key &&
402 info->control.hw_key->alg == ALG_TKIP) { 395 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
403 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 396 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
404 memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen); 397 memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen);
405 skb_pull(skb, WL1251_TKIP_IV_SPACE); 398 skb_pull(skb, WL1251_TKIP_IV_SPACE);
@@ -449,6 +442,7 @@ void wl1251_tx_complete(struct wl1251 *wl)
449{ 442{
450 int i, result_index, num_complete = 0; 443 int i, result_index, num_complete = 0;
451 struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr; 444 struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
445 unsigned long flags;
452 446
453 if (unlikely(wl->state != WL1251_STATE_ON)) 447 if (unlikely(wl->state != WL1251_STATE_ON))
454 return; 448 return;
@@ -477,6 +471,20 @@ void wl1251_tx_complete(struct wl1251 *wl)
477 } 471 }
478 } 472 }
479 473
 474 if (wl->tx_queue_stopped &&
 475     skb_queue_len(&wl->tx_queue) <= WL1251_TX_QUEUE_LOW_WATERMARK) {
 476
477
 478 /* tx queue has drained below the low watermark, restart queues */
479 wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
480 spin_lock_irqsave(&wl->wl_lock, flags);
481 ieee80211_wake_queues(wl->hw);
482 wl->tx_queue_stopped = false;
483 spin_unlock_irqrestore(&wl->wl_lock, flags);
484 ieee80211_queue_work(wl->hw, &wl->tx_work);
485
486 }
487
480 /* Every completed frame needs to be acknowledged */ 488 /* Every completed frame needs to be acknowledged */
481 if (num_complete) { 489 if (num_complete) {
482 /* 490 /*
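
The two wl1251_tx.c hunks above move tx flow control to a watermark scheme: the stop side leaves tx_work, and tx_complete wakes the mac80211 queues once the driver queue drains to WL1251_TX_QUEUE_LOW_WATERMARK. A minimal sketch of the stop side this pairs with, assuming a matching WL1251_TX_QUEUE_HIGH_WATERMARK constant (the helper name is hypothetical):

static void wl1251_tx_enqueue_sketch(struct wl1251 *wl, struct sk_buff *skb)
{
	skb_queue_tail(&wl->tx_queue, skb);

	/* stop mac80211 queues before the driver queue overflows;
	 * wl1251_tx_complete() wakes them again at the low watermark */
	if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_HIGH_WATERMARK) {
		wl1251_debug(DEBUG_TX, "tx queue full, stopping queues");
		ieee80211_stop_queues(wl->hw);
		wl->tx_queue_stopped = true;
	}
}
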
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index f40eeb37f5aa..96011e78cd5a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -4,8 +4,6 @@
4 * Copyright (c) 1998-2007 Texas Instruments Incorporated 4 * Copyright (c) 1998-2007 Texas Instruments Incorporated
5 * Copyright (C) 2008 Nokia Corporation 5 * Copyright (C) 2008 Nokia Corporation
6 * 6 *
7 * Contact: Kalle Valo <kalle.valo@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 9 * version 2 as published by the Free Software Foundation.
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index dd3cee6ea5bb..4134f4495b95 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -313,7 +313,7 @@ struct wl1271_if_operations {
313 bool fixed); 313 bool fixed);
314 void (*reset)(struct wl1271 *wl); 314 void (*reset)(struct wl1271 *wl);
315 void (*init)(struct wl1271 *wl); 315 void (*init)(struct wl1271 *wl);
316 void (*power)(struct wl1271 *wl, bool enable); 316 int (*power)(struct wl1271 *wl, bool enable);
317 struct device* (*dev)(struct wl1271 *wl); 317 struct device* (*dev)(struct wl1271 *wl);
318 void (*enable_irq)(struct wl1271 *wl); 318 void (*enable_irq)(struct wl1271 *wl);
319 void (*disable_irq)(struct wl1271 *wl); 319 void (*disable_irq)(struct wl1271 *wl);
@@ -330,6 +330,7 @@ struct wl1271 {
330 330
331 void (*set_power)(bool enable); 331 void (*set_power)(bool enable);
332 int irq; 332 int irq;
333 int ref_clock;
333 334
334 spinlock_t wl_lock; 335 spinlock_t wl_lock;
335 336
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index bb245f05af49..f03ad088db8b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -269,7 +269,7 @@ int wl1271_acx_pd_threshold(struct wl1271 *wl)
269 269
270out: 270out:
271 kfree(pd); 271 kfree(pd);
272 return 0; 272 return ret;
273} 273}
274 274
275int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time) 275int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
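
The wl1271_acx.c fix above is worth spelling out: the function computed ret from the firmware command but returned a hard-coded 0, so callers never saw configuration failures. A hedged sketch of the corrected shape (identifiers as used in the surrounding driver):

int wl1271_acx_pd_threshold_sketch(struct wl1271 *wl)
{
	struct acx_packet_detection *pd;
	int ret;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
	if (ret < 0)
		wl1271_warning("failed to set pd threshold: %d", ret);

	kfree(pd);
	return ret;	/* propagate the last error instead of 0 */
}
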
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index f36430b0336d..fc21db810812 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -457,17 +457,20 @@ int wl1271_boot(struct wl1271 *wl)
457{ 457{
458 int ret = 0; 458 int ret = 0;
459 u32 tmp, clk, pause; 459 u32 tmp, clk, pause;
460 int ref_clock = wl->ref_clock;
460 461
461 wl1271_boot_hw_version(wl); 462 wl1271_boot_hw_version(wl);
462 463
463 if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4) 464 if (ref_clock == 0 || ref_clock == 2 || ref_clock == 4)
464 /* ref clk: 19.2/38.4/38.4-XTAL */ 465 /* ref clk: 19.2/38.4/38.4-XTAL */
465 clk = 0x3; 466 clk = 0x3;
466 else if (REF_CLOCK == 1 || REF_CLOCK == 3) 467 else if (ref_clock == 1 || ref_clock == 3)
467 /* ref clk: 26/52 */ 468 /* ref clk: 26/52 */
468 clk = 0x5; 469 clk = 0x5;
470 else
471 return -EINVAL;
469 472
470 if (REF_CLOCK != 0) { 473 if (ref_clock != 0) {
471 u16 val; 474 u16 val;
472 /* Set clock type (open drain) */ 475 /* Set clock type (open drain) */
473 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE); 476 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
@@ -516,7 +519,7 @@ int wl1271_boot(struct wl1271 *wl)
516 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); 519 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
517 520
518 /* 2 */ 521 /* 2 */
519 clk |= (REF_CLOCK << 1) << 4; 522 clk |= (ref_clock << 1) << 4;
520 wl1271_write32(wl, DRPW_SCRATCH_START, clk); 523 wl1271_write32(wl, DRPW_SCRATCH_START, clk);
521 524
522 wl1271_set_partition(wl, &part_table[PART_WORK]); 525 wl1271_set_partition(wl, &part_table[PART_WORK]);
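
With REF_CLOCK no longer a build-time constant, the valid values are worth summarizing. An equivalent lookup sketch of the mapping the hunk implements (clock rates taken from its comments; the helper itself is hypothetical):

static int wl1271_ref_clock_to_clk(int ref_clock, u32 *clk)
{
	switch (ref_clock) {
	case 0:	/* 19.2 MHz */
	case 2:	/* 38.4 MHz */
	case 4:	/* 38.4 MHz, XTAL */
		*clk = 0x3;
		return 0;
	case 1:	/* 26 MHz */
	case 3:	/* 52 MHz */
		*clk = 0x5;
		return 0;
	default:
		return -EINVAL;	/* unknown board clock, refuse to boot */
	}
}
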
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index f829699d597e..f73b0b15a280 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -46,7 +46,6 @@ struct wl1271_static_data {
46/* delay between retries */ 46/* delay between retries */
47#define INIT_LOOP_DELAY 50 47#define INIT_LOOP_DELAY 50
48 48
49#define REF_CLOCK 2
50#define WU_COUNTER_PAUSE_VAL 0x3FF 49#define WU_COUNTER_PAUSE_VAL 0x3FF
51#define WELP_ARM_COMMAND_VAL 0x4 50#define WELP_ARM_COMMAND_VAL 0x4
52 51
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/wl1271_io.h
index bc806c74c63a..c1f92e65ded0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.h
+++ b/drivers/net/wireless/wl12xx/wl1271_io.h
@@ -144,10 +144,13 @@ static inline void wl1271_power_off(struct wl1271 *wl)
144 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); 144 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
145} 145}
146 146
147static inline void wl1271_power_on(struct wl1271 *wl) 147static inline int wl1271_power_on(struct wl1271 *wl)
148{ 148{
149 wl->if_ops->power(wl, true); 149 int ret = wl->if_ops->power(wl, true);
150 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); 150 if (ret == 0)
151 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
152
153 return ret;
151} 154}
152 155
153 156
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 9d68f0012f05..776cd7c41148 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -621,7 +621,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
621 int ret = 0; 621 int ret = 0;
622 622
623 msleep(WL1271_PRE_POWER_ON_SLEEP); 623 msleep(WL1271_PRE_POWER_ON_SLEEP);
624 wl1271_power_on(wl); 624 ret = wl1271_power_on(wl);
625 if (ret < 0)
626 goto out;
625 msleep(WL1271_POWER_ON_SLEEP); 627 msleep(WL1271_POWER_ON_SLEEP);
626 wl1271_io_reset(wl); 628 wl1271_io_reset(wl);
627 wl1271_io_init(wl); 629 wl1271_io_init(wl);
@@ -948,9 +950,7 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
948 ieee80211_enable_dyn_ps(wl->vif); 950 ieee80211_enable_dyn_ps(wl->vif);
949 951
950 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { 952 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
951 mutex_unlock(&wl->mutex);
952 ieee80211_scan_completed(wl->hw, true); 953 ieee80211_scan_completed(wl->hw, true);
953 mutex_lock(&wl->mutex);
954 wl->scan.state = WL1271_SCAN_STATE_IDLE; 954 wl->scan.state = WL1271_SCAN_STATE_IDLE;
955 kfree(wl->scan.scanned_ch); 955 kfree(wl->scan.scanned_ch);
956 wl->scan.scanned_ch = NULL; 956 wl->scan.scanned_ch = NULL;
@@ -1439,7 +1439,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1439 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd); 1439 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
1440 wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN); 1440 wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
1441 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", 1441 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
1442 key_conf->alg, key_conf->keyidx, 1442 key_conf->cipher, key_conf->keyidx,
1443 key_conf->keylen, key_conf->flags); 1443 key_conf->keylen, key_conf->flags);
1444 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); 1444 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
1445 1445
@@ -1455,20 +1455,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1455 if (ret < 0) 1455 if (ret < 0)
1456 goto out_unlock; 1456 goto out_unlock;
1457 1457
1458 switch (key_conf->alg) { 1458 switch (key_conf->cipher) {
1459 case ALG_WEP: 1459 case WLAN_CIPHER_SUITE_WEP40:
1460 case WLAN_CIPHER_SUITE_WEP104:
1460 key_type = KEY_WEP; 1461 key_type = KEY_WEP;
1461 1462
1462 key_conf->hw_key_idx = key_conf->keyidx; 1463 key_conf->hw_key_idx = key_conf->keyidx;
1463 break; 1464 break;
1464 case ALG_TKIP: 1465 case WLAN_CIPHER_SUITE_TKIP:
1465 key_type = KEY_TKIP; 1466 key_type = KEY_TKIP;
1466 1467
1467 key_conf->hw_key_idx = key_conf->keyidx; 1468 key_conf->hw_key_idx = key_conf->keyidx;
1468 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); 1469 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
1469 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); 1470 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
1470 break; 1471 break;
1471 case ALG_CCMP: 1472 case WLAN_CIPHER_SUITE_CCMP:
1472 key_type = KEY_AES; 1473 key_type = KEY_AES;
1473 1474
1474 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1475 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -1476,7 +1477,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1476 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); 1477 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
1477 break; 1478 break;
1478 default: 1479 default:
1479 wl1271_error("Unknown key algo 0x%x", key_conf->alg); 1480 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
1480 1481
1481 ret = -EOPNOTSUPP; 1482 ret = -EOPNOTSUPP;
1482 goto out_sleep; 1483 goto out_sleep;
@@ -1633,7 +1634,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1633 if (ret < 0) 1634 if (ret < 0)
1634 goto out; 1635 goto out;
1635 1636
1636 if ((changed && BSS_CHANGED_BEACON_INT) && 1637 if ((changed & BSS_CHANGED_BEACON_INT) &&
1637 (wl->bss_type == BSS_TYPE_IBSS)) { 1638 (wl->bss_type == BSS_TYPE_IBSS)) {
1638 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d", 1639 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
1639 bss_conf->beacon_int); 1640 bss_conf->beacon_int);
@@ -1642,7 +1643,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1642 do_join = true; 1643 do_join = true;
1643 } 1644 }
1644 1645
1645 if ((changed && BSS_CHANGED_BEACON) && 1646 if ((changed & BSS_CHANGED_BEACON) &&
1646 (wl->bss_type == BSS_TYPE_IBSS)) { 1647 (wl->bss_type == BSS_TYPE_IBSS)) {
1647 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 1648 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
1648 1649
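
Besides the cipher-suite conversion, the last two hunks fix a subtle operator bug: BSS_CHANGED_* values are bit flags, and "changed && BSS_CHANGED_BEACON" is a logical test that is true whenever any change bit is set. A short illustration (sketch):

static bool beacon_changed(u32 changed)
{
	/* '&&' is true for any non-zero 'changed', so the old code ran
	 * the beacon path on unrelated BSS updates; only the bitwise
	 * '&' isolates this particular flag */
	return changed & BSS_CHANGED_BEACON;
}
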
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index 019aa79cd9df..94da5dd7723c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -76,7 +76,6 @@ static void wl1271_rx_status(struct wl1271 *wl,
76 76
77static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length) 77static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
78{ 78{
79 struct ieee80211_rx_status rx_status;
80 struct wl1271_rx_descriptor *desc; 79 struct wl1271_rx_descriptor *desc;
81 struct sk_buff *skb; 80 struct sk_buff *skb;
82 u16 *fc; 81 u16 *fc;
@@ -109,14 +108,13 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
109 if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) 108 if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
110 beacon = 1; 109 beacon = 1;
111 110
112 wl1271_rx_status(wl, desc, &rx_status, beacon); 111 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
113 112
114 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, 113 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
115 beacon ? "beacon" : ""); 114 beacon ? "beacon" : "");
116 115
117 skb_trim(skb, skb->len - desc->pad_len); 116 skb_trim(skb, skb->len - desc->pad_len);
118 117
119 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
120 ieee80211_rx_ni(wl->hw, skb); 118 ieee80211_rx_ni(wl->hw, skb);
121} 119}
122 120
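
mac80211 keeps the RX status inside the skb control block, so the driver can fill it in place instead of staging a copy on the stack and memcpy'ing it over. A sketch of the in-place form (the status fields shown are assumptions based on the rest of the driver):

	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	memset(status, 0, sizeof(*status));
	status->band = wl->band;
	status->signal = desc->rssi;
	ieee80211_rx_ni(wl->hw, skb);
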
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.c b/drivers/net/wireless/wl12xx/wl1271_scan.c
index fec43eed8c55..e4950c8e396e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.c
+++ b/drivers/net/wireless/wl12xx/wl1271_scan.c
@@ -215,9 +215,7 @@ void wl1271_scan_stm(struct wl1271 *wl)
215 break; 215 break;
216 216
217 case WL1271_SCAN_STATE_DONE: 217 case WL1271_SCAN_STATE_DONE:
218 mutex_unlock(&wl->mutex);
219 ieee80211_scan_completed(wl->hw, false); 218 ieee80211_scan_completed(wl->hw, false);
220 mutex_lock(&wl->mutex);
221 219
222 kfree(wl->scan.scanned_ch); 220 kfree(wl->scan.scanned_ch);
223 wl->scan.scanned_ch = NULL; 221 wl->scan.scanned_ch = NULL;
@@ -248,7 +246,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
248 246
249 wl->scan.req = req; 247 wl->scan.req = req;
250 248
251 wl->scan.scanned_ch = kzalloc(req->n_channels * 249 wl->scan.scanned_ch = kcalloc(req->n_channels,
252 sizeof(*wl->scan.scanned_ch), 250 sizeof(*wl->scan.scanned_ch),
253 GFP_KERNEL); 251 GFP_KERNEL);
254 wl1271_scan_stm(wl); 252 wl1271_scan_stm(wl);
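
The kzalloc-to-kcalloc switch is a hardening idiom: kcalloc checks the n * size multiplication for overflow and returns NULL rather than handing back a short buffer. Minimal sketch (the element type is a stand-in for wl->scan.scanned_ch):

	struct wl1271_scan_ch *ch;	/* hypothetical element type */

	/* kzalloc(n * sizeof(*ch), ...) can wrap for large n;
	 * kcalloc refuses the allocation instead */
	ch = kcalloc(req->n_channels, sizeof(*ch), GFP_KERNEL);
	if (!ch)
		return -ENOMEM;
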
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/wl1271_sdio.c
index 7059b5cccf0f..f2f04663627c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1271_sdio.c
@@ -29,14 +29,12 @@
29#include <linux/mmc/sdio_ids.h> 29#include <linux/mmc/sdio_ids.h>
30#include <linux/mmc/card.h> 30#include <linux/mmc/card.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#include <linux/wl12xx.h>
32 33
33#include "wl1271.h" 34#include "wl1271.h"
34#include "wl12xx_80211.h" 35#include "wl12xx_80211.h"
35#include "wl1271_io.h" 36#include "wl1271_io.h"
36 37
37
38#define RX71_WL1271_IRQ_GPIO 42
39
40#ifndef SDIO_VENDOR_ID_TI 38#ifndef SDIO_VENDOR_ID_TI
41#define SDIO_VENDOR_ID_TI 0x0097 39#define SDIO_VENDOR_ID_TI 0x0097
42#endif 40#endif
@@ -107,6 +105,8 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
107 int ret; 105 int ret;
108 struct sdio_func *func = wl_to_func(wl); 106 struct sdio_func *func = wl_to_func(wl);
109 107
108 sdio_claim_host(func);
109
110 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 110 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
111 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); 111 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
112 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x", 112 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
@@ -122,9 +122,10 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
122 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); 122 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
123 } 123 }
124 124
125 sdio_release_host(func);
126
125 if (ret) 127 if (ret)
126 wl1271_error("sdio read failed (%d)", ret); 128 wl1271_error("sdio read failed (%d)", ret);
127
128} 129}
129 130
130static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf, 131static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
@@ -133,6 +134,8 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
133 int ret; 134 int ret;
134 struct sdio_func *func = wl_to_func(wl); 135 struct sdio_func *func = wl_to_func(wl);
135 136
137 sdio_claim_host(func);
138
136 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 139 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
137 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); 140 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
138 wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x", 141 wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
@@ -147,26 +150,45 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
147 else 150 else
148 ret = sdio_memcpy_toio(func, addr, buf, len); 151 ret = sdio_memcpy_toio(func, addr, buf, len);
149 } 152 }
153
154 sdio_release_host(func);
155
150 if (ret) 156 if (ret)
151 wl1271_error("sdio write failed (%d)", ret); 157 wl1271_error("sdio write failed (%d)", ret);
158}
159
160static int wl1271_sdio_power_on(struct wl1271 *wl)
161{
162 struct sdio_func *func = wl_to_func(wl);
163
164 sdio_claim_host(func);
165 sdio_enable_func(func);
166 sdio_release_host(func);
152 167
168 return 0;
153} 169}
154 170
155static void wl1271_sdio_set_power(struct wl1271 *wl, bool enable) 171static int wl1271_sdio_power_off(struct wl1271 *wl)
156{ 172{
157 struct sdio_func *func = wl_to_func(wl); 173 struct sdio_func *func = wl_to_func(wl);
158 174
175 sdio_claim_host(func);
176 sdio_disable_func(func);
177 sdio_release_host(func);
178
179 return 0;
180}
181
182static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
183{
159 /* Let the SDIO stack handle wlan_enable control, so we 184 /* Let the SDIO stack handle wlan_enable control, so we
160 * keep host claimed while wlan is in use to keep wl1271 185 * keep host claimed while wlan is in use to keep wl1271
161 * alive. 186 * alive.
162 */ 187 */
163 if (enable) { 188 if (enable)
164 sdio_claim_host(func); 189 return wl1271_sdio_power_on(wl);
165 sdio_enable_func(func); 190 else
166 } else { 191 return wl1271_sdio_power_off(wl);
167 sdio_disable_func(func);
168 sdio_release_host(func);
169 }
170} 192}
171 193
172static struct wl1271_if_operations sdio_ops = { 194static struct wl1271_if_operations sdio_ops = {
@@ -184,6 +206,7 @@ static int __devinit wl1271_probe(struct sdio_func *func,
184 const struct sdio_device_id *id) 206 const struct sdio_device_id *id)
185{ 207{
186 struct ieee80211_hw *hw; 208 struct ieee80211_hw *hw;
209 const struct wl12xx_platform_data *wlan_data;
187 struct wl1271 *wl; 210 struct wl1271 *wl;
188 int ret; 211 int ret;
189 212
@@ -203,13 +226,16 @@ static int __devinit wl1271_probe(struct sdio_func *func,
203 /* Grab access to FN0 for ELP reg. */ 226 /* Grab access to FN0 for ELP reg. */
204 func->card->quirks |= MMC_QUIRK_LENIENT_FN0; 227 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
205 228
206 wl->irq = gpio_to_irq(RX71_WL1271_IRQ_GPIO); 229 wlan_data = wl12xx_get_platform_data();
207 if (wl->irq < 0) { 230 if (IS_ERR(wlan_data)) {
208 ret = wl->irq; 231 ret = PTR_ERR(wlan_data);
209 wl1271_error("could not get irq!"); 232 wl1271_error("missing wlan platform data: %d", ret);
210 goto out_free; 233 goto out_free;
211 } 234 }
212 235
236 wl->irq = wlan_data->irq;
237 wl->ref_clock = wlan_data->board_ref_clock;
238
213 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl); 239 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
214 if (ret < 0) { 240 if (ret < 0) {
215 wl1271_error("request_irq() failed: %d", ret); 241 wl1271_error("request_irq() failed: %d", ret);
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 4cb99c541e2a..ced0a9e2c7e1 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -25,7 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/crc7.h> 26#include <linux/crc7.h>
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28#include <linux/spi/wl12xx.h> 28#include <linux/wl12xx.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "wl1271.h" 31#include "wl1271.h"
@@ -312,10 +312,12 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
312 return IRQ_HANDLED; 312 return IRQ_HANDLED;
313} 313}
314 314
315static void wl1271_spi_set_power(struct wl1271 *wl, bool enable) 315static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
316{ 316{
317 if (wl->set_power) 317 if (wl->set_power)
318 wl->set_power(enable); 318 wl->set_power(enable);
319
320 return 0;
319} 321}
320 322
321static struct wl1271_if_operations spi_ops = { 323static struct wl1271_if_operations spi_ops = {
@@ -370,6 +372,8 @@ static int __devinit wl1271_probe(struct spi_device *spi)
370 goto out_free; 372 goto out_free;
371 } 373 }
372 374
375 wl->ref_clock = pdata->board_ref_clock;
376
373 wl->irq = spi->irq; 377 wl->irq = spi->irq;
374 if (wl->irq < 0) { 378 if (wl->irq < 0) {
375 wl1271_error("irq missing in platform data"); 379 wl1271_error("irq missing in platform data");
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index c592cc2e9fe8..dc0b46c93c4b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -193,7 +193,7 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
193 info = IEEE80211_SKB_CB(skb); 193 info = IEEE80211_SKB_CB(skb);
194 194
195 if (info->control.hw_key && 195 if (info->control.hw_key &&
196 info->control.hw_key->alg == ALG_TKIP) 196 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
197 extra = WL1271_TKIP_IV_SPACE; 197 extra = WL1271_TKIP_IV_SPACE;
198 198
199 if (info->control.hw_key) { 199 if (info->control.hw_key) {
@@ -347,7 +347,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
347 347
348 /* remove TKIP header space if present */ 348 /* remove TKIP header space if present */
349 if (info->control.hw_key && 349 if (info->control.hw_key &&
350 info->control.hw_key->alg == ALG_TKIP) { 350 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
351 int hdrlen = ieee80211_get_hdrlen_from_skb(skb); 351 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
352 memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen); 352 memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
353 skb_pull(skb, WL1271_TKIP_IV_SPACE); 353 skb_pull(skb, WL1271_TKIP_IV_SPACE);
diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
new file mode 100644
index 000000000000..973b11060a8f
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
@@ -0,0 +1,28 @@
1#include <linux/module.h>
2#include <linux/err.h>
3#include <linux/wl12xx.h>
4
5static const struct wl12xx_platform_data *platform_data;
6
7int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
8{
9 if (platform_data)
10 return -EBUSY;
11 if (!data)
12 return -EINVAL;
13
14 platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
15 if (!platform_data)
16 return -ENOMEM;
17
18 return 0;
19}
20
21const struct wl12xx_platform_data *wl12xx_get_platform_data(void)
22{
23 if (!platform_data)
24 return ERR_PTR(-ENODEV);
25
26 return platform_data;
27}
28EXPORT_SYMBOL(wl12xx_get_platform_data);
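
The new wl12xx_platform_data.c is a set-once registry: board code registers its data early, the copy is kmemdup'd so an __initdata source may go away, and drivers retrieve it with ERR_PTR-style errors. A hedged example of the board-side call (the IRQ number and init hook are hypothetical):

static struct wl12xx_platform_data board_wlan_data __initdata = {
	.irq		 = 42,	/* hypothetical, usually gpio_to_irq(...) */
	.board_ref_clock = 2,	/* 38.4 MHz, per the wl1271_boot.c mapping */
};

static void __init board_wlan_init(void)
{
	/* must run before the wl1271 probe; a second call gets -EBUSY */
	if (wl12xx_set_platform_data(&board_wlan_data))
		pr_err("error setting wl12xx platform data\n");
}
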
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index a1cc2d498a1c..420e9e986a18 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -29,7 +29,6 @@
29 29
30#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/ethtool.h>
33#include <linux/init.h> 32#include <linux/init.h>
34#include <linux/interrupt.h> 33#include <linux/interrupt.h>
35#include <linux/in.h> 34#include <linux/in.h>
@@ -1411,15 +1410,6 @@ static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
1411 return wstats; 1410 return wstats;
1412} 1411}
1413 1412
1414static void wl3501_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1415{
1416 strlcpy(info->driver, "wl3501_cs", sizeof(info->driver));
1417}
1418
1419static const struct ethtool_ops ops = {
1420 .get_drvinfo = wl3501_get_drvinfo
1421};
1422
1423/** 1413/**
1424 * wl3501_detach - deletes a driver "instance" 1414 * wl3501_detach - deletes a driver "instance"
1425 * @link - FILL_IN 1415 * @link - FILL_IN
@@ -1905,7 +1895,6 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
1905 this->p_dev = p_dev; 1895 this->p_dev = p_dev;
1906 dev->wireless_data = &this->wireless_data; 1896 dev->wireless_data = &this->wireless_data;
1907 dev->wireless_handlers = &wl3501_handler_def; 1897 dev->wireless_handlers = &wl3501_handler_def;
1908 SET_ETHTOOL_OPS(dev, &ops);
1909 netif_stop_queue(dev); 1898 netif_stop_queue(dev);
1910 p_dev->priv = dev; 1899 p_dev->priv = dev;
1911 1900
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index b2af3c549bb3..87a95bcfee57 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -973,6 +973,7 @@ static void dump_fw_registers(struct zd_chip *chip)
973 973
974static int print_fw_version(struct zd_chip *chip) 974static int print_fw_version(struct zd_chip *chip)
975{ 975{
976 struct wiphy *wiphy = zd_chip_to_mac(chip)->hw->wiphy;
976 int r; 977 int r;
977 u16 version; 978 u16 version;
978 979
@@ -982,6 +983,10 @@ static int print_fw_version(struct zd_chip *chip)
982 return r; 983 return r;
983 984
984 dev_info(zd_chip_dev(chip),"firmware version %04hx\n", version); 985 dev_info(zd_chip_dev(chip),"firmware version %04hx\n", version);
986
987 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version),
988 "%04hx", version);
989
985 return 0; 990 return 0;
986} 991}
987 992
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b50fedcef8ac..630fb8664768 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -135,7 +135,7 @@ static void skb_entry_set_link(union skb_entry *list, unsigned short id)
135static int skb_entry_is_link(const union skb_entry *list) 135static int skb_entry_is_link(const union skb_entry *list)
136{ 136{
137 BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); 137 BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
138 return ((unsigned long)list->skb < PAGE_OFFSET); 138 return (unsigned long)list->skb < PAGE_OFFSET;
139} 139}
140 140
141/* 141/*
@@ -203,8 +203,8 @@ static void rx_refill_timeout(unsigned long data)
203 203
204static int netfront_tx_slot_available(struct netfront_info *np) 204static int netfront_tx_slot_available(struct netfront_info *np)
205{ 205{
206 return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < 206 return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
207 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); 207 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
208} 208}
209 209
210static void xennet_maybe_wake_tx(struct net_device *dev) 210static void xennet_maybe_wake_tx(struct net_device *dev)
@@ -1395,7 +1395,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1395} 1395}
1396 1396
1397/* Common code used when first setting up, and when resuming. */ 1397/* Common code used when first setting up, and when resuming. */
1398static int talk_to_backend(struct xenbus_device *dev, 1398static int talk_to_netback(struct xenbus_device *dev,
1399 struct netfront_info *info) 1399 struct netfront_info *info)
1400{ 1400{
1401 const char *message; 1401 const char *message;
@@ -1545,7 +1545,7 @@ static int xennet_connect(struct net_device *dev)
1545 return -ENODEV; 1545 return -ENODEV;
1546 } 1546 }
1547 1547
1548 err = talk_to_backend(np->xbdev, np); 1548 err = talk_to_netback(np->xbdev, np);
1549 if (err) 1549 if (err)
1550 return err; 1550 return err;
1551 1551
@@ -1599,7 +1599,7 @@ static int xennet_connect(struct net_device *dev)
1599/** 1599/**
1600 * Callback received when the backend's state changes. 1600 * Callback received when the backend's state changes.
1601 */ 1601 */
1602static void backend_changed(struct xenbus_device *dev, 1602static void netback_changed(struct xenbus_device *dev,
1603 enum xenbus_state backend_state) 1603 enum xenbus_state backend_state)
1604{ 1604{
1605 struct netfront_info *np = dev_get_drvdata(&dev->dev); 1605 struct netfront_info *np = dev_get_drvdata(&dev->dev);
@@ -1801,7 +1801,7 @@ static struct xenbus_driver netfront_driver = {
1801 .probe = netfront_probe, 1801 .probe = netfront_probe,
1802 .remove = __devexit_p(xennet_remove), 1802 .remove = __devexit_p(xennet_remove),
1803 .resume = netfront_resume, 1803 .resume = netfront_resume,
1804 .otherend_changed = backend_changed, 1804 .otherend_changed = netback_changed,
1805}; 1805};
1806 1806
1807static int __init netif_init(void) 1807static int __init netif_init(void)
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index ecbbb688eba0..f3f8be5a35fa 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -641,7 +641,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
641 skb_put(skb, len); /* Tell the skb how much data we got */ 641 skb_put(skb, len); /* Tell the skb how much data we got */
642 642
643 skb->protocol = eth_type_trans(skb, dev); 643 skb->protocol = eth_type_trans(skb, dev);
644 skb->ip_summed = CHECKSUM_NONE; 644 skb_checksum_none_assert(skb);
645 645
646 dev->stats.rx_packets++; 646 dev->stats.rx_packets++;
647 dev->stats.rx_bytes += len; 647 dev->stats.rx_bytes += len;
@@ -1269,6 +1269,16 @@ static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
1269 return 0; 1269 return 0;
1270} 1270}
1271 1271
1272#ifdef CONFIG_NET_POLL_CONTROLLER
1273static void
1274xemaclite_poll_controller(struct net_device *ndev)
1275{
1276 disable_irq(ndev->irq);
1277 xemaclite_interrupt(ndev->irq, ndev);
1278 enable_irq(ndev->irq);
1279}
1280#endif
1281
1272static struct net_device_ops xemaclite_netdev_ops = { 1282static struct net_device_ops xemaclite_netdev_ops = {
1273 .ndo_open = xemaclite_open, 1283 .ndo_open = xemaclite_open,
1274 .ndo_stop = xemaclite_close, 1284 .ndo_stop = xemaclite_close,
@@ -1276,6 +1286,9 @@ static struct net_device_ops xemaclite_netdev_ops = {
1276 .ndo_set_mac_address = xemaclite_set_mac_address, 1286 .ndo_set_mac_address = xemaclite_set_mac_address,
1277 .ndo_tx_timeout = xemaclite_tx_timeout, 1287 .ndo_tx_timeout = xemaclite_tx_timeout,
1278 .ndo_get_stats = xemaclite_get_stats, 1288 .ndo_get_stats = xemaclite_get_stats,
1289#ifdef CONFIG_NET_POLL_CONTROLLER
1290 .ndo_poll_controller = xemaclite_poll_controller,
1291#endif
1279}; 1292};
1280 1293
1281/* Match table for OF platform binding */ 1294/* Match table for OF platform binding */
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 4eb67aed68dd..cd1b3dcd61db 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -646,7 +646,7 @@ static int yellowfin_open(struct net_device *dev)
646 init_timer(&yp->timer); 646 init_timer(&yp->timer);
647 yp->timer.expires = jiffies + 3*HZ; 647 yp->timer.expires = jiffies + 3*HZ;
648 yp->timer.data = (unsigned long)dev; 648 yp->timer.data = (unsigned long)dev;
649 yp->timer.function = &yellowfin_timer; /* timer handler */ 649 yp->timer.function = yellowfin_timer; /* timer handler */
650 add_timer(&yp->timer); 650 add_timer(&yp->timer);
651 651
652 return 0; 652 return 0;
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index f0037eefd44e..0f4ef8769a3d 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -208,6 +208,7 @@ struct qdio_dev_perf_stat {
208 unsigned int eqbs_partial; 208 unsigned int eqbs_partial;
209 unsigned int sqbs; 209 unsigned int sqbs;
210 unsigned int sqbs_partial; 210 unsigned int sqbs_partial;
211 unsigned int int_discarded;
211} ____cacheline_aligned; 212} ____cacheline_aligned;
212 213
213struct qdio_queue_perf_stat { 214struct qdio_queue_perf_stat {
@@ -222,6 +223,10 @@ struct qdio_queue_perf_stat {
222 unsigned int nr_sbal_total; 223 unsigned int nr_sbal_total;
223}; 224};
224 225
226enum qdio_queue_irq_states {
227 QDIO_QUEUE_IRQS_DISABLED,
228};
229
225struct qdio_input_q { 230struct qdio_input_q {
226 /* input buffer acknowledgement flag */ 231 /* input buffer acknowledgement flag */
227 int polling; 232 int polling;
@@ -231,6 +236,10 @@ struct qdio_input_q {
231 int ack_count; 236 int ack_count;
232 /* last time of noticing incoming data */ 237 /* last time of noticing incoming data */
233 u64 timestamp; 238 u64 timestamp;
239 /* upper-layer polling flag */
240 unsigned long queue_irq_state;
241 /* callback to start upper-layer polling */
242 void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
234}; 243};
235 244
236struct qdio_output_q { 245struct qdio_output_q {
@@ -399,6 +408,26 @@ static inline int multicast_outbound(struct qdio_q *q)
399#define sub_buf(bufnr, dec) \ 408#define sub_buf(bufnr, dec) \
400 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) 409 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
401 410
411#define queue_irqs_enabled(q) \
412 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
413#define queue_irqs_disabled(q) \
414 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
415
416#define TIQDIO_SHARED_IND 63
417
418/* device state change indicators */
419struct indicator_t {
420 u32 ind; /* u32 because of compare-and-swap performance */
421 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
422};
423
424extern struct indicator_t *q_indicators;
425
426static inline int shared_ind(struct qdio_irq *irq_ptr)
427{
428 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
429}
430
402/* prototypes for thin interrupt */ 431/* prototypes for thin interrupt */
403void qdio_setup_thinint(struct qdio_irq *irq_ptr); 432void qdio_setup_thinint(struct qdio_irq *irq_ptr);
404int qdio_establish_thinint(struct qdio_irq *irq_ptr); 433int qdio_establish_thinint(struct qdio_irq *irq_ptr);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 6ce83f56d537..28868e7471a5 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -56,9 +56,16 @@ static int qstat_show(struct seq_file *m, void *v)
56 56
57 seq_printf(m, "DSCI: %d nr_used: %d\n", 57 seq_printf(m, "DSCI: %d nr_used: %d\n",
58 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used)); 58 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
59 seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); 59 seq_printf(m, "ftc: %d last_move: %d\n",
60 seq_printf(m, "polling: %d ack start: %d ack count: %d\n", 60 q->first_to_check, q->last_move);
61 q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); 61 if (q->is_input_q) {
62 seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
63 q->u.in.polling, q->u.in.ack_start,
64 q->u.in.ack_count);
65 seq_printf(m, "IRQs disabled: %u\n",
66 test_bit(QDIO_QUEUE_IRQS_DISABLED,
67 &q->u.in.queue_irq_state));
68 }
62 seq_printf(m, "SBAL states:\n"); 69 seq_printf(m, "SBAL states:\n");
63 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 70 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
64 71
@@ -113,22 +120,6 @@ static int qstat_show(struct seq_file *m, void *v)
113 return 0; 120 return 0;
114} 121}
115 122
116static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
117 size_t count, loff_t *off)
118{
119 struct seq_file *seq = file->private_data;
120 struct qdio_q *q = seq->private;
121
122 if (!q)
123 return 0;
124 if (q->is_input_q)
125 xchg(q->irq_ptr->dsci, 1);
126 local_bh_disable();
127 tasklet_schedule(&q->tasklet);
128 local_bh_enable();
129 return count;
130}
131
132static int qstat_seq_open(struct inode *inode, struct file *filp) 123static int qstat_seq_open(struct inode *inode, struct file *filp)
133{ 124{
134 return single_open(filp, qstat_show, 125 return single_open(filp, qstat_show,
@@ -139,7 +130,6 @@ static const struct file_operations debugfs_fops = {
139 .owner = THIS_MODULE, 130 .owner = THIS_MODULE,
140 .open = qstat_seq_open, 131 .open = qstat_seq_open,
141 .read = seq_read, 132 .read = seq_read,
142 .write = qstat_seq_write,
143 .llseek = seq_lseek, 133 .llseek = seq_lseek,
144 .release = single_release, 134 .release = single_release,
145}; 135};
@@ -166,7 +156,8 @@ static char *qperf_names[] = {
166 "QEBSM eqbs", 156 "QEBSM eqbs",
167 "QEBSM eqbs partial", 157 "QEBSM eqbs partial",
168 "QEBSM sqbs", 158 "QEBSM sqbs",
169 "QEBSM sqbs partial" 159 "QEBSM sqbs partial",
160 "Discarded interrupts"
170}; 161};
171 162
172static int qperf_show(struct seq_file *m, void *v) 163static int qperf_show(struct seq_file *m, void *v)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 00520f9a7a8e..5fcfa7f9e9ef 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -884,8 +884,19 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
884 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 884 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
885 return; 885 return;
886 886
887 for_each_input_queue(irq_ptr, q, i) 887 for_each_input_queue(irq_ptr, q, i) {
888 tasklet_schedule(&q->tasklet); 888 if (q->u.in.queue_start_poll) {
889 /* skip if polling is enabled or already in work */
890 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
891 &q->u.in.queue_irq_state)) {
892 qperf_inc(q, int_discarded);
893 continue;
894 }
895 q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
896 q->irq_ptr->int_parm);
897 } else
898 tasklet_schedule(&q->tasklet);
899 }
889 900
890 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) 901 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
891 return; 902 return;
@@ -1519,6 +1530,129 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1519} 1530}
1520EXPORT_SYMBOL_GPL(do_QDIO); 1531EXPORT_SYMBOL_GPL(do_QDIO);
1521 1532
1533/**
 1534 * qdio_start_irq - enable interrupt processing for the device
1535 * @cdev: associated ccw_device for the qdio subchannel
1536 * @nr: input queue number
1537 *
1538 * Return codes
1539 * 0 - success
1540 * 1 - irqs not started since new data is available
1541 */
1542int qdio_start_irq(struct ccw_device *cdev, int nr)
1543{
1544 struct qdio_q *q;
1545 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1546
1547 if (!irq_ptr)
1548 return -ENODEV;
1549 q = irq_ptr->input_qs[nr];
1550
1551 WARN_ON(queue_irqs_enabled(q));
1552
1553 if (!shared_ind(q->irq_ptr))
1554 xchg(q->irq_ptr->dsci, 0);
1555
1556 qdio_stop_polling(q);
1557 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1558
1559 /*
1560 * We need to check again to not lose initiative after
1561 * resetting the ACK state.
1562 */
1563 if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci)
1564 goto rescan;
1565 if (!qdio_inbound_q_done(q))
1566 goto rescan;
1567 return 0;
1568
1569rescan:
1570 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1571 &q->u.in.queue_irq_state))
1572 return 0;
1573 else
1574 return 1;
1575
1576}
1577EXPORT_SYMBOL(qdio_start_irq);
1578
1579/**
1580 * qdio_get_next_buffers - process input buffers
1581 * @cdev: associated ccw_device for the qdio subchannel
1582 * @nr: input queue number
1583 * @bufnr: first filled buffer number
1584 * @error: buffers are in error state
1585 *
1586 * Return codes
1587 * < 0 - error
1588 * = 0 - no new buffers found
1589 * > 0 - number of processed buffers
1590 */
1591int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1592 int *error)
1593{
1594 struct qdio_q *q;
1595 int start, end;
1596 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1597
1598 if (!irq_ptr)
1599 return -ENODEV;
1600 q = irq_ptr->input_qs[nr];
1601 WARN_ON(queue_irqs_enabled(q));
1602
1603 qdio_sync_after_thinint(q);
1604
1605 /*
1606 * The interrupt could be caused by a PCI request. Check the
1607 * PCI capable outbound queues.
1608 */
1609 qdio_check_outbound_after_thinint(q);
1610
1611 if (!qdio_inbound_q_moved(q))
1612 return 0;
1613
1614 /* Note: upper-layer MUST stop processing immediately here ... */
1615 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
1616 return -EIO;
1617
1618 start = q->first_to_kick;
1619 end = q->first_to_check;
1620 *bufnr = start;
1621 *error = q->qdio_error;
1622
1623 /* for the next time */
1624 q->first_to_kick = end;
1625 q->qdio_error = 0;
1626 return sub_buf(end, start);
1627}
1628EXPORT_SYMBOL(qdio_get_next_buffers);
1629
1630/**
1631 * qdio_stop_irq - disable interrupt processing for the device
1632 * @cdev: associated ccw_device for the qdio subchannel
1633 * @nr: input queue number
1634 *
1635 * Return codes
1636 * 0 - interrupts were already disabled
1637 * 1 - interrupts successfully disabled
1638 */
1639int qdio_stop_irq(struct ccw_device *cdev, int nr)
1640{
1641 struct qdio_q *q;
1642 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1643
1644 if (!irq_ptr)
1645 return -ENODEV;
1646 q = irq_ptr->input_qs[nr];
1647
1648 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1649 &q->u.in.queue_irq_state))
1650 return 0;
1651 else
1652 return 1;
1653}
1654EXPORT_SYMBOL(qdio_stop_irq);
1655
1522static int __init init_QDIO(void) 1656static int __init init_QDIO(void)
1523{ 1657{
1524 int rc; 1658 int rc;
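
The three exports form a polling contract for upper layers: qdio_stop_irq() parks a queue, qdio_get_next_buffers() harvests while parked, and qdio_start_irq() rearms interrupts, returning 1 when data raced in so the caller must keep polling. A hedged sketch of a consumer (qeth_l2_poll later in this series is the real one):

static int example_poll_once(struct ccw_device *cdev, int nr)
{
	int bufnr, error, count;

	count = qdio_get_next_buffers(cdev, nr, &bufnr, &error);
	if (count < 0)
		return count;	/* -ENODEV or -EIO: stop polling */

	/* ... process 'count' input buffers starting at 'bufnr',
	 * honouring 'error', then requeue them via do_QDIO() ... */

	if (qdio_start_irq(cdev, nr))
		return 1;	/* more data arrived, poll again */
	return 0;		/* idle, interrupts re-enabled */
}
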
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 34c7e4046df4..a13cf7ec64b2 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -161,6 +161,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
161 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); 161 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
162 162
163 q->is_input_q = 1; 163 q->is_input_q = 1;
164 q->u.in.queue_start_poll = qdio_init->queue_start_poll;
164 setup_storage_lists(q, irq_ptr, input_sbal_array, i); 165 setup_storage_lists(q, irq_ptr, input_sbal_array, i);
165 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; 166 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
166 167
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8daf1b99f153..752dbee06af5 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -25,24 +25,20 @@
25 */ 25 */
26#define TIQDIO_NR_NONSHARED_IND 63 26#define TIQDIO_NR_NONSHARED_IND 63
27#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1) 27#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
28#define TIQDIO_SHARED_IND 63
29 28
30/* list of thin interrupt input queues */ 29/* list of thin interrupt input queues */
31static LIST_HEAD(tiq_list); 30static LIST_HEAD(tiq_list);
32DEFINE_MUTEX(tiq_list_lock); 31DEFINE_MUTEX(tiq_list_lock);
33 32
34/* adapter local summary indicator */ 33/* adapter local summary indicator */
35static unsigned char *tiqdio_alsi; 34static u8 *tiqdio_alsi;
36 35
37/* device state change indicators */ 36struct indicator_t *q_indicators;
38struct indicator_t {
39 u32 ind; /* u32 because of compare-and-swap performance */
40 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
41};
42static struct indicator_t *q_indicators;
43 37
44static int css_qdio_omit_svs; 38static int css_qdio_omit_svs;
45 39
40static u64 last_ai_time;
41
46static inline unsigned long do_clear_global_summary(void) 42static inline unsigned long do_clear_global_summary(void)
47{ 43{
48 register unsigned long __fn asm("1") = 3; 44 register unsigned long __fn asm("1") = 3;
@@ -116,59 +112,73 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
116 } 112 }
117} 113}
118 114
119static inline int shared_ind(struct qdio_irq *irq_ptr) 115static inline int shared_ind_used(void)
120{ 116{
121 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; 117 return atomic_read(&q_indicators[TIQDIO_SHARED_IND].count);
122} 118}
123 119
124/** 120/**
125 * tiqdio_thinint_handler - thin interrupt handler for qdio 121 * tiqdio_thinint_handler - thin interrupt handler for qdio
126 * @ind: pointer to adapter local summary indicator 122 * @alsi: pointer to adapter local summary indicator
127 * @drv_data: NULL 123 * @data: NULL
128 */ 124 */
129static void tiqdio_thinint_handler(void *ind, void *drv_data) 125static void tiqdio_thinint_handler(void *alsi, void *data)
130{ 126{
131 struct qdio_q *q; 127 struct qdio_q *q;
132 128
129 last_ai_time = S390_lowcore.int_clock;
130
133 /* 131 /*
134 * SVS only when needed: issue SVS to benefit from iqdio interrupt 132 * SVS only when needed: issue SVS to benefit from iqdio interrupt
135 * avoidance (SVS clears adapter interrupt suppression overwrite) 133 * avoidance (SVS clears adapter interrupt suppression overwrite).
136 */ 134 */
137 if (!css_qdio_omit_svs) 135 if (!css_qdio_omit_svs)
138 do_clear_global_summary(); 136 do_clear_global_summary();
139 137
140 /* 138 /* reset local summary indicator */
141 * reset local summary indicator (tiqdio_alsi) to stop adapter 139 if (shared_ind_used())
142 * interrupts for now 140 xchg(tiqdio_alsi, 0);
143 */
144 xchg((u8 *)ind, 0);
145 141
146 /* protect tiq_list entries, only changed in activate or shutdown */ 142 /* protect tiq_list entries, only changed in activate or shutdown */
147 rcu_read_lock(); 143 rcu_read_lock();
148 144
149 /* check for work on all inbound thinint queues */ 145 /* check for work on all inbound thinint queues */
150 list_for_each_entry_rcu(q, &tiq_list, entry) 146 list_for_each_entry_rcu(q, &tiq_list, entry) {
147
151 /* only process queues from changed sets */ 148 /* only process queues from changed sets */
152 if (*q->irq_ptr->dsci) { 149 if (!*q->irq_ptr->dsci)
153 qperf_inc(q, adapter_int); 150 continue;
154 151
152 if (q->u.in.queue_start_poll) {
153 /* skip if polling is enabled or already in work */
154 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
155 &q->u.in.queue_irq_state)) {
156 qperf_inc(q, int_discarded);
157 continue;
158 }
159
160 /* avoid dsci clear here, done after processing */
161 q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
162 q->irq_ptr->int_parm);
163 } else {
155 /* only clear it if the indicator is non-shared */ 164 /* only clear it if the indicator is non-shared */
156 if (!shared_ind(q->irq_ptr)) 165 if (!shared_ind(q->irq_ptr))
157 xchg(q->irq_ptr->dsci, 0); 166 xchg(q->irq_ptr->dsci, 0);
158 /* 167 /*
 159 * don't call inbound processing directly since 168 * Schedule inbound processing rather than calling
 160 * that could starve other thinint queues 169 * it directly; a direct call could starve other thinint queues.
161 */ 170 */
162 tasklet_schedule(&q->tasklet); 171 tasklet_schedule(&q->tasklet);
163 } 172 }
164 173 qperf_inc(q, adapter_int);
174 }
165 rcu_read_unlock(); 175 rcu_read_unlock();
166 176
167 /* 177 /*
168 * if we used the shared indicator clear it now after all queues 178 * If the shared indicator was used clear it now after all queues
169 * were processed 179 * were processed.
170 */ 180 */
171 if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) { 181 if (shared_ind_used()) {
172 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); 182 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
173 183
174 /* prevent racing */ 184 /* prevent racing */
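
Both the PCI and the thin-interrupt handler now gate on the same atomic bit: whoever sets QDIO_QUEUE_IRQS_DISABLED first hands the queue to the poller, and later interrupts for a queue already being polled are only counted. The gate, isolated as a sketch:

	/* skip if polling is enabled or the queue is already in work */
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state)) {
		qperf_inc(q, int_discarded);
		return;
	}
	/* kick the upper layer's poller, e.g. a napi_schedule() wrapper */
	q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
				 q->irq_ptr->int_parm);
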
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 977bb4d4ed15..456b18743397 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -100,6 +100,6 @@ config QETH_IPV6
100 100
101config CCWGROUP 101config CCWGROUP
102 tristate 102 tristate
103 default (LCS || CTCM || QETH) 103 default (LCS || CTCM || QETH || CLAW)
104 104
105endmenu 105endmenu
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index d1257768be90..6be43eb126b4 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -676,6 +676,7 @@ enum qeth_discipline_id {
676}; 676};
677 677
678struct qeth_discipline { 678struct qeth_discipline {
679 void (*start_poll)(struct ccw_device *, int, unsigned long);
679 qdio_handler_t *input_handler; 680 qdio_handler_t *input_handler;
680 qdio_handler_t *output_handler; 681 qdio_handler_t *output_handler;
681 int (*recover)(void *ptr); 682 int (*recover)(void *ptr);
@@ -702,6 +703,16 @@ struct qeth_skb_data {
702#define QETH_SKB_MAGIC 0x71657468 703#define QETH_SKB_MAGIC 0x71657468
703#define QETH_SIGA_CC2_RETRIES 3 704#define QETH_SIGA_CC2_RETRIES 3
704 705
706struct qeth_rx {
707 int b_count;
708 int b_index;
709 struct qdio_buffer_element *b_element;
710 int e_offset;
711 int qdio_err;
712};
713
714#define QETH_NAPI_WEIGHT 128
715
705struct qeth_card { 716struct qeth_card {
706 struct list_head list; 717 struct list_head list;
707 enum qeth_card_states state; 718 enum qeth_card_states state;
@@ -749,6 +760,8 @@ struct qeth_card {
749 debug_info_t *debug; 760 debug_info_t *debug;
750 struct mutex conf_mutex; 761 struct mutex conf_mutex;
751 struct mutex discipline_mutex; 762 struct mutex discipline_mutex;
763 struct napi_struct napi;
764 struct qeth_rx rx;
752}; 765};
753 766
754struct qeth_card_list_struct { 767struct qeth_card_list_struct {
@@ -831,6 +844,10 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
831 struct qdio_buffer *, struct qdio_buffer_element **, int *, 844 struct qdio_buffer *, struct qdio_buffer_element **, int *,
832 struct qeth_hdr **); 845 struct qeth_hdr **);
833void qeth_schedule_recovery(struct qeth_card *); 846void qeth_schedule_recovery(struct qeth_card *);
847void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
848void qeth_qdio_input_handler(struct ccw_device *,
849 unsigned int, unsigned int, int,
850 int, unsigned long);
834void qeth_qdio_output_handler(struct ccw_device *, unsigned int, 851void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
835 int, int, int, unsigned long); 852 int, int, int, unsigned long);
836void qeth_clear_ipacmd_list(struct qeth_card *); 853void qeth_clear_ipacmd_list(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3a5a18a0fc28..764267062601 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2911,6 +2911,27 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2911 } 2911 }
2912} 2912}
2913 2913
2914void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
2915 unsigned long card_ptr)
2916{
2917 struct qeth_card *card = (struct qeth_card *)card_ptr;
2918
2919 if (card->dev)
2920 napi_schedule(&card->napi);
2921}
2922EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
2923
2924void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
2925 unsigned int queue, int first_element, int count,
2926 unsigned long card_ptr)
2927{
2928 struct qeth_card *card = (struct qeth_card *)card_ptr;
2929
2930 if (qdio_err)
2931 qeth_schedule_recovery(card);
2932}
2933EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
2934
2914void qeth_qdio_output_handler(struct ccw_device *ccwdev, 2935void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2915 unsigned int qdio_error, int __queue, int first_element, 2936 unsigned int qdio_error, int __queue, int first_element,
2916 int count, unsigned long card_ptr) 2937 int count, unsigned long card_ptr)
@@ -3843,6 +3864,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
3843 init_data.no_output_qs = card->qdio.no_out_queues; 3864 init_data.no_output_qs = card->qdio.no_out_queues;
3844 init_data.input_handler = card->discipline.input_handler; 3865 init_data.input_handler = card->discipline.input_handler;
3845 init_data.output_handler = card->discipline.output_handler; 3866 init_data.output_handler = card->discipline.output_handler;
3867 init_data.queue_start_poll = card->discipline.start_poll;
3846 init_data.int_parm = (unsigned long) card; 3868 init_data.int_parm = (unsigned long) card;
3847 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; 3869 init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
3848 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; 3870 init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
@@ -4513,8 +4535,8 @@ static struct {
4513/* 20 */{"queue 1 buffer usage"}, 4535/* 20 */{"queue 1 buffer usage"},
4514 {"queue 2 buffer usage"}, 4536 {"queue 2 buffer usage"},
4515 {"queue 3 buffer usage"}, 4537 {"queue 3 buffer usage"},
4516 {"rx handler time"}, 4538 {"rx poll time"},
4517 {"rx handler count"}, 4539 {"rx poll count"},
4518 {"rx do_QDIO time"}, 4540 {"rx do_QDIO time"},
4519 {"rx do_QDIO count"}, 4541 {"rx do_QDIO count"},
4520 {"tx handler time"}, 4542 {"tx handler time"},
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 830d63524d61..01c3c1f77879 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -407,29 +407,25 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
407 return rc; 407 return rc;
408} 408}
409 409
410static void qeth_l2_process_inbound_buffer(struct qeth_card *card, 410static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
411 struct qeth_qdio_buffer *buf, int index) 411 int budget, int *done)
412{ 412{
413 struct qdio_buffer_element *element; 413 int work_done = 0;
414 struct sk_buff *skb; 414 struct sk_buff *skb;
415 struct qeth_hdr *hdr; 415 struct qeth_hdr *hdr;
416 int offset;
417 unsigned int len; 416 unsigned int len;
418 417
419 /* get first element of current buffer */ 418 *done = 0;
420 element = (struct qdio_buffer_element *)&buf->buffer->element[0]; 419 BUG_ON(!budget);
421 offset = 0; 420 while (budget) {
422 if (card->options.performance_stats) 421 skb = qeth_core_get_next_skb(card,
423 card->perf_stats.bufs_rec++; 422 card->qdio.in_q->bufs[card->rx.b_index].buffer,
424 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element, 423 &card->rx.b_element, &card->rx.e_offset, &hdr);
425 &offset, &hdr))) { 424 if (!skb) {
426 skb->dev = card->dev; 425 *done = 1;
427 /* is device UP ? */ 426 break;
428 if (!(card->dev->flags & IFF_UP)) {
429 dev_kfree_skb_any(skb);
430 continue;
431 } 427 }
432 428 skb->dev = card->dev;
433 switch (hdr->hdr.l2.id) { 429 switch (hdr->hdr.l2.id) {
434 case QETH_HEADER_TYPE_LAYER2: 430 case QETH_HEADER_TYPE_LAYER2:
435 skb->pkt_type = PACKET_HOST; 431 skb->pkt_type = PACKET_HOST;
@@ -441,7 +437,7 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
441 if (skb->protocol == htons(ETH_P_802_2)) 437 if (skb->protocol == htons(ETH_P_802_2))
442 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 438 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
443 len = skb->len; 439 len = skb->len;
444 netif_rx(skb); 440 netif_receive_skb(skb);
445 break; 441 break;
446 case QETH_HEADER_TYPE_OSN: 442 case QETH_HEADER_TYPE_OSN:
447 if (card->info.type == QETH_CARD_TYPE_OSN) { 443 if (card->info.type == QETH_CARD_TYPE_OSN) {
@@ -459,9 +455,87 @@ static void qeth_l2_process_inbound_buffer(struct qeth_card *card,
459 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 455 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
460 continue; 456 continue;
461 } 457 }
458 work_done++;
459 budget--;
462 card->stats.rx_packets++; 460 card->stats.rx_packets++;
463 card->stats.rx_bytes += len; 461 card->stats.rx_bytes += len;
464 } 462 }
463 return work_done;
464}
465
466static int qeth_l2_poll(struct napi_struct *napi, int budget)
467{
468 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
469 int work_done = 0;
470 struct qeth_qdio_buffer *buffer;
471 int done;
472 int new_budget = budget;
473
474 if (card->options.performance_stats) {
475 card->perf_stats.inbound_cnt++;
476 card->perf_stats.inbound_start_time = qeth_get_micros();
477 }
478
479 while (1) {
480 if (!card->rx.b_count) {
481 card->rx.qdio_err = 0;
482 card->rx.b_count = qdio_get_next_buffers(
483 card->data.ccwdev, 0, &card->rx.b_index,
484 &card->rx.qdio_err);
485 if (card->rx.b_count <= 0) {
486 card->rx.b_count = 0;
487 break;
488 }
489 card->rx.b_element =
490 &card->qdio.in_q->bufs[card->rx.b_index]
491 .buffer->element[0];
492 card->rx.e_offset = 0;
493 }
494
495 while (card->rx.b_count) {
496 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
497 if (!(card->rx.qdio_err &&
498 qeth_check_qdio_errors(card, buffer->buffer,
499 card->rx.qdio_err, "qinerr")))
500 work_done += qeth_l2_process_inbound_buffer(
501 card, new_budget, &done);
502 else
503 done = 1;
504
505 if (done) {
506 if (card->options.performance_stats)
507 card->perf_stats.bufs_rec++;
508 qeth_put_buffer_pool_entry(card,
509 buffer->pool_entry);
510 qeth_queue_input_buffer(card, card->rx.b_index);
511 card->rx.b_count--;
512 if (card->rx.b_count) {
513 card->rx.b_index =
514 (card->rx.b_index + 1) %
515 QDIO_MAX_BUFFERS_PER_Q;
516 card->rx.b_element =
517 &card->qdio.in_q
518 ->bufs[card->rx.b_index]
519 .buffer->element[0];
520 card->rx.e_offset = 0;
521 }
522 }
523
524 if (work_done >= budget)
525 goto out;
526 else
527 new_budget = budget - work_done;
528 }
529 }
530
531 napi_complete(napi);
532 if (qdio_start_irq(card->data.ccwdev, 0))
533 napi_schedule(&card->napi);
534out:
535 if (card->options.performance_stats)
536 card->perf_stats.inbound_time += qeth_get_micros() -
537 card->perf_stats.inbound_start_time;
538 return work_done;
465} 539}
466 540
467static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, 541static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
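
qeth_l2_poll above follows the standard NAPI contract: consume at most 'budget' packets, report the work actually done, and only call napi_complete() and re-enable device interrupts when under budget. Reduced to its skeleton (the example_* names are hypothetical):

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv,
						 napi);
	int work_done = example_rx(priv, budget);	/* hypothetical rx loop */

	if (work_done < budget) {
		napi_complete(napi);
		/* hypothetical helper; qeth rearms via qdio_start_irq() */
		example_enable_rx_irq(priv);
	}
	return work_done;
}
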
@@ -755,49 +829,10 @@ tx_drop:
755 return NETDEV_TX_OK; 829 return NETDEV_TX_OK;
756} 830}
757 831
758static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
759 unsigned int qdio_err, unsigned int queue,
760 int first_element, int count, unsigned long card_ptr)
761{
762 struct net_device *net_dev;
763 struct qeth_card *card;
764 struct qeth_qdio_buffer *buffer;
765 int index;
766 int i;
767
768 card = (struct qeth_card *) card_ptr;
769 net_dev = card->dev;
770 if (card->options.performance_stats) {
771 card->perf_stats.inbound_cnt++;
772 card->perf_stats.inbound_start_time = qeth_get_micros();
773 }
774 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
775 QETH_CARD_TEXT(card, 1, "qdinchk");
776 QETH_CARD_TEXT_(card, 1, "%04X%04X", first_element,
777 count);
778 QETH_CARD_TEXT_(card, 1, "%04X", queue);
779 qeth_schedule_recovery(card);
780 return;
781 }
782 for (i = first_element; i < (first_element + count); ++i) {
783 index = i % QDIO_MAX_BUFFERS_PER_Q;
784 buffer = &card->qdio.in_q->bufs[index];
785 if (!(qdio_err &&
786 qeth_check_qdio_errors(card, buffer->buffer, qdio_err,
787 "qinerr")))
788 qeth_l2_process_inbound_buffer(card, buffer, index);
789 /* clear buffer and give back to hardware */
790 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
791 qeth_queue_input_buffer(card, index);
792 }
793 if (card->options.performance_stats)
794 card->perf_stats.inbound_time += qeth_get_micros() -
795 card->perf_stats.inbound_start_time;
796}
797
798static int qeth_l2_open(struct net_device *dev) 832static int qeth_l2_open(struct net_device *dev)
799{ 833{
800 struct qeth_card *card = dev->ml_priv; 834 struct qeth_card *card = dev->ml_priv;
835 int rc = 0;
801 836
802 QETH_CARD_TEXT(card, 4, "qethopen"); 837 QETH_CARD_TEXT(card, 4, "qethopen");
803 if (card->state != CARD_STATE_SOFTSETUP) 838 if (card->state != CARD_STATE_SOFTSETUP)
@@ -814,18 +849,24 @@ static int qeth_l2_open(struct net_device *dev)
814 849
815 if (!card->lan_online && netif_carrier_ok(dev)) 850 if (!card->lan_online && netif_carrier_ok(dev))
816 netif_carrier_off(dev); 851 netif_carrier_off(dev);
817 return 0; 852 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
853 napi_enable(&card->napi);
854 napi_schedule(&card->napi);
855 } else
856 rc = -EIO;
857 return rc;
818} 858}
819 859
820
821static int qeth_l2_stop(struct net_device *dev) 860static int qeth_l2_stop(struct net_device *dev)
822{ 861{
823 struct qeth_card *card = dev->ml_priv; 862 struct qeth_card *card = dev->ml_priv;
824 863
825 QETH_CARD_TEXT(card, 4, "qethstop"); 864 QETH_CARD_TEXT(card, 4, "qethstop");
826 netif_tx_disable(dev); 865 netif_tx_disable(dev);
827 if (card->state == CARD_STATE_UP) 866 if (card->state == CARD_STATE_UP) {
828 card->state = CARD_STATE_SOFTSETUP; 867 card->state = CARD_STATE_SOFTSETUP;
868 napi_disable(&card->napi);
869 }
829 return 0; 870 return 0;
830} 871}
831 872
@@ -836,8 +877,9 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
836 INIT_LIST_HEAD(&card->vid_list); 877 INIT_LIST_HEAD(&card->vid_list);
837 INIT_LIST_HEAD(&card->mc_list); 878 INIT_LIST_HEAD(&card->mc_list);
838 card->options.layer2 = 1; 879 card->options.layer2 = 1;
880 card->discipline.start_poll = qeth_qdio_start_poll;
839 card->discipline.input_handler = (qdio_handler_t *) 881 card->discipline.input_handler = (qdio_handler_t *)
840 qeth_l2_qdio_input_handler; 882 qeth_qdio_input_handler;
841 card->discipline.output_handler = (qdio_handler_t *) 883 card->discipline.output_handler = (qdio_handler_t *)
842 qeth_qdio_output_handler; 884 qeth_qdio_output_handler;
843 card->discipline.recover = qeth_l2_recover; 885 card->discipline.recover = qeth_l2_recover;
@@ -923,6 +965,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
923 card->info.broadcast_capable = 1; 965 card->info.broadcast_capable = 1;
924 qeth_l2_request_initial_mac(card); 966 qeth_l2_request_initial_mac(card);
925 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 967 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
968 netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
926 return register_netdev(card->dev); 969 return register_netdev(card->dev);
927} 970}
928 971
@@ -955,6 +998,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
955 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); 998 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
956 999
957 card->state = CARD_STATE_HARDSETUP; 1000 card->state = CARD_STATE_HARDSETUP;
1001 memset(&card->rx, 0, sizeof(struct qeth_rx));
958 qeth_print_status_message(card); 1002 qeth_print_status_message(card);
959 1003
960 /* softsetup */ 1004 /* softsetup */
@@ -1086,9 +1130,6 @@ static int qeth_l2_recover(void *ptr)
1086 card->use_hard_stop = 1; 1130 card->use_hard_stop = 1;
1087 __qeth_l2_set_offline(card->gdev, 1); 1131 __qeth_l2_set_offline(card->gdev, 1);
1088 rc = __qeth_l2_set_online(card->gdev, 1); 1132 rc = __qeth_l2_set_online(card->gdev, 1);
1089 /* don't run another scheduled recovery */
1090 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1091 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
1092 if (!rc) 1133 if (!rc)
1093 dev_info(&card->gdev->dev, 1134 dev_info(&card->gdev->dev,
1094 "Device successfully recovered!\n"); 1135 "Device successfully recovered!\n");
@@ -1099,6 +1140,8 @@ static int qeth_l2_recover(void *ptr)
1099 dev_warn(&card->gdev->dev, "The qeth device driver " 1140 dev_warn(&card->gdev->dev, "The qeth device driver "
1100 "failed to recover an error on the device\n"); 1141 "failed to recover an error on the device\n");
1101 } 1142 }
1143 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1144 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
1102 return 0; 1145 return 0;
1103} 1146}
1104 1147
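The qeth_l2 hunks above convert the driver from a per-interrupt QDIO input handler to NAPI polling: the poll function consumes at most budget packets, and napi_complete() is only called once the input queue is drained; if re-arming the interrupt (qdio_start_irq()) reports that new work arrived in the meantime, polling is rescheduled immediately. A minimal sketch of that pattern, with hypothetical my_dev/my_rx_* helpers standing in for the qeth internals (this is not qeth code); the qeth_l3 conversion further down follows the same shape:

#include <linux/netdevice.h>

struct my_dev {
        struct napi_struct napi;
        /* ... device state ... */
};

/* Hypothetical helpers: whether the hardware has pending work, how many
 * packets one pass processed, and whether re-arming the IRQ saw new work. */
extern bool my_rx_pending(struct my_dev *priv);
extern int my_rx_process(struct my_dev *priv, int budget);
extern bool my_enable_irq_saw_work(struct my_dev *priv);

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_dev *priv = container_of(napi, struct my_dev, napi);
        int work_done = 0;

        while (work_done < budget && my_rx_pending(priv))
                work_done += my_rx_process(priv, budget - work_done);

        if (work_done < budget) {
                /* Queue drained: leave polled mode and re-arm the IRQ.
                 * If new work slipped in meanwhile, poll again at once. */
                napi_complete(napi);
                if (my_enable_irq_saw_work(priv))
                        napi_schedule(napi);
        }
        return work_done;
}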
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e22ae248f613..5b79f573bd93 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -103,12 +103,7 @@ int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
103 103
104void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) 104void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
105{ 105{
106 sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x" 106 sprintf(buf, "%pI6", addr);
107 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x",
108 addr[0], addr[1], addr[2], addr[3],
109 addr[4], addr[5], addr[6], addr[7],
110 addr[8], addr[9], addr[10], addr[11],
111 addr[12], addr[13], addr[14], addr[15]);
112} 107}
113 108
114int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr) 109int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
@@ -2112,51 +2107,44 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
2112 return vlan_id; 2107 return vlan_id;
2113} 2108}
2114 2109
2115static void qeth_l3_process_inbound_buffer(struct qeth_card *card, 2110static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
2116 struct qeth_qdio_buffer *buf, int index) 2111 int budget, int *done)
2117{ 2112{
2118 struct qdio_buffer_element *element; 2113 int work_done = 0;
2119 struct sk_buff *skb; 2114 struct sk_buff *skb;
2120 struct qeth_hdr *hdr; 2115 struct qeth_hdr *hdr;
2121 int offset;
2122 __u16 vlan_tag = 0; 2116 __u16 vlan_tag = 0;
2123 unsigned int len; 2117 unsigned int len;
2124 /* get first element of current buffer */
2125 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
2126 offset = 0;
2127 if (card->options.performance_stats)
2128 card->perf_stats.bufs_rec++;
2129 while ((skb = qeth_core_get_next_skb(card, buf->buffer, &element,
2130 &offset, &hdr))) {
2131 skb->dev = card->dev;
2132 /* is device UP ? */
2133 if (!(card->dev->flags & IFF_UP)) {
2134 dev_kfree_skb_any(skb);
2135 continue;
2136 }
2137 2118
2119 *done = 0;
2120 BUG_ON(!budget);
2121 while (budget) {
2122 skb = qeth_core_get_next_skb(card,
2123 card->qdio.in_q->bufs[card->rx.b_index].buffer,
2124 &card->rx.b_element, &card->rx.e_offset, &hdr);
2125 if (!skb) {
2126 *done = 1;
2127 break;
2128 }
2129 skb->dev = card->dev;
2138 switch (hdr->hdr.l3.id) { 2130 switch (hdr->hdr.l3.id) {
2139 case QETH_HEADER_TYPE_LAYER3: 2131 case QETH_HEADER_TYPE_LAYER3:
2140 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr); 2132 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
2141 len = skb->len; 2133 len = skb->len;
2142 if (vlan_tag && !card->options.sniffer) 2134 if (vlan_tag && !card->options.sniffer)
2143 if (card->vlangrp) 2135 if (card->vlangrp)
2144 vlan_hwaccel_rx(skb, card->vlangrp, 2136 vlan_gro_receive(&card->napi,
2145 vlan_tag); 2137 card->vlangrp, vlan_tag, skb);
2146 else { 2138 else {
2147 dev_kfree_skb_any(skb); 2139 dev_kfree_skb_any(skb);
2148 continue; 2140 continue;
2149 } 2141 }
2150 else 2142 else
2151 netif_rx(skb); 2143 napi_gro_receive(&card->napi, skb);
2152 break; 2144 break;
2153 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */ 2145 case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
2154 skb->pkt_type = PACKET_HOST; 2146 skb->pkt_type = PACKET_HOST;
2155 skb->protocol = eth_type_trans(skb, skb->dev); 2147 skb->protocol = eth_type_trans(skb, skb->dev);
2156 if (card->options.checksum_type == NO_CHECKSUMMING)
2157 skb->ip_summed = CHECKSUM_UNNECESSARY;
2158 else
2159 skb->ip_summed = CHECKSUM_NONE;
2160 len = skb->len; 2148 len = skb->len;
2161 netif_receive_skb(skb); 2149 netif_receive_skb(skb);
2162 break; 2150 break;
@@ -2166,10 +2154,87 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
2166 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 2154 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
2167 continue; 2155 continue;
2168 } 2156 }
2169 2157 work_done++;
2158 budget--;
2170 card->stats.rx_packets++; 2159 card->stats.rx_packets++;
2171 card->stats.rx_bytes += len; 2160 card->stats.rx_bytes += len;
2172 } 2161 }
2162 return work_done;
2163}
2164
2165static int qeth_l3_poll(struct napi_struct *napi, int budget)
2166{
2167 struct qeth_card *card = container_of(napi, struct qeth_card, napi);
2168 int work_done = 0;
2169 struct qeth_qdio_buffer *buffer;
2170 int done;
2171 int new_budget = budget;
2172
2173 if (card->options.performance_stats) {
2174 card->perf_stats.inbound_cnt++;
2175 card->perf_stats.inbound_start_time = qeth_get_micros();
2176 }
2177
2178 while (1) {
2179 if (!card->rx.b_count) {
2180 card->rx.qdio_err = 0;
2181 card->rx.b_count = qdio_get_next_buffers(
2182 card->data.ccwdev, 0, &card->rx.b_index,
2183 &card->rx.qdio_err);
2184 if (card->rx.b_count <= 0) {
2185 card->rx.b_count = 0;
2186 break;
2187 }
2188 card->rx.b_element =
2189 &card->qdio.in_q->bufs[card->rx.b_index]
2190 .buffer->element[0];
2191 card->rx.e_offset = 0;
2192 }
2193
2194 while (card->rx.b_count) {
2195 buffer = &card->qdio.in_q->bufs[card->rx.b_index];
2196 if (!(card->rx.qdio_err &&
2197 qeth_check_qdio_errors(card, buffer->buffer,
2198 card->rx.qdio_err, "qinerr")))
2199 work_done += qeth_l3_process_inbound_buffer(
2200 card, new_budget, &done);
2201 else
2202 done = 1;
2203
2204 if (done) {
2205 if (card->options.performance_stats)
2206 card->perf_stats.bufs_rec++;
2207 qeth_put_buffer_pool_entry(card,
2208 buffer->pool_entry);
2209 qeth_queue_input_buffer(card, card->rx.b_index);
2210 card->rx.b_count--;
2211 if (card->rx.b_count) {
2212 card->rx.b_index =
2213 (card->rx.b_index + 1) %
2214 QDIO_MAX_BUFFERS_PER_Q;
2215 card->rx.b_element =
2216 &card->qdio.in_q
2217 ->bufs[card->rx.b_index]
2218 .buffer->element[0];
2219 card->rx.e_offset = 0;
2220 }
2221 }
2222
2223 if (work_done >= budget)
2224 goto out;
2225 else
2226 new_budget = budget - work_done;
2227 }
2228 }
2229
2230 napi_complete(napi);
2231 if (qdio_start_irq(card->data.ccwdev, 0))
2232 napi_schedule(&card->napi);
2233out:
2234 if (card->options.performance_stats)
2235 card->perf_stats.inbound_time += qeth_get_micros() -
2236 card->perf_stats.inbound_start_time;
2237 return work_done;
2173} 2238}
2174 2239
2175static int qeth_l3_verify_vlan_dev(struct net_device *dev, 2240static int qeth_l3_verify_vlan_dev(struct net_device *dev,
@@ -3103,6 +3168,7 @@ tx_drop:
3103static int qeth_l3_open(struct net_device *dev) 3168static int qeth_l3_open(struct net_device *dev)
3104{ 3169{
3105 struct qeth_card *card = dev->ml_priv; 3170 struct qeth_card *card = dev->ml_priv;
3171 int rc = 0;
3106 3172
3107 QETH_CARD_TEXT(card, 4, "qethopen"); 3173 QETH_CARD_TEXT(card, 4, "qethopen");
3108 if (card->state != CARD_STATE_SOFTSETUP) 3174 if (card->state != CARD_STATE_SOFTSETUP)
@@ -3113,7 +3179,12 @@ static int qeth_l3_open(struct net_device *dev)
3113 3179
3114 if (!card->lan_online && netif_carrier_ok(dev)) 3180 if (!card->lan_online && netif_carrier_ok(dev))
3115 netif_carrier_off(dev); 3181 netif_carrier_off(dev);
3116 return 0; 3182 if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
3183 napi_enable(&card->napi);
3184 napi_schedule(&card->napi);
3185 } else
3186 rc = -EIO;
3187 return rc;
3117} 3188}
3118 3189
3119static int qeth_l3_stop(struct net_device *dev) 3190static int qeth_l3_stop(struct net_device *dev)
@@ -3122,8 +3193,10 @@ static int qeth_l3_stop(struct net_device *dev)
3122 3193
3123 QETH_CARD_TEXT(card, 4, "qethstop"); 3194 QETH_CARD_TEXT(card, 4, "qethstop");
3124 netif_tx_disable(dev); 3195 netif_tx_disable(dev);
3125 if (card->state == CARD_STATE_UP) 3196 if (card->state == CARD_STATE_UP) {
3126 card->state = CARD_STATE_SOFTSETUP; 3197 card->state = CARD_STATE_SOFTSETUP;
3198 napi_disable(&card->napi);
3199 }
3127 return 0; 3200 return 0;
3128} 3201}
3129 3202
@@ -3293,57 +3366,19 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3293 card->dev->gso_max_size = 15 * PAGE_SIZE; 3366 card->dev->gso_max_size = 15 * PAGE_SIZE;
3294 3367
3295 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3368 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
3369 netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
3296 return register_netdev(card->dev); 3370 return register_netdev(card->dev);
3297} 3371}
3298 3372
3299static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
3300 unsigned int qdio_err, unsigned int queue, int first_element,
3301 int count, unsigned long card_ptr)
3302{
3303 struct net_device *net_dev;
3304 struct qeth_card *card;
3305 struct qeth_qdio_buffer *buffer;
3306 int index;
3307 int i;
3308
3309 card = (struct qeth_card *) card_ptr;
3310 net_dev = card->dev;
3311 if (card->options.performance_stats) {
3312 card->perf_stats.inbound_cnt++;
3313 card->perf_stats.inbound_start_time = qeth_get_micros();
3314 }
3315 if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
3316 QETH_CARD_TEXT(card, 1, "qdinchk");
3317 QETH_CARD_TEXT_(card, 1, "%04X%04X",
3318 first_element, count);
3319 QETH_CARD_TEXT_(card, 1, "%04X", queue);
3320 qeth_schedule_recovery(card);
3321 return;
3322 }
3323 for (i = first_element; i < (first_element + count); ++i) {
3324 index = i % QDIO_MAX_BUFFERS_PER_Q;
3325 buffer = &card->qdio.in_q->bufs[index];
3326 if (!(qdio_err &&
3327 qeth_check_qdio_errors(card, buffer->buffer,
3328 qdio_err, "qinerr")))
3329 qeth_l3_process_inbound_buffer(card, buffer, index);
3330 /* clear buffer and give back to hardware */
3331 qeth_put_buffer_pool_entry(card, buffer->pool_entry);
3332 qeth_queue_input_buffer(card, index);
3333 }
3334 if (card->options.performance_stats)
3335 card->perf_stats.inbound_time += qeth_get_micros() -
3336 card->perf_stats.inbound_start_time;
3337}
3338
3339static int qeth_l3_probe_device(struct ccwgroup_device *gdev) 3373static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
3340{ 3374{
3341 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3375 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3342 3376
3343 qeth_l3_create_device_attributes(&gdev->dev); 3377 qeth_l3_create_device_attributes(&gdev->dev);
3344 card->options.layer2 = 0; 3378 card->options.layer2 = 0;
3379 card->discipline.start_poll = qeth_qdio_start_poll;
3345 card->discipline.input_handler = (qdio_handler_t *) 3380 card->discipline.input_handler = (qdio_handler_t *)
3346 qeth_l3_qdio_input_handler; 3381 qeth_qdio_input_handler;
3347 card->discipline.output_handler = (qdio_handler_t *) 3382 card->discipline.output_handler = (qdio_handler_t *)
3348 qeth_qdio_output_handler; 3383 qeth_qdio_output_handler;
3349 card->discipline.recover = qeth_l3_recover; 3384 card->discipline.recover = qeth_l3_recover;
@@ -3402,6 +3437,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3402 } 3437 }
3403 3438
3404 card->state = CARD_STATE_HARDSETUP; 3439 card->state = CARD_STATE_HARDSETUP;
3440 memset(&card->rx, 0, sizeof(struct qeth_rx));
3405 qeth_print_status_message(card); 3441 qeth_print_status_message(card);
3406 3442
3407 /* softsetup */ 3443 /* softsetup */
@@ -3538,9 +3574,6 @@ static int qeth_l3_recover(void *ptr)
3538 card->use_hard_stop = 1; 3574 card->use_hard_stop = 1;
3539 __qeth_l3_set_offline(card->gdev, 1); 3575 __qeth_l3_set_offline(card->gdev, 1);
3540 rc = __qeth_l3_set_online(card->gdev, 1); 3576 rc = __qeth_l3_set_online(card->gdev, 1);
3541 /* don't run another scheduled recovery */
3542 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3543 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3544 if (!rc) 3577 if (!rc)
3545 dev_info(&card->gdev->dev, 3578 dev_info(&card->gdev->dev,
3546 "Device successfully recovered!\n"); 3579 "Device successfully recovered!\n");
@@ -3551,6 +3584,8 @@ static int qeth_l3_recover(void *ptr)
3551 dev_warn(&card->gdev->dev, "The qeth device driver " 3584 dev_warn(&card->gdev->dev, "The qeth device driver "
3552 "failed to recover an error on the device\n"); 3585 "failed to recover an error on the device\n");
3553 } 3586 }
3587 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3588 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
3554 return 0; 3589 return 0;
3555} 3590}
3556 3591
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index b2635759721c..da54a28a1b87 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -277,16 +277,12 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
277static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, 277static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
278 struct zfcp_qdio *qdio) 278 struct zfcp_qdio *qdio)
279{ 279{
280 280 memset(id, 0, sizeof(*id));
281 id->cdev = qdio->adapter->ccw_device; 281 id->cdev = qdio->adapter->ccw_device;
282 id->q_format = QDIO_ZFCP_QFMT; 282 id->q_format = QDIO_ZFCP_QFMT;
283 memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); 283 memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
284 ASCEBC(id->adapter_name, 8); 284 ASCEBC(id->adapter_name, 8);
285 id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV; 285 id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
286 id->qib_param_field_format = 0;
287 id->qib_param_field = NULL;
288 id->input_slib_elements = NULL;
289 id->output_slib_elements = NULL;
290 id->no_input_qs = 1; 286 id->no_input_qs = 1;
291 id->no_output_qs = 1; 287 id->no_output_qs = 1;
292 id->input_handler = zfcp_qdio_int_resp; 288 id->input_handler = zfcp_qdio_int_resp;
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 5af23cc5ea9f..f383cb42b1d7 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1344,8 +1344,24 @@ static struct usbatm_driver cxacru_driver = {
1344 .tx_padding = 11, 1344 .tx_padding = 11,
1345}; 1345};
1346 1346
1347static int cxacru_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) 1347static int cxacru_usb_probe(struct usb_interface *intf,
1348 const struct usb_device_id *id)
1348{ 1349{
1350 struct usb_device *usb_dev = interface_to_usbdev(intf);
1351 char buf[15];
1352
1353 /* Avoid ADSL routers (cx82310_eth).
1354 * Abort if bDeviceClass is 0xff and iProduct is "USB NET CARD".
1355 */
1356 if (usb_dev->descriptor.bDeviceClass == USB_CLASS_VENDOR_SPEC
1357 && usb_string(usb_dev, usb_dev->descriptor.iProduct,
1358 buf, sizeof(buf)) > 0) {
1359 if (!strcmp(buf, "USB NET CARD")) {
1360 dev_info(&intf->dev, "ignoring cx82310_eth device\n");
1361 return -ENODEV;
1362 }
1363 }
1364
1349 return usbatm_usb_probe(intf, id, &cxacru_driver); 1365 return usbatm_usb_probe(intf, id, &cxacru_driver);
1350} 1366}
1351 1367
diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c
index 973321327c44..8739c4f4d015 100644
--- a/drivers/uwb/address.c
+++ b/drivers/uwb/address.c
@@ -363,10 +363,7 @@ size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr,
363{ 363{
364 size_t result; 364 size_t result;
365 if (type) 365 if (type)
366 result = scnprintf(buf, buf_size, 366 result = scnprintf(buf, buf_size, "%pM", addr);
367 "%02x:%02x:%02x:%02x:%02x:%02x",
368 addr[0], addr[1], addr[2],
369 addr[3], addr[4], addr[5]);
370 else 367 else
371 result = scnprintf(buf, buf_size, "%02x:%02x", 368 result = scnprintf(buf, buf_size, "%02x:%02x",
372 addr[1], addr[0]); 369 addr[1], addr[0]);
diff --git a/drivers/uwb/wlp/wss-lc.c b/drivers/uwb/wlp/wss-lc.c
index a005d2a03b5d..67872c83b679 100644
--- a/drivers/uwb/wlp/wss-lc.c
+++ b/drivers/uwb/wlp/wss-lc.c
@@ -791,11 +791,8 @@ int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry,
791 } else { 791 } else {
792 if (printk_ratelimit()) 792 if (printk_ratelimit())
793 dev_err(dev, "WLP: Destination neighbor (Ethernet: " 793 dev_err(dev, "WLP: Destination neighbor (Ethernet: "
794 "%02x:%02x:%02x:%02x:%02x:%02x, Dev: " 794 "%pM, Dev: %02x:%02x) is not connected.\n",
795 "%02x:%02x) is not connected. \n", eth_addr[0], 795 eth_addr, dev_addr->data[1], dev_addr->data[0]);
796 eth_addr[1], eth_addr[2], eth_addr[3],
797 eth_addr[4], eth_addr[5], dev_addr->data[1],
798 dev_addr->data[0]);
799 result = -EINVAL; 796 result = -EINVAL;
800 } 797 }
801 return result; 798 return result;
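The qeth_l3, uwb and wlp hunks above all make the same substitution: open-coded %02x chains are replaced with the kernel's %pM (6-byte MAC) and %pI6 (16-byte IPv6) printk format extensions. A hedged sketch of the idiom; mac and ip6 are assumed to point at raw addresses of the expected length:

#include <linux/kernel.h>
#include <linux/types.h>

/* %pM consumes a pointer to 6 address bytes, %pI6 a pointer to 16. */
static void print_addrs(const u8 *mac, const u8 *ip6)
{
        pr_info("station %pM has address %pI6\n", mac, ip6);
}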
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 626b629429ff..c7fbf298ad68 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -302,6 +302,7 @@ header-y += quota.h
302header-y += radeonfb.h 302header-y += radeonfb.h
303header-y += random.h 303header-y += random.h
304header-y += raw.h 304header-y += raw.h
305header-y += rds.h
305header-y += reboot.h 306header-y += reboot.h
306header-y += reiserfs_fs.h 307header-y += reiserfs_fs.h
307header-y += reiserfs_xattr.h 308header-y += reiserfs_xattr.h
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index f6481daf6e52..a8e4e832cdbb 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -449,7 +449,7 @@ void vcc_insert_socket(struct sock *sk);
449 449
450static inline int atm_guess_pdu2truesize(int size) 450static inline int atm_guess_pdu2truesize(int size)
451{ 451{
452 return (SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info)); 452 return SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info);
453} 453}
454 454
455 455
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 7434a8353e23..7187bd8a75f6 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -165,8 +165,10 @@ enum {
165 DCCPO_TIMESTAMP_ECHO = 42, 165 DCCPO_TIMESTAMP_ECHO = 42,
166 DCCPO_ELAPSED_TIME = 43, 166 DCCPO_ELAPSED_TIME = 43,
167 DCCPO_MAX = 45, 167 DCCPO_MAX = 45,
168 DCCPO_MIN_CCID_SPECIFIC = 128, 168 DCCPO_MIN_RX_CCID_SPECIFIC = 128, /* from sender to receiver */
169 DCCPO_MAX_CCID_SPECIFIC = 255, 169 DCCPO_MAX_RX_CCID_SPECIFIC = 191,
170 DCCPO_MIN_TX_CCID_SPECIFIC = 192, /* from receiver to sender */
171 DCCPO_MAX_TX_CCID_SPECIFIC = 255,
170}; 172};
171/* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */ 173/* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */
172#define DCCP_SINGLE_OPT_MAXLEN 253 174#define DCCP_SINGLE_OPT_MAXLEN 253
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 2308fbb4523a..f16a01081e15 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -71,7 +71,7 @@ static inline int is_zero_ether_addr(const u8 *addr)
71 */ 71 */
72static inline int is_multicast_ether_addr(const u8 *addr) 72static inline int is_multicast_ether_addr(const u8 *addr)
73{ 73{
74 return (0x01 & addr[0]); 74 return 0x01 & addr[0];
75} 75}
76 76
77/** 77/**
@@ -82,7 +82,7 @@ static inline int is_multicast_ether_addr(const u8 *addr)
82 */ 82 */
83static inline int is_local_ether_addr(const u8 *addr) 83static inline int is_local_ether_addr(const u8 *addr)
84{ 84{
85 return (0x02 & addr[0]); 85 return 0x02 & addr[0];
86} 86}
87 87
88/** 88/**
@@ -237,13 +237,29 @@ static inline bool is_etherdev_addr(const struct net_device *dev,
237 * entry points. 237 * entry points.
238 */ 238 */
239 239
240static inline int compare_ether_header(const void *a, const void *b) 240static inline unsigned long compare_ether_header(const void *a, const void *b)
241{ 241{
242#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
243 unsigned long fold;
244
245 /*
246 * We want to compare 14 bytes:
247 * [a0 ... a13] ^ [b0 ... b13]
 248 * Use two long XORs, ORed together, with an overlap of two bytes.
249 * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] |
250 * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13]
251 * This means the [a6 a7] ^ [b6 b7] part is done two times.
252 */
253 fold = *(unsigned long *)a ^ *(unsigned long *)b;
254 fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
255 return fold;
256#else
242 u32 *a32 = (u32 *)((u8 *)a + 2); 257 u32 *a32 = (u32 *)((u8 *)a + 2);
243 u32 *b32 = (u32 *)((u8 *)b + 2); 258 u32 *b32 = (u32 *)((u8 *)b + 2);
244 259
245 return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | 260 return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
246 (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); 261 (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
262#endif
247} 263}
248 264
249#endif /* _LINUX_ETHERDEVICE_H */ 265#endif /* _LINUX_ETHERDEVICE_H */
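On 64-bit platforms with efficient unaligned access, the rewritten compare_ether_header() above covers the 14-byte header with two overlapping unsigned long XORs instead of one 16-bit plus three 32-bit compares. Callers only test the result against zero, so widening the return type from int to unsigned long is transparent. A hedged usage sketch; cached_hdr is assumed to point at a saved 14-byte header, and the skb's MAC header must already be set:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* compare_ether_header() returns 0 on a match, non-zero otherwise. */
static bool header_matches(const struct sk_buff *skb, const void *cached_hdr)
{
        return compare_ether_header(eth_hdr(skb), cached_hdr) == 0;
}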
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 991269e5b152..8a3338ceb438 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -14,6 +14,7 @@
14#define _LINUX_ETHTOOL_H 14#define _LINUX_ETHTOOL_H
15 15
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/if_ether.h>
17 18
18/* This should work for both 32 and 64 bit userland. */ 19/* This should work for both 32 and 64 bit userland. */
19struct ethtool_cmd { 20struct ethtool_cmd {
@@ -314,9 +315,20 @@ enum ethtool_flags {
314}; 315};
315 316
316/* The following structures are for supporting RX network flow 317/* The following structures are for supporting RX network flow
317 * classification configuration. Note, all multibyte fields, e.g., 318 * classification and RX n-tuple configuration. Note, all multibyte
318 * ip4src, ip4dst, psrc, pdst, spi, etc. are expected to be in network 319 * fields, e.g., ip4src, ip4dst, psrc, pdst, spi, etc. are expected to
319 * byte order. 320 * be in network byte order.
321 */
322
323/**
324 * struct ethtool_tcpip4_spec - flow specification for TCP/IPv4 etc.
325 * @ip4src: Source host
326 * @ip4dst: Destination host
327 * @psrc: Source port
328 * @pdst: Destination port
329 * @tos: Type-of-service
330 *
331 * This can be used to specify a TCP/IPv4, UDP/IPv4 or SCTP/IPv4 flow.
320 */ 332 */
321struct ethtool_tcpip4_spec { 333struct ethtool_tcpip4_spec {
322 __be32 ip4src; 334 __be32 ip4src;
@@ -326,6 +338,15 @@ struct ethtool_tcpip4_spec {
326 __u8 tos; 338 __u8 tos;
327}; 339};
328 340
341/**
342 * struct ethtool_ah_espip4_spec - flow specification for IPsec/IPv4
343 * @ip4src: Source host
344 * @ip4dst: Destination host
345 * @spi: Security parameters index
346 * @tos: Type-of-service
347 *
348 * This can be used to specify an IPsec transport or tunnel over IPv4.
349 */
329struct ethtool_ah_espip4_spec { 350struct ethtool_ah_espip4_spec {
330 __be32 ip4src; 351 __be32 ip4src;
331 __be32 ip4dst; 352 __be32 ip4dst;
@@ -333,21 +354,17 @@ struct ethtool_ah_espip4_spec {
333 __u8 tos; 354 __u8 tos;
334}; 355};
335 356
336struct ethtool_rawip4_spec {
337 __be32 ip4src;
338 __be32 ip4dst;
339 __u8 hdata[64];
340};
341
342struct ethtool_ether_spec {
343 __be16 ether_type;
344 __u8 frame_size;
345 __u8 eframe[16];
346};
347
348#define ETH_RX_NFC_IP4 1 357#define ETH_RX_NFC_IP4 1
349#define ETH_RX_NFC_IP6 2
350 358
359/**
360 * struct ethtool_usrip4_spec - general flow specification for IPv4
361 * @ip4src: Source host
362 * @ip4dst: Destination host
363 * @l4_4_bytes: First 4 bytes of transport (layer 4) header
364 * @tos: Type-of-service
365 * @ip_ver: Value must be %ETH_RX_NFC_IP4; mask must be 0
366 * @proto: Transport protocol number; mask must be 0
367 */
351struct ethtool_usrip4_spec { 368struct ethtool_usrip4_spec {
352 __be32 ip4src; 369 __be32 ip4src;
353 __be32 ip4dst; 370 __be32 ip4dst;
@@ -357,6 +374,15 @@ struct ethtool_usrip4_spec {
357 __u8 proto; 374 __u8 proto;
358}; 375};
359 376
377/**
378 * struct ethtool_rx_flow_spec - specification for RX flow filter
379 * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW
380 * @h_u: Flow fields to match (dependent on @flow_type)
381 * @m_u: Masks for flow field bits to be ignored
382 * @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC
383 * if packets should be discarded
384 * @location: Index of filter in hardware table
385 */
360struct ethtool_rx_flow_spec { 386struct ethtool_rx_flow_spec {
361 __u32 flow_type; 387 __u32 flow_type;
362 union { 388 union {
@@ -365,36 +391,91 @@ struct ethtool_rx_flow_spec {
365 struct ethtool_tcpip4_spec sctp_ip4_spec; 391 struct ethtool_tcpip4_spec sctp_ip4_spec;
366 struct ethtool_ah_espip4_spec ah_ip4_spec; 392 struct ethtool_ah_espip4_spec ah_ip4_spec;
367 struct ethtool_ah_espip4_spec esp_ip4_spec; 393 struct ethtool_ah_espip4_spec esp_ip4_spec;
368 struct ethtool_rawip4_spec raw_ip4_spec;
369 struct ethtool_ether_spec ether_spec;
370 struct ethtool_usrip4_spec usr_ip4_spec; 394 struct ethtool_usrip4_spec usr_ip4_spec;
371 __u8 hdata[64]; 395 struct ethhdr ether_spec;
372 } h_u, m_u; /* entry, mask */ 396 __u8 hdata[72];
397 } h_u, m_u;
373 __u64 ring_cookie; 398 __u64 ring_cookie;
374 __u32 location; 399 __u32 location;
375}; 400};
376 401
402/**
403 * struct ethtool_rxnfc - command to get or set RX flow classification rules
404 * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH,
405 * %ETHTOOL_GRXRINGS, %ETHTOOL_GRXCLSRLCNT, %ETHTOOL_GRXCLSRULE,
406 * %ETHTOOL_GRXCLSRLALL, %ETHTOOL_SRXCLSRLDEL or %ETHTOOL_SRXCLSRLINS
407 * @flow_type: Type of flow to be affected, e.g. %TCP_V4_FLOW
408 * @data: Command-dependent value
409 * @fs: Flow filter specification
410 * @rule_cnt: Number of rules to be affected
411 * @rule_locs: Array of valid rule indices
412 *
413 * For %ETHTOOL_GRXFH and %ETHTOOL_SRXFH, @data is a bitmask indicating
414 * the fields included in the flow hash, e.g. %RXH_IP_SRC. The following
415 * structure fields must not be used.
416 *
417 * For %ETHTOOL_GRXRINGS, @data is set to the number of RX rings/queues
418 * on return.
419 *
420 * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined
421 * rules on return.
422 *
423 * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the index of an
424 * existing filter rule on entry and @fs contains the rule on return.
425 *
426 * For %ETHTOOL_GRXCLSRLALL, @rule_cnt specifies the array size of the
427 * user buffer for @rule_locs on entry. On return, @data is the size
428 * of the filter table and @rule_locs contains the indices of the
429 * defined rules.
430 *
431 * For %ETHTOOL_SRXCLSRLINS, @fs specifies the filter rule to add or
432 * update. @fs.@location specifies the index to use and must not be
433 * ignored.
434 *
435 * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the index of an
436 * existing filter rule on entry.
437 *
438 * Implementation of indexed classification rules generally requires a
439 * TCAM.
440 */
377struct ethtool_rxnfc { 441struct ethtool_rxnfc {
378 __u32 cmd; 442 __u32 cmd;
379 __u32 flow_type; 443 __u32 flow_type;
380 /* The rx flow hash value or the rule DB size */
381 __u64 data; 444 __u64 data;
382 /* The following fields are not valid and must not be used for
383 * the ETHTOOL_{G,X}RXFH commands. */
384 struct ethtool_rx_flow_spec fs; 445 struct ethtool_rx_flow_spec fs;
385 __u32 rule_cnt; 446 __u32 rule_cnt;
386 __u32 rule_locs[0]; 447 __u32 rule_locs[0];
387}; 448};
388 449
450/**
451 * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection
452 * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR
453 * @size: On entry, the array size of the user buffer. On return from
454 * %ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table.
455 * @ring_index: RX ring/queue index for each hash value
456 */
389struct ethtool_rxfh_indir { 457struct ethtool_rxfh_indir {
390 __u32 cmd; 458 __u32 cmd;
391 /* On entry, this is the array size of the user buffer. On
392 * return from ETHTOOL_GRXFHINDIR, this is the array size of
393 * the hardware indirection table. */
394 __u32 size; 459 __u32 size;
395 __u32 ring_index[0]; /* ring/queue index for each hash value */ 460 __u32 ring_index[0];
396}; 461};
397 462
463/**
464 * struct ethtool_rx_ntuple_flow_spec - specification for RX flow filter
465 * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW
466 * @h_u: Flow field values to match (dependent on @flow_type)
467 * @m_u: Masks for flow field value bits to be ignored
468 * @vlan_tag: VLAN tag to match
469 * @vlan_tag_mask: Mask for VLAN tag bits to be ignored
470 * @data: Driver-dependent data to match
471 * @data_mask: Mask for driver-dependent data bits to be ignored
472 * @action: RX ring/queue index to deliver to (non-negative) or other action
473 * (negative, e.g. %ETHTOOL_RXNTUPLE_ACTION_DROP)
474 *
475 * For flow types %TCP_V4_FLOW, %UDP_V4_FLOW and %SCTP_V4_FLOW, where
476 * a field value and mask are both zero this is treated as if all mask
477 * bits are set i.e. the field is ignored.
478 */
398struct ethtool_rx_ntuple_flow_spec { 479struct ethtool_rx_ntuple_flow_spec {
399 __u32 flow_type; 480 __u32 flow_type;
400 union { 481 union {
@@ -403,22 +484,26 @@ struct ethtool_rx_ntuple_flow_spec {
403 struct ethtool_tcpip4_spec sctp_ip4_spec; 484 struct ethtool_tcpip4_spec sctp_ip4_spec;
404 struct ethtool_ah_espip4_spec ah_ip4_spec; 485 struct ethtool_ah_espip4_spec ah_ip4_spec;
405 struct ethtool_ah_espip4_spec esp_ip4_spec; 486 struct ethtool_ah_espip4_spec esp_ip4_spec;
406 struct ethtool_rawip4_spec raw_ip4_spec;
407 struct ethtool_ether_spec ether_spec;
408 struct ethtool_usrip4_spec usr_ip4_spec; 487 struct ethtool_usrip4_spec usr_ip4_spec;
409 __u8 hdata[64]; 488 struct ethhdr ether_spec;
410 } h_u, m_u; /* entry, mask */ 489 __u8 hdata[72];
490 } h_u, m_u;
411 491
412 __u16 vlan_tag; 492 __u16 vlan_tag;
413 __u16 vlan_tag_mask; 493 __u16 vlan_tag_mask;
414 __u64 data; /* user-defined flow spec data */ 494 __u64 data;
415 __u64 data_mask; /* user-defined flow spec mask */ 495 __u64 data_mask;
416 496
417 /* signed to distinguish between queue and actions (DROP) */
418 __s32 action; 497 __s32 action;
419#define ETHTOOL_RXNTUPLE_ACTION_DROP -1 498#define ETHTOOL_RXNTUPLE_ACTION_DROP (-1) /* drop packet */
499#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) /* clear filter */
420}; 500};
421 501
502/**
503 * struct ethtool_rx_ntuple - command to set or clear RX flow filter
504 * @cmd: Command number - %ETHTOOL_SRXNTUPLE
505 * @fs: Flow filter specification
506 */
422struct ethtool_rx_ntuple { 507struct ethtool_rx_ntuple {
423 __u32 cmd; 508 __u32 cmd;
424 struct ethtool_rx_ntuple_flow_spec fs; 509 struct ethtool_rx_ntuple_flow_spec fs;
@@ -759,22 +844,23 @@ struct ethtool_ops {
759#define WAKE_MAGIC (1 << 5) 844#define WAKE_MAGIC (1 << 5)
760#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ 845#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
761 846
762/* L3-L4 network traffic flow types */ 847/* L2-L4 network traffic flow types */
763#define TCP_V4_FLOW 0x01 848#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
764#define UDP_V4_FLOW 0x02 849#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
765#define SCTP_V4_FLOW 0x03 850#define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */
766#define AH_ESP_V4_FLOW 0x04 851#define AH_ESP_V4_FLOW 0x04 /* hash only */
767#define TCP_V6_FLOW 0x05 852#define TCP_V6_FLOW 0x05 /* hash only */
768#define UDP_V6_FLOW 0x06 853#define UDP_V6_FLOW 0x06 /* hash only */
769#define SCTP_V6_FLOW 0x07 854#define SCTP_V6_FLOW 0x07 /* hash only */
770#define AH_ESP_V6_FLOW 0x08 855#define AH_ESP_V6_FLOW 0x08 /* hash only */
771#define AH_V4_FLOW 0x09 856#define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */
772#define ESP_V4_FLOW 0x0a 857#define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */
773#define AH_V6_FLOW 0x0b 858#define AH_V6_FLOW 0x0b /* hash only */
774#define ESP_V6_FLOW 0x0c 859#define ESP_V6_FLOW 0x0c /* hash only */
775#define IP_USER_FLOW 0x0d 860#define IP_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */
776#define IPV4_FLOW 0x10 861#define IPV4_FLOW 0x10 /* hash only */
777#define IPV6_FLOW 0x11 862#define IPV6_FLOW 0x11 /* hash only */
863#define ETHER_FLOW 0x12 /* spec only (ether_spec) */
778 864
779/* L3-L4 network traffic flow hash options */ 865/* L3-L4 network traffic flow hash options */
780#define RXH_L2DA (1 << 1) 866#define RXH_L2DA (1 << 1)
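The kernel-doc added above pins down which ethtool_rxnfc fields each command uses. A hedged userspace sketch of the simplest case, ETHTOOL_GRXCLSRLCNT, which only fills in rule_cnt on return; "eth0" is an example interface name, and the driver must implement RX classification:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct ethtool_rxnfc nfc;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        memset(&nfc, 0, sizeof(nfc));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        nfc.cmd = ETHTOOL_GRXCLSRLCNT;
        ifr.ifr_data = (void *)&nfc;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("%u RX classification rules defined\n", nfc.rule_cnt);
        close(fd);
        return 0;
}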
diff --git a/include/linux/if.h b/include/linux/if.h
index 53558ec59e1b..123959927745 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -75,6 +75,8 @@
75#define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */ 75#define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */
76#define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */ 76#define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */
77#define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */ 77#define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */
78#define IFF_OVS_DATAPATH 0x10000 /* device used as Open vSwitch
79 * datapath port */
78 80
79#define IF_GET_IFACE 0x0001 /* for querying only */ 81#define IF_GET_IFACE 0x0001 /* for querying only */
80#define IF_GET_PROTO 0x0002 82#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index bed7a4682b90..f9c3df03db0f 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -137,8 +137,6 @@ extern struct ctl_table ether_table[];
137 137
138extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); 138extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
139 139
140#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
141
142#endif 140#endif
143 141
144#endif /* _LINUX_IF_ETHER_H */ 142#endif /* _LINUX_IF_ETHER_H */
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 35280b302290..8a2fd66a8b5f 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -40,6 +40,12 @@ struct macvlan_rx_stats {
40 unsigned long rx_errors; 40 unsigned long rx_errors;
41}; 41};
42 42
43/*
44 * Maximum times a macvtap device can be opened. This can be used to
 45 * configure the number of receive queues, e.g. for multiqueue virtio.
46 */
47#define MAX_MACVTAP_QUEUES (NR_CPUS < 16 ? NR_CPUS : 16)
48
43struct macvlan_dev { 49struct macvlan_dev {
44 struct net_device *dev; 50 struct net_device *dev;
45 struct list_head list; 51 struct list_head list;
@@ -50,7 +56,8 @@ struct macvlan_dev {
50 enum macvlan_mode mode; 56 enum macvlan_mode mode;
51 int (*receive)(struct sk_buff *skb); 57 int (*receive)(struct sk_buff *skb);
52 int (*forward)(struct net_device *dev, struct sk_buff *skb); 58 int (*forward)(struct net_device *dev, struct sk_buff *skb);
53 struct macvtap_queue *tap; 59 struct macvtap_queue *taps[MAX_MACVTAP_QUEUES];
60 int numvtaps;
54}; 61};
55 62
56static inline void macvlan_count_rx(const struct macvlan_dev *vlan, 63static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
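Replacing the single tap pointer with a taps[] array lets each open file handle on a macvtap device act as one receive queue (numvtaps of them, bounded by MAX_MACVTAP_QUEUES). A hedged sketch of how a forwarding path might spread packets over the opened queues; the recorded-rx-queue selection is illustrative only, not the actual macvtap algorithm:

#include <linux/if_macvlan.h>
#include <linux/skbuff.h>

static struct macvtap_queue *pick_tap(struct macvlan_dev *vlan,
                                      struct sk_buff *skb)
{
        if (!vlan->numvtaps)
                return NULL;
        /* Illustrative spread; assumes skb_record_rx_queue() was called. */
        return vlan->taps[skb_get_rx_queue(skb) % vlan->numvtaps];
}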
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 27741e05446f..397921b09ef9 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -40,25 +40,35 @@
40 * PPPoE addressing definition 40 * PPPoE addressing definition
41 */ 41 */
42typedef __be16 sid_t; 42typedef __be16 sid_t;
43struct pppoe_addr{ 43struct pppoe_addr {
44 sid_t sid; /* Session identifier */ 44 sid_t sid; /* Session identifier */
45 unsigned char remote[ETH_ALEN]; /* Remote address */ 45 unsigned char remote[ETH_ALEN]; /* Remote address */
46 char dev[IFNAMSIZ]; /* Local device to use */ 46 char dev[IFNAMSIZ]; /* Local device to use */
47}; 47};
48 48
49/************************************************************************ 49/************************************************************************
50 * Protocols supported by AF_PPPOX 50 * PPTP addressing definition
51 */ 51 */
52struct pptp_addr {
53 __be16 call_id;
54 struct in_addr sin_addr;
55};
56
57/************************************************************************
58 * Protocols supported by AF_PPPOX
59 */
52#define PX_PROTO_OE 0 /* Currently just PPPoE */ 60#define PX_PROTO_OE 0 /* Currently just PPPoE */
53#define PX_PROTO_OL2TP 1 /* Now L2TP also */ 61#define PX_PROTO_OL2TP 1 /* Now L2TP also */
54#define PX_MAX_PROTO 2 62#define PX_PROTO_PPTP 2
55 63#define PX_MAX_PROTO 3
56struct sockaddr_pppox { 64
57 sa_family_t sa_family; /* address family, AF_PPPOX */ 65struct sockaddr_pppox {
58 unsigned int sa_protocol; /* protocol identifier */ 66 sa_family_t sa_family; /* address family, AF_PPPOX */
59 union{ 67 unsigned int sa_protocol; /* protocol identifier */
60 struct pppoe_addr pppoe; 68 union {
61 }sa_addr; 69 struct pppoe_addr pppoe;
70 struct pptp_addr pptp;
71 } sa_addr;
62} __attribute__((packed)); 72} __attribute__((packed));
63 73
64/* The use of the above union isn't viable because the size of this 74/* The use of the above union isn't viable because the size of this
@@ -150,15 +160,23 @@ struct pppoe_opt {
150 relayed to (PPPoE relaying) */ 160 relayed to (PPPoE relaying) */
151}; 161};
152 162
163struct pptp_opt {
164 struct pptp_addr src_addr;
165 struct pptp_addr dst_addr;
166 u32 ack_sent, ack_recv;
167 u32 seq_sent, seq_recv;
168 int ppp_flags;
169};
153#include <net/sock.h> 170#include <net/sock.h>
154 171
155struct pppox_sock { 172struct pppox_sock {
156 /* struct sock must be the first member of pppox_sock */ 173 /* struct sock must be the first member of pppox_sock */
157 struct sock sk; 174 struct sock sk;
158 struct ppp_channel chan; 175 struct ppp_channel chan;
159 struct pppox_sock *next; /* for hash table */ 176 struct pppox_sock *next; /* for hash table */
160 union { 177 union {
161 struct pppoe_opt pppoe; 178 struct pppoe_opt pppoe;
179 struct pptp_opt pptp;
162 } proto; 180 } proto;
163 __be16 num; 181 __be16 num;
164}; 182};
@@ -186,7 +204,7 @@ struct pppox_proto {
186 struct module *owner; 204 struct module *owner;
187}; 205};
188 206
189extern int register_pppox_proto(int proto_num, struct pppox_proto *pp); 207extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
190extern void unregister_pppox_proto(int proto_num); 208extern void unregister_pppox_proto(int proto_num);
191extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ 209extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
192extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); 210extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
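With PX_PROTO_PPTP registered, a sockaddr_pppox now carries the PPTP call id and peer IP in sa_addr.pptp. A hedged userspace sketch of connecting such a socket; the peer address 192.0.2.1 is an example value, and the running kernel must provide the pptp protocol:

#include <netinet/in.h>
#include <linux/if_pppox.h>
#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>

/* sk is assumed to come from socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_PPTP). */
static int pptp_connect_peer(int sk)
{
        struct sockaddr_pppox sp;

        memset(&sp, 0, sizeof(sp));
        sp.sa_family = AF_PPPOX;
        sp.sa_protocol = PX_PROTO_PPTP;
        sp.sa_addr.pptp.call_id = 0;    /* local id, chosen at bind time */
        inet_pton(AF_INET, "192.0.2.1", &sp.sa_addr.pptp.sin_addr);

        return connect(sk, (struct sockaddr *)&sp, sizeof(sp));
}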
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 3d870fda8c4f..a52320751bfc 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -119,7 +119,7 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
119 119
120extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, 120extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
121 u16 vlan_tci, int polling); 121 u16 vlan_tci, int polling);
122extern int vlan_hwaccel_do_receive(struct sk_buff *skb); 122extern void vlan_hwaccel_do_receive(struct sk_buff *skb);
123extern gro_result_t 123extern gro_result_t
124vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, 124vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
125 unsigned int vlan_tci, struct sk_buff *skb); 125 unsigned int vlan_tci, struct sk_buff *skb);
@@ -147,9 +147,8 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
147 return NET_XMIT_SUCCESS; 147 return NET_XMIT_SUCCESS;
148} 148}
149 149
150static inline int vlan_hwaccel_do_receive(struct sk_buff *skb) 150static inline void vlan_hwaccel_do_receive(struct sk_buff *skb)
151{ 151{
152 return 0;
153} 152}
154 153
155static inline gro_result_t 154static inline gro_result_t
diff --git a/include/linux/in.h b/include/linux/in.h
index 41d88a4689af..beeb6dee2b49 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -250,6 +250,25 @@ struct sockaddr_in {
250 250
251#ifdef __KERNEL__ 251#ifdef __KERNEL__
252 252
253#include <linux/errno.h>
254
255static inline int proto_ports_offset(int proto)
256{
257 switch (proto) {
258 case IPPROTO_TCP:
259 case IPPROTO_UDP:
260 case IPPROTO_DCCP:
261 case IPPROTO_ESP: /* SPI */
262 case IPPROTO_SCTP:
263 case IPPROTO_UDPLITE:
264 return 0;
265 case IPPROTO_AH: /* SPI */
266 return 4;
267 default:
268 return -EINVAL;
269 }
270}
271
253static inline bool ipv4_is_loopback(__be32 addr) 272static inline bool ipv4_is_loopback(__be32 addr)
254{ 273{
255 return (addr & htonl(0xff000000)) == htonl(0x7f000000); 274 return (addr & htonl(0xff000000)) == htonl(0x7f000000);
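proto_ports_offset() centralizes the knowledge of where the 16-bit port pair (or, for AH/ESP, the SPI used in its place) sits relative to the transport header. A hedged kernel-side sketch of a caller; th is assumed to point at a 2-byte-aligned transport header with at least poff + 2 bytes present:

#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/types.h>

static u16 transport_sport(int proto, const u8 *th)
{
        int poff = proto_ports_offset(proto);

        if (poff < 0)
                return 0;       /* protocol without a port/SPI concept */
        return ntohs(*(const __be16 *)(th + poff));
}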
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 2be1a1a2beb9..1ec09bb4a3ab 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -9,6 +9,7 @@
9#include <linux/rcupdate.h> 9#include <linux/rcupdate.h>
10#include <linux/timer.h> 10#include <linux/timer.h>
11#include <linux/sysctl.h> 11#include <linux/sysctl.h>
12#include <linux/rtnetlink.h>
12 13
13enum 14enum
14{ 15{
@@ -198,14 +199,10 @@ static __inline__ int bad_mask(__be32 mask, __be32 addr)
198 199
199static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) 200static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev)
200{ 201{
201 struct in_device *in_dev = dev->ip_ptr; 202 return rcu_dereference(dev->ip_ptr);
202 if (in_dev)
203 in_dev = rcu_dereference(in_dev);
204 return in_dev;
205} 203}
206 204
207static __inline__ struct in_device * 205static inline struct in_device *in_dev_get(const struct net_device *dev)
208in_dev_get(const struct net_device *dev)
209{ 206{
210 struct in_device *in_dev; 207 struct in_device *in_dev;
211 208
@@ -217,10 +214,9 @@ in_dev_get(const struct net_device *dev)
217 return in_dev; 214 return in_dev;
218} 215}
219 216
220static __inline__ struct in_device * 217static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
221__in_dev_get_rtnl(const struct net_device *dev)
222{ 218{
223 return (struct in_device*)dev->ip_ptr; 219 return rcu_dereference_check(dev->ip_ptr, lockdep_rtnl_is_held());
224} 220}
225 221
226extern void in_dev_finish_destroy(struct in_device *idev); 222extern void in_dev_finish_destroy(struct in_device *idev);
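With dev->ip_ptr now __rcu-annotated, the accessors above reduce to plain rcu_dereference() variants and lockdep can verify the calling context. A hedged sketch of a reader; taking the first entry of ifa_list is illustrative:

#include <linux/inetdevice.h>

static __be32 first_ipv4_addr(const struct net_device *dev)
{
        struct in_device *in_dev;
        __be32 addr = 0;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
        if (in_dev && in_dev->ifa_list)
                addr = in_dev->ifa_list->ifa_local;
        rcu_read_unlock();
        return addr;
}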
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 0f82293a82ed..78a1b9671752 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -56,6 +56,7 @@ enum {
56 MLX4_CMD_QUERY_HCA = 0xb, 56 MLX4_CMD_QUERY_HCA = 0xb,
57 MLX4_CMD_QUERY_PORT = 0x43, 57 MLX4_CMD_QUERY_PORT = 0x43,
58 MLX4_CMD_SENSE_PORT = 0x4d, 58 MLX4_CMD_SENSE_PORT = 0x4d,
59 MLX4_CMD_HW_HEALTH_CHECK = 0x50,
59 MLX4_CMD_SET_PORT = 0xc, 60 MLX4_CMD_SET_PORT = 0xc,
60 MLX4_CMD_ACCESS_DDR = 0x2e, 61 MLX4_CMD_ACCESS_DDR = 0x2e,
61 MLX4_CMD_MAP_ICM = 0xffa, 62 MLX4_CMD_MAP_ICM = 0xffa,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7a7f9c1e679a..7338654c02b4 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -186,6 +186,10 @@ struct mlx4_caps {
186 int eth_mtu_cap[MLX4_MAX_PORTS + 1]; 186 int eth_mtu_cap[MLX4_MAX_PORTS + 1];
187 int gid_table_len[MLX4_MAX_PORTS + 1]; 187 int gid_table_len[MLX4_MAX_PORTS + 1];
188 int pkey_table_len[MLX4_MAX_PORTS + 1]; 188 int pkey_table_len[MLX4_MAX_PORTS + 1];
189 int trans_type[MLX4_MAX_PORTS + 1];
190 int vendor_oui[MLX4_MAX_PORTS + 1];
191 int wavelength[MLX4_MAX_PORTS + 1];
192 u64 trans_code[MLX4_MAX_PORTS + 1];
189 int local_ca_ack_delay; 193 int local_ca_ack_delay;
190 int num_uars; 194 int num_uars;
191 int bf_reg_size; 195 int bf_reg_size;
@@ -229,6 +233,8 @@ struct mlx4_caps {
229 u32 bmme_flags; 233 u32 bmme_flags;
230 u32 reserved_lkey; 234 u32 reserved_lkey;
231 u16 stat_rate_support; 235 u16 stat_rate_support;
236 int udp_rss;
237 int loopback_support;
232 u8 port_width_cap[MLX4_MAX_PORTS + 1]; 238 u8 port_width_cap[MLX4_MAX_PORTS + 1];
233 int max_gso_sz; 239 int max_gso_sz;
234 int reserved_qps_cnt[MLX4_NUM_QP_REGION]; 240 int reserved_qps_cnt[MLX4_NUM_QP_REGION];
@@ -480,5 +486,6 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
480 u32 *lkey, u32 *rkey); 486 u32 *lkey, u32 *rkey);
481int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); 487int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
482int mlx4_SYNC_TPT(struct mlx4_dev *dev); 488int mlx4_SYNC_TPT(struct mlx4_dev *dev);
489int mlx4_test_interrupts(struct mlx4_dev *dev);
483 490
484#endif /* MLX4_DEVICE_H */ 491#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 46c36ffe20ee..01bd4c82d982 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -228,9 +228,9 @@ struct netdev_hw_addr {
228#define NETDEV_HW_ADDR_T_SLAVE 3 228#define NETDEV_HW_ADDR_T_SLAVE 3
229#define NETDEV_HW_ADDR_T_UNICAST 4 229#define NETDEV_HW_ADDR_T_UNICAST 4
230#define NETDEV_HW_ADDR_T_MULTICAST 5 230#define NETDEV_HW_ADDR_T_MULTICAST 5
231 int refcount;
232 bool synced; 231 bool synced;
233 bool global_use; 232 bool global_use;
233 int refcount;
234 struct rcu_head rcu_head; 234 struct rcu_head rcu_head;
235}; 235};
236 236
@@ -901,7 +901,7 @@ struct net_device {
901 901
902 unsigned int flags; /* interface flags (a la BSD) */ 902 unsigned int flags; /* interface flags (a la BSD) */
903 unsigned short gflags; 903 unsigned short gflags;
904 unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */ 904 unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */
905 unsigned short padded; /* How much padding added by alloc_netdev() */ 905 unsigned short padded; /* How much padding added by alloc_netdev() */
906 906
907 unsigned char operstate; /* RFC2863 operstate */ 907 unsigned char operstate; /* RFC2863 operstate */
@@ -918,10 +918,6 @@ struct net_device {
918 unsigned short needed_headroom; 918 unsigned short needed_headroom;
919 unsigned short needed_tailroom; 919 unsigned short needed_tailroom;
920 920
921 struct net_device *master; /* Pointer to master device of a group,
922 * which this device is member of.
923 */
924
925 /* Interface address info. */ 921 /* Interface address info. */
926 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ 922 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
927 unsigned char addr_assign_type; /* hw address assignment type */ 923 unsigned char addr_assign_type; /* hw address assignment type */
@@ -942,7 +938,7 @@ struct net_device {
942 void *dsa_ptr; /* dsa specific data */ 938 void *dsa_ptr; /* dsa specific data */
943#endif 939#endif
944 void *atalk_ptr; /* AppleTalk link */ 940 void *atalk_ptr; /* AppleTalk link */
945 void *ip_ptr; /* IPv4 specific data */ 941 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
946 void *dn_ptr; /* DECnet specific data */ 942 void *dn_ptr; /* DECnet specific data */
947 void *ip6_ptr; /* IPv6 specific data */ 943 void *ip6_ptr; /* IPv6 specific data */
948 void *ec_ptr; /* Econet specific data */ 944 void *ec_ptr; /* Econet specific data */
@@ -951,9 +947,20 @@ struct net_device {
951 assign before registering */ 947 assign before registering */
952 948
953/* 949/*
954 * Cache line mostly used on receive path (including eth_type_trans()) 950 * Cache lines mostly used on receive path (including eth_type_trans())
955 */ 951 */
956 unsigned long last_rx; /* Time of last Rx */ 952 unsigned long last_rx; /* Time of last Rx
953 * This should not be set in
954 * drivers, unless really needed,
955 * because network stack (bonding)
956 * use it if/when necessary, to
957 * avoid dirtying this cache line.
958 */
959
960 struct net_device *master; /* Pointer to master device of a group,
961 * which this device is member of.
962 */
963
957 /* Interface address info used in eth_type_trans() */ 964 /* Interface address info used in eth_type_trans() */
958 unsigned char *dev_addr; /* hw address, (before bcast 965 unsigned char *dev_addr; /* hw address, (before bcast
959 because most packets are 966 because most packets are
@@ -973,10 +980,14 @@ struct net_device {
973 unsigned int num_rx_queues; 980 unsigned int num_rx_queues;
974#endif 981#endif
975 982
976 struct netdev_queue rx_queue;
977 rx_handler_func_t *rx_handler; 983 rx_handler_func_t *rx_handler;
978 void *rx_handler_data; 984 void *rx_handler_data;
979 985
986 struct netdev_queue rx_queue; /* use two cache lines */
987
988/*
989 * Cache lines mostly used on transmit path
990 */
980 struct netdev_queue *_tx ____cacheline_aligned_in_smp; 991 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
981 992
982 /* Number of TX queues allocated at alloc_netdev_mq() time */ 993 /* Number of TX queues allocated at alloc_netdev_mq() time */
@@ -990,9 +1001,7 @@ struct net_device {
990 1001
991 unsigned long tx_queue_len; /* Max frames per queue allowed */ 1002 unsigned long tx_queue_len; /* Max frames per queue allowed */
992 spinlock_t tx_global_lock; 1003 spinlock_t tx_global_lock;
993/* 1004
994 * One part is mostly used on xmit path (device)
995 */
996 /* These may be needed for future network-power-down code. */ 1005 /* These may be needed for future network-power-down code. */
997 1006
998 /* 1007 /*
@@ -1041,8 +1050,10 @@ struct net_device {
1041#endif 1050#endif
1042 1051
1043 /* mid-layer private */ 1052 /* mid-layer private */
1044 void *ml_priv; 1053 union {
1045 1054 void *ml_priv;
1055 struct pcpu_lstats __percpu *lstats; /* loopback stats */
1056 };
1046 /* GARP */ 1057 /* GARP */
1047 struct garp_port *garp_port; 1058 struct garp_port *garp_port;
1048 1059
@@ -1667,7 +1678,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
1667 */ 1678 */
1668static inline int netif_is_multiqueue(const struct net_device *dev) 1679static inline int netif_is_multiqueue(const struct net_device *dev)
1669{ 1680{
1670 return (dev->num_tx_queues > 1); 1681 return dev->num_tx_queues > 1;
1671} 1682}
1672 1683
1673extern void netif_set_real_num_tx_queues(struct net_device *dev, 1684extern void netif_set_real_num_tx_queues(struct net_device *dev,
@@ -1695,6 +1706,7 @@ extern gro_result_t dev_gro_receive(struct napi_struct *napi,
1695extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb); 1706extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
1696extern gro_result_t napi_gro_receive(struct napi_struct *napi, 1707extern gro_result_t napi_gro_receive(struct napi_struct *napi,
1697 struct sk_buff *skb); 1708 struct sk_buff *skb);
1709extern void napi_gro_flush(struct napi_struct *napi);
1698extern void napi_reuse_skb(struct napi_struct *napi, 1710extern void napi_reuse_skb(struct napi_struct *napi,
1699 struct sk_buff *skb); 1711 struct sk_buff *skb);
1700extern struct sk_buff * napi_get_frags(struct napi_struct *napi); 1712extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
@@ -2171,6 +2183,8 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
2171extern int netdev_class_create_file(struct class_attribute *class_attr); 2183extern int netdev_class_create_file(struct class_attribute *class_attr);
2172extern void netdev_class_remove_file(struct class_attribute *class_attr); 2184extern void netdev_class_remove_file(struct class_attribute *class_attr);
2173 2185
2186extern struct kobj_ns_type_operations net_ns_type_operations;
2187
2174extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); 2188extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
2175 2189
2176extern void linkwatch_run_queue(void); 2190extern void linkwatch_run_queue(void);
@@ -2191,7 +2205,7 @@ static inline int net_gso_ok(int features, int gso_type)
2191static inline int skb_gso_ok(struct sk_buff *skb, int features) 2205static inline int skb_gso_ok(struct sk_buff *skb, int features)
2192{ 2206{
2193 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 2207 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
2194 (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST)); 2208 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
2195} 2209}
2196 2210
2197static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 2211static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2c8701687336..f0518b0278a9 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -40,6 +40,43 @@
40 */ 40 */
41 41
42/** 42/**
43 * DOC: Frame transmission/registration support
44 *
45 * Frame transmission and registration support exists to allow userspace
 46 * management entities such as wpa_supplicant to react to management frames
47 * that are not being handled by the kernel. This includes, for example,
48 * certain classes of action frames that cannot be handled in the kernel
49 * for various reasons.
50 *
51 * Frame registration is done on a per-interface basis and registrations
52 * cannot be removed other than by closing the socket. It is possible to
53 * specify a registration filter to register, for example, only for a
54 * certain type of action frame. In particular with action frames, those
55 * that userspace registers for will not be returned as unhandled by the
 56 * driver; the registered application must take responsibility
 57 * for handling them.
58 *
59 * The type of frame that can be registered for is also dependent on the
60 * driver and interface type. The frame types are advertised in wiphy
61 * attributes so applications know what to expect.
62 *
63 * NOTE: When an interface changes type while registrations are active,
64 * these registrations are ignored until the interface type is
65 * changed again. This means that changing the interface type can
66 * lead to a situation that couldn't otherwise be produced, but
67 * any such registrations will be dormant in the sense that they
68 * will not be serviced, i.e. they will not receive any frames.
69 *
 70 * Frame transmission allows userspace to send, for example, the required
71 * responses to action frames. It is subject to some sanity checking,
 72 * but many frames can be transmitted. When a frame has been transmitted, its
73 * status is indicated to the sending socket.
74 *
75 * For more technical details, see the corresponding command descriptions
76 * below.
77 */
78
79/**
43 * enum nl80211_commands - supported nl80211 commands 80 * enum nl80211_commands - supported nl80211 commands
44 * 81 *
45 * @NL80211_CMD_UNSPEC: unspecified command to catch errors 82 * @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -258,7 +295,9 @@
258 * auth and assoc steps. For this, you need to specify the SSID in a 295 * auth and assoc steps. For this, you need to specify the SSID in a
259 * %NL80211_ATTR_SSID attribute, and can optionally specify the association 296 * %NL80211_ATTR_SSID attribute, and can optionally specify the association
260 * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC, 297 * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC,
261 * %NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_CONTROL_PORT. 298 * %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
299 * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and
300 * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT.
262 * It is also sent as an event, with the BSSID and response IEs when the 301 * It is also sent as an event, with the BSSID and response IEs when the
263 * connection is established or failed to be established. This can be 302 * connection is established or failed to be established. This can be
264 * determined by the STATUS_CODE attribute. 303 * determined by the STATUS_CODE attribute.
@@ -301,16 +340,20 @@
301 * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface 340 * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface
302 * and @NL80211_ATTR_TX_RATES the set of allowed rates. 341 * and @NL80211_ATTR_TX_RATES the set of allowed rates.
303 * 342 *
304 * @NL80211_CMD_REGISTER_ACTION: Register for receiving certain action frames 343 * @NL80211_CMD_REGISTER_FRAME: Register for receiving certain mgmt frames
305 * (via @NL80211_CMD_ACTION) for processing in userspace. This command 344 * (via @NL80211_CMD_FRAME) for processing in userspace. This command
306 * requires an interface index and a match attribute containing the first 345 * requires an interface index, a frame type attribute (optional for
307 few bytes of the frame that should match, e.g. a single byte for only 346 backward compatibility reasons; if not given, action frames are assumed)
308 * a category match or four bytes for vendor frames including the OUI. 347 * and a match attribute containing the first few bytes of the frame
309 * The registration cannot be dropped, but is removed automatically 348 * that should match, e.g. a single byte for only a category match or
310 * when the netlink socket is closed. Multiple registrations can be made. 349 * four bytes for vendor frames including the OUI. The registration
311 * @NL80211_CMD_ACTION: Action frame TX request and RX notification. This 350 * cannot be dropped, but is removed automatically when the netlink
312 * command is used both as a request to transmit an Action frame and as an 351 * socket is closed. Multiple registrations can be made.
313 * event indicating reception of an Action frame that was not processed in 352 * @NL80211_CMD_REGISTER_ACTION: Alias for @NL80211_CMD_REGISTER_FRAME for
353 * backward compatibility
354 * @NL80211_CMD_FRAME: Management frame TX request and RX notification. This
355 * command is used both as a request to transmit a management frame and
356 * as an event indicating reception of a frame that was not processed in
314 * kernel code, but is for us (i.e., which may need to be processed in a 357 * kernel code, but is for us (i.e., which may need to be processed in a
315 * user space application). %NL80211_ATTR_FRAME is used to specify the 358 * user space application). %NL80211_ATTR_FRAME is used to specify the
316 * frame contents (including header). %NL80211_ATTR_WIPHY_FREQ (and 359 * frame contents (including header). %NL80211_ATTR_WIPHY_FREQ (and
@@ -320,11 +363,14 @@
320 * operational channel). When called, this operation returns a cookie 363 * operational channel). When called, this operation returns a cookie
321 * (%NL80211_ATTR_COOKIE) that will be included with the TX status event 364 * (%NL80211_ATTR_COOKIE) that will be included with the TX status event
322 * pertaining to the TX request. 365 * pertaining to the TX request.
323 * @NL80211_CMD_ACTION_TX_STATUS: Report TX status of an Action frame 366 * @NL80211_CMD_ACTION: Alias for @NL80211_CMD_FRAME for backward compatibility.
324 * transmitted with %NL80211_CMD_ACTION. %NL80211_ATTR_COOKIE identifies 367 * @NL80211_CMD_FRAME_TX_STATUS: Report TX status of a management frame
368 * transmitted with %NL80211_CMD_FRAME. %NL80211_ATTR_COOKIE identifies
325 * the TX command and %NL80211_ATTR_FRAME includes the contents of the 369 * the TX command and %NL80211_ATTR_FRAME includes the contents of the
326 * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged 370 * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged
327 * the frame. 371 * the frame.
372 * @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for
373 * backward compatibility.
328 * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command 374 * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command
329 * is used to configure connection quality monitoring notification trigger 375 * is used to configure connection quality monitoring notification trigger
330 * levels. 376 * levels.
@@ -429,9 +475,12 @@ enum nl80211_commands {
429 475
430 NL80211_CMD_SET_TX_BITRATE_MASK, 476 NL80211_CMD_SET_TX_BITRATE_MASK,
431 477
432 NL80211_CMD_REGISTER_ACTION, 478 NL80211_CMD_REGISTER_FRAME,
433 NL80211_CMD_ACTION, 479 NL80211_CMD_REGISTER_ACTION = NL80211_CMD_REGISTER_FRAME,
434 NL80211_CMD_ACTION_TX_STATUS, 480 NL80211_CMD_FRAME,
481 NL80211_CMD_ACTION = NL80211_CMD_FRAME,
482 NL80211_CMD_FRAME_TX_STATUS,
483 NL80211_CMD_ACTION_TX_STATUS = NL80211_CMD_FRAME_TX_STATUS,
435 484
436 NL80211_CMD_SET_POWER_SAVE, 485 NL80211_CMD_SET_POWER_SAVE,
437 NL80211_CMD_GET_POWER_SAVE, 486 NL80211_CMD_GET_POWER_SAVE,
@@ -639,6 +688,15 @@ enum nl80211_commands {
639 * request, the driver will assume that the port is unauthorized until 688 * request, the driver will assume that the port is unauthorized until
640 * authorized by user space. Otherwise, port is marked authorized by 689 * authorized by user space. Otherwise, port is marked authorized by
641 * default in station mode. 690 * default in station mode.
691 * @NL80211_ATTR_CONTROL_PORT_ETHERTYPE: A 16-bit value indicating the
692 * ethertype that will be used for key negotiation. It can be
693 * specified with the associate and connect commands. If it is not
694 * specified, the value defaults to 0x888E (PAE, 802.1X). This
695 * attribute is also used as a flag in the wiphy information to
696 * indicate that protocols other than PAE are supported.
697 * @NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT: When included along with
698 * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, indicates that the custom
699 * ethertype frames used for key negotiation must not be encrypted.
642 * 700 *
643 * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. 701 * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver.
644 * We recommend using nested, driver-specific attributes within this. 702 * We recommend using nested, driver-specific attributes within this.
@@ -708,7 +766,16 @@ enum nl80211_commands {
708 * is used with %NL80211_CMD_SET_TX_BITRATE_MASK. 766 * is used with %NL80211_CMD_SET_TX_BITRATE_MASK.
709 * 767 *
710 * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain 768 * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
711 * at least one byte, currently used with @NL80211_CMD_REGISTER_ACTION. 769 * at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
770 * @NL80211_ATTR_FRAME_TYPE: A u16 indicating the frame type/subtype for the
771 * @NL80211_CMD_REGISTER_FRAME command.
772 * @NL80211_ATTR_TX_FRAME_TYPES: wiphy capability attribute, which is a
773 * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing
774 * information about which frame types can be transmitted with
775 * %NL80211_CMD_FRAME.
776 * @NL80211_ATTR_RX_FRAME_TYPES: wiphy capability attribute, which is a
777 * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing
778 * information about which frame types can be registered for RX.
712 * 779 *
713 * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was 780 * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was
714 * acknowledged by the recipient. 781 * acknowledged by the recipient.
@@ -891,6 +958,13 @@ enum nl80211_attrs {
891 NL80211_ATTR_WIPHY_TX_POWER_SETTING, 958 NL80211_ATTR_WIPHY_TX_POWER_SETTING,
892 NL80211_ATTR_WIPHY_TX_POWER_LEVEL, 959 NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
893 960
961 NL80211_ATTR_TX_FRAME_TYPES,
962 NL80211_ATTR_RX_FRAME_TYPES,
963 NL80211_ATTR_FRAME_TYPE,
964
965 NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
966 NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT,
967
894 /* add attributes here, update the policy in nl80211.c */ 968 /* add attributes here, update the policy in nl80211.c */
895 969
896 __NL80211_ATTR_AFTER_LAST, 970 __NL80211_ATTR_AFTER_LAST,
@@ -946,8 +1020,10 @@ enum nl80211_attrs {
946 * @NL80211_IFTYPE_WDS: wireless distribution interface 1020 * @NL80211_IFTYPE_WDS: wireless distribution interface
947 * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames 1021 * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames
948 * @NL80211_IFTYPE_MESH_POINT: mesh point 1022 * @NL80211_IFTYPE_MESH_POINT: mesh point
1023 * @NL80211_IFTYPE_P2P_CLIENT: P2P client
1024 * @NL80211_IFTYPE_P2P_GO: P2P group owner
949 * @NL80211_IFTYPE_MAX: highest interface type number currently defined 1025 * @NL80211_IFTYPE_MAX: highest interface type number currently defined
950 * @__NL80211_IFTYPE_AFTER_LAST: internal use 1026 * @NUM_NL80211_IFTYPES: number of defined interface types
951 * 1027 *
952 * These values are used with the %NL80211_ATTR_IFTYPE 1028 * These values are used with the %NL80211_ATTR_IFTYPE
953 * to set the type of an interface. 1029 * to set the type of an interface.
@@ -962,10 +1038,12 @@ enum nl80211_iftype {
962 NL80211_IFTYPE_WDS, 1038 NL80211_IFTYPE_WDS,
963 NL80211_IFTYPE_MONITOR, 1039 NL80211_IFTYPE_MONITOR,
964 NL80211_IFTYPE_MESH_POINT, 1040 NL80211_IFTYPE_MESH_POINT,
1041 NL80211_IFTYPE_P2P_CLIENT,
1042 NL80211_IFTYPE_P2P_GO,
965 1043
966 /* keep last */ 1044 /* keep last */
967 __NL80211_IFTYPE_AFTER_LAST, 1045 NUM_NL80211_IFTYPES,
968 NL80211_IFTYPE_MAX = __NL80211_IFTYPE_AFTER_LAST - 1 1046 NL80211_IFTYPE_MAX = NUM_NL80211_IFTYPES - 1
969}; 1047};
970 1048
971/** 1049/**
@@ -974,11 +1052,14 @@ enum nl80211_iftype {
974 * Station flags. When a station is added to an AP interface, it is 1052 * Station flags. When a station is added to an AP interface, it is
975 * assumed to be already associated (and hence authenticated.) 1053 * assumed to be already associated (and hence authenticated.)
976 * 1054 *
1055 * @__NL80211_STA_FLAG_INVALID: attribute number 0 is reserved
977 * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X) 1056 * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X)
978 * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames 1057 * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames
 979 * with short Barker preamble 1058 * with short Barker preamble
980 * @NL80211_STA_FLAG_WME: station is WME/QoS capable 1059 * @NL80211_STA_FLAG_WME: station is WME/QoS capable
981 * @NL80211_STA_FLAG_MFP: station uses management frame protection 1060 * @NL80211_STA_FLAG_MFP: station uses management frame protection
1061 * @NL80211_STA_FLAG_MAX: highest station flag number currently defined
1062 * @__NL80211_STA_FLAG_AFTER_LAST: internal use
982 */ 1063 */
983enum nl80211_sta_flags { 1064enum nl80211_sta_flags {
984 __NL80211_STA_FLAG_INVALID, 1065 __NL80211_STA_FLAG_INVALID,
@@ -1091,14 +1172,17 @@ enum nl80211_mpath_flags {
1091 * information about a mesh path. 1172 * information about a mesh path.
1092 * 1173 *
1093 * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved 1174 * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved
1094 * @NL80211_ATTR_MPATH_FRAME_QLEN: number of queued frames for this destination 1175 * @NL80211_MPATH_INFO_FRAME_QLEN: number of queued frames for this destination
1095 * @NL80211_ATTR_MPATH_SN: destination sequence number 1176 * @NL80211_MPATH_INFO_SN: destination sequence number
1096 * @NL80211_ATTR_MPATH_METRIC: metric (cost) of this mesh path 1177 * @NL80211_MPATH_INFO_METRIC: metric (cost) of this mesh path
1097 * @NL80211_ATTR_MPATH_EXPTIME: expiration time for the path, in msec from now 1178 * @NL80211_MPATH_INFO_EXPTIME: expiration time for the path, in msec from now
1098 * @NL80211_ATTR_MPATH_FLAGS: mesh path flags, enumerated in 1179 * @NL80211_MPATH_INFO_FLAGS: mesh path flags, enumerated in
1099 * &enum nl80211_mpath_flags; 1180 * &enum nl80211_mpath_flags;
1100 * @NL80211_ATTR_MPATH_DISCOVERY_TIMEOUT: total path discovery timeout, in msec 1181 * @NL80211_MPATH_INFO_DISCOVERY_TIMEOUT: total path discovery timeout, in msec
1101 * @NL80211_ATTR_MPATH_DISCOVERY_RETRIES: mesh path discovery retries 1182 * @NL80211_MPATH_INFO_DISCOVERY_RETRIES: mesh path discovery retries
1183 * @NL80211_MPATH_INFO_MAX: highest mesh path information attribute number
 1184 * currently defined
1185 * @__NL80211_MPATH_INFO_AFTER_LAST: internal use
1102 */ 1186 */
1103enum nl80211_mpath_info { 1187enum nl80211_mpath_info {
1104 __NL80211_MPATH_INFO_INVALID, 1188 __NL80211_MPATH_INFO_INVALID,
@@ -1127,6 +1211,8 @@ enum nl80211_mpath_info {
1127 * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE 1211 * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE
1128 * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n 1212 * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n
1129 * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n 1213 * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n
1214 * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
1215 * @__NL80211_BAND_ATTR_AFTER_LAST: internal use
1130 */ 1216 */
1131enum nl80211_band_attr { 1217enum nl80211_band_attr {
1132 __NL80211_BAND_ATTR_INVALID, 1218 __NL80211_BAND_ATTR_INVALID,
@@ -1147,6 +1233,7 @@ enum nl80211_band_attr {
1147 1233
1148/** 1234/**
1149 * enum nl80211_frequency_attr - frequency attributes 1235 * enum nl80211_frequency_attr - frequency attributes
1236 * @__NL80211_FREQUENCY_ATTR_INVALID: attribute number 0 is reserved
1150 * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz 1237 * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz
1151 * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current 1238 * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current
1152 * regulatory domain. 1239 * regulatory domain.
@@ -1158,6 +1245,9 @@ enum nl80211_band_attr {
1158 * on this channel in current regulatory domain. 1245 * on this channel in current regulatory domain.
1159 * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm 1246 * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm
1160 * (100 * dBm). 1247 * (100 * dBm).
1248 * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
1249 * currently defined
1250 * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
1161 */ 1251 */
1162enum nl80211_frequency_attr { 1252enum nl80211_frequency_attr {
1163 __NL80211_FREQUENCY_ATTR_INVALID, 1253 __NL80211_FREQUENCY_ATTR_INVALID,
@@ -1177,9 +1267,13 @@ enum nl80211_frequency_attr {
1177 1267
1178/** 1268/**
1179 * enum nl80211_bitrate_attr - bitrate attributes 1269 * enum nl80211_bitrate_attr - bitrate attributes
1270 * @__NL80211_BITRATE_ATTR_INVALID: attribute number 0 is reserved
1180 * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps 1271 * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps
1181 * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported 1272 * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported
1182 * in 2.4 GHz band. 1273 * in 2.4 GHz band.
1274 * @NL80211_BITRATE_ATTR_MAX: highest bitrate attribute number
1275 * currently defined
1276 * @__NL80211_BITRATE_ATTR_AFTER_LAST: internal use
1183 */ 1277 */
1184enum nl80211_bitrate_attr { 1278enum nl80211_bitrate_attr {
1185 __NL80211_BITRATE_ATTR_INVALID, 1279 __NL80211_BITRATE_ATTR_INVALID,
@@ -1235,6 +1329,7 @@ enum nl80211_reg_type {
1235 1329
1236/** 1330/**
1237 * enum nl80211_reg_rule_attr - regulatory rule attributes 1331 * enum nl80211_reg_rule_attr - regulatory rule attributes
1332 * @__NL80211_REG_RULE_ATTR_INVALID: attribute number 0 is reserved
1238 * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional 1333 * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional
1239 * considerations for a given frequency range. These are the 1334 * considerations for a given frequency range. These are the
1240 * &enum nl80211_reg_rule_flags. 1335 * &enum nl80211_reg_rule_flags.
@@ -1251,6 +1346,9 @@ enum nl80211_reg_type {
1251 * If you don't have one then don't send this. 1346 * If you don't have one then don't send this.
1252 * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for 1347 * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for
1253 * a given frequency range. The value is in mBm (100 * dBm). 1348 * a given frequency range. The value is in mBm (100 * dBm).
1349 * @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number
1350 * currently defined
1351 * @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use
1254 */ 1352 */
1255enum nl80211_reg_rule_attr { 1353enum nl80211_reg_rule_attr {
1256 __NL80211_REG_RULE_ATTR_INVALID, 1354 __NL80211_REG_RULE_ATTR_INVALID,
@@ -1302,6 +1400,9 @@ enum nl80211_reg_rule_flags {
1302 * @__NL80211_SURVEY_INFO_INVALID: attribute number 0 is reserved 1400 * @__NL80211_SURVEY_INFO_INVALID: attribute number 0 is reserved
1303 * @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel 1401 * @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel
1304 * @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm) 1402 * @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm)
1403 * @NL80211_SURVEY_INFO_MAX: highest survey info attribute number
1404 * currently defined
1405 * @__NL80211_SURVEY_INFO_AFTER_LAST: internal use
1305 */ 1406 */
1306enum nl80211_survey_info { 1407enum nl80211_survey_info {
1307 __NL80211_SURVEY_INFO_INVALID, 1408 __NL80211_SURVEY_INFO_INVALID,
@@ -1466,6 +1567,7 @@ enum nl80211_channel_type {
1466 * enum nl80211_bss - netlink attributes for a BSS 1567 * enum nl80211_bss - netlink attributes for a BSS
1467 * 1568 *
1468 * @__NL80211_BSS_INVALID: invalid 1569 * @__NL80211_BSS_INVALID: invalid
1570 * @NL80211_BSS_BSSID: BSSID of the BSS (6 octets)
1469 * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) 1571 * @NL80211_BSS_FREQUENCY: frequency in MHz (u32)
1470 * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) 1572 * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64)
1471 * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) 1573 * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16)
@@ -1509,6 +1611,12 @@ enum nl80211_bss {
1509 1611
1510/** 1612/**
1511 * enum nl80211_bss_status - BSS "status" 1613 * enum nl80211_bss_status - BSS "status"
1614 * @NL80211_BSS_STATUS_AUTHENTICATED: Authenticated with this BSS.
1615 * @NL80211_BSS_STATUS_ASSOCIATED: Associated with this BSS.
1616 * @NL80211_BSS_STATUS_IBSS_JOINED: Joined to this IBSS.
1617 *
1618 * The BSS status is a BSS attribute in scan dumps, which
 1619 * indicates the status the interface has with respect to this BSS.
1512 */ 1620 */
1513enum nl80211_bss_status { 1621enum nl80211_bss_status {
1514 NL80211_BSS_STATUS_AUTHENTICATED, 1622 NL80211_BSS_STATUS_AUTHENTICATED,
@@ -1619,8 +1727,8 @@ enum nl80211_tx_rate_attributes {
1619 1727
1620/** 1728/**
1621 * enum nl80211_band - Frequency band 1729 * enum nl80211_band - Frequency band
1622 * @NL80211_BAND_2GHZ - 2.4 GHz ISM band 1730 * @NL80211_BAND_2GHZ: 2.4 GHz ISM band
1623 * @NL80211_BAND_5GHZ - around 5 GHz band (4.9 - 5.7 GHz) 1731 * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
1624 */ 1732 */
1625enum nl80211_band { 1733enum nl80211_band {
1626 NL80211_BAND_2GHZ, 1734 NL80211_BAND_2GHZ,
@@ -1658,9 +1766,9 @@ enum nl80211_attr_cqm {
1658 1766
1659/** 1767/**
1660 * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event 1768 * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event
1661 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW - The RSSI level is lower than the 1769 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW: The RSSI level is lower than the
1662 * configured threshold 1770 * configured threshold
1663 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH - The RSSI is higher than the 1771 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the
1664 * configured threshold 1772 * configured threshold
1665 */ 1773 */
1666enum nl80211_cqm_rssi_threshold_event { 1774enum nl80211_cqm_rssi_threshold_event {
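
The renamed commands are easiest to see from userspace. A rough sketch,
assuming libnl-3 genl (error handling elided, values illustrative), that
registers for Public Action frames with the new NL80211_CMD_REGISTER_FRAME:

    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/nl80211.h>

    static int register_public_action(int ifindex)
    {
            struct nl_sock *sk = nl_socket_alloc();
            struct nl_msg *msg = nlmsg_alloc();
            unsigned char match[] = { 0x04 };   /* Public Action category */
            int family;

            genl_connect(sk);
            family = genl_ctrl_resolve(sk, "nl80211");

            genlmsg_put(msg, 0, 0, family, 0, 0, NL80211_CMD_REGISTER_FRAME, 0);
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
            /* 0x00d0: management frame type, Action subtype */
            nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, 0x00d0);
            nla_put(msg, NL80211_ATTR_FRAME_MATCH, sizeof(match), match);

            nl_send_auto_complete(sk, msg);
            nlmsg_free(msg);
            /* keep sk open: the registration dies with the socket */
            return 0;
    }
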
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 10d33309e9a6..9438660b46ea 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2189,6 +2189,9 @@
2189#define PCI_VENDOR_ID_ARIMA 0x161f 2189#define PCI_VENDOR_ID_ARIMA 0x161f
2190 2190
2191#define PCI_VENDOR_ID_BROCADE 0x1657 2191#define PCI_VENDOR_ID_BROCADE 0x1657
2192#define PCI_DEVICE_ID_BROCADE_CT 0x0014
2193#define PCI_DEVICE_ID_BROCADE_FC_8G1P 0x0017
2194#define PCI_DEVICE_ID_BROCADE_CT_FC 0x0021
2192 2195
2193#define PCI_VENDOR_ID_SIBYTE 0x166d 2196#define PCI_VENDOR_ID_SIBYTE 0x166d
2194#define PCI_DEVICE_ID_BCM1250_PCI 0x0001 2197#define PCI_DEVICE_ID_BCM1250_PCI 0x0001
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index 76edadf046d3..85e14a83283b 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -47,6 +47,8 @@
47 47
48/* ioctls */ 48/* ioctls */
49#define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) 49#define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0)
50#define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14)
51#define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15)
50 52
51/* Phonet protocol header */ 53/* Phonet protocol header */
52struct phonethdr { 54struct phonethdr {
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6b0a782c6224..a6e047a04f79 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -116,7 +116,7 @@ struct mii_bus {
116 /* list of all PHYs on bus */ 116 /* list of all PHYs on bus */
117 struct phy_device *phy_map[PHY_MAX_ADDR]; 117 struct phy_device *phy_map[PHY_MAX_ADDR];
118 118
119 /* Phy addresses to be ignored when probing */ 119 /* PHY addresses to be ignored when probing */
120 u32 phy_mask; 120 u32 phy_mask;
121 121
122 /* 122 /*
@@ -283,7 +283,7 @@ struct phy_device {
283 283
284 phy_interface_t interface; 284 phy_interface_t interface;
285 285
286 /* Bus address of the PHY (0-32) */ 286 /* Bus address of the PHY (0-31) */
287 int addr; 287 int addr;
288 288
289 /* 289 /*
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index 7f6ba8658abe..defbde203d07 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -332,6 +332,7 @@ enum {
332 FLOW_KEY_SKUID, 332 FLOW_KEY_SKUID,
333 FLOW_KEY_SKGID, 333 FLOW_KEY_SKGID,
334 FLOW_KEY_VLAN_TAG, 334 FLOW_KEY_VLAN_TAG,
335 FLOW_KEY_RXHASH,
335 __FLOW_KEY_MAX, 336 __FLOW_KEY_MAX,
336}; 337};
337 338
diff --git a/include/linux/rds.h b/include/linux/rds.h
index 24bce3ded9ea..91950950aa59 100644
--- a/include/linux/rds.h
+++ b/include/linux/rds.h
@@ -36,15 +36,6 @@
36 36
37#include <linux/types.h> 37#include <linux/types.h>
38 38
39/* These sparse annotated types shouldn't be in any user
40 * visible header file. We should clean this up rather
41 * than kludging around them. */
42#ifndef __KERNEL__
43#define __be16 u_int16_t
44#define __be32 u_int32_t
45#define __be64 u_int64_t
46#endif
47
48#define RDS_IB_ABI_VERSION 0x301 39#define RDS_IB_ABI_VERSION 0x301
49 40
50/* 41/*
@@ -82,6 +73,10 @@
82#define RDS_CMSG_RDMA_MAP 3 73#define RDS_CMSG_RDMA_MAP 3
83#define RDS_CMSG_RDMA_STATUS 4 74#define RDS_CMSG_RDMA_STATUS 4
84#define RDS_CMSG_CONG_UPDATE 5 75#define RDS_CMSG_CONG_UPDATE 5
76#define RDS_CMSG_ATOMIC_FADD 6
77#define RDS_CMSG_ATOMIC_CSWP 7
78#define RDS_CMSG_MASKED_ATOMIC_FADD 8
79#define RDS_CMSG_MASKED_ATOMIC_CSWP 9
85 80
86#define RDS_INFO_FIRST 10000 81#define RDS_INFO_FIRST 10000
87#define RDS_INFO_COUNTERS 10000 82#define RDS_INFO_COUNTERS 10000
@@ -98,9 +93,9 @@
98#define RDS_INFO_LAST 10010 93#define RDS_INFO_LAST 10010
99 94
100struct rds_info_counter { 95struct rds_info_counter {
101 u_int8_t name[32]; 96 uint8_t name[32];
102 u_int64_t value; 97 uint64_t value;
103} __packed; 98} __attribute__((packed));
104 99
105#define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 100#define RDS_INFO_CONNECTION_FLAG_SENDING 0x01
106#define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 101#define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02
@@ -109,56 +104,48 @@ struct rds_info_counter {
109#define TRANSNAMSIZ 16 104#define TRANSNAMSIZ 16
110 105
111struct rds_info_connection { 106struct rds_info_connection {
112 u_int64_t next_tx_seq; 107 uint64_t next_tx_seq;
113 u_int64_t next_rx_seq; 108 uint64_t next_rx_seq;
114 __be32 laddr;
115 __be32 faddr;
116 u_int8_t transport[TRANSNAMSIZ]; /* null term ascii */
117 u_int8_t flags;
118} __packed;
119
120struct rds_info_flow {
121 __be32 laddr; 109 __be32 laddr;
122 __be32 faddr; 110 __be32 faddr;
123 u_int32_t bytes; 111 uint8_t transport[TRANSNAMSIZ]; /* null term ascii */
124 __be16 lport; 112 uint8_t flags;
125 __be16 fport; 113} __attribute__((packed));
126} __packed;
127 114
128#define RDS_INFO_MESSAGE_FLAG_ACK 0x01 115#define RDS_INFO_MESSAGE_FLAG_ACK 0x01
129#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 116#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02
130 117
131struct rds_info_message { 118struct rds_info_message {
132 u_int64_t seq; 119 uint64_t seq;
133 u_int32_t len; 120 uint32_t len;
134 __be32 laddr; 121 __be32 laddr;
135 __be32 faddr; 122 __be32 faddr;
136 __be16 lport; 123 __be16 lport;
137 __be16 fport; 124 __be16 fport;
138 u_int8_t flags; 125 uint8_t flags;
139} __packed; 126} __attribute__((packed));
140 127
141struct rds_info_socket { 128struct rds_info_socket {
142 u_int32_t sndbuf; 129 uint32_t sndbuf;
143 __be32 bound_addr; 130 __be32 bound_addr;
144 __be32 connected_addr; 131 __be32 connected_addr;
145 __be16 bound_port; 132 __be16 bound_port;
146 __be16 connected_port; 133 __be16 connected_port;
147 u_int32_t rcvbuf; 134 uint32_t rcvbuf;
148 u_int64_t inum; 135 uint64_t inum;
149} __packed; 136} __attribute__((packed));
150 137
151struct rds_info_tcp_socket { 138struct rds_info_tcp_socket {
152 __be32 local_addr; 139 __be32 local_addr;
153 __be16 local_port; 140 __be16 local_port;
154 __be32 peer_addr; 141 __be32 peer_addr;
155 __be16 peer_port; 142 __be16 peer_port;
156 u_int64_t hdr_rem; 143 uint64_t hdr_rem;
157 u_int64_t data_rem; 144 uint64_t data_rem;
158 u_int32_t last_sent_nxt; 145 uint32_t last_sent_nxt;
159 u_int32_t last_expected_una; 146 uint32_t last_expected_una;
160 u_int32_t last_seen_una; 147 uint32_t last_seen_una;
161} __packed; 148} __attribute__((packed));
162 149
163#define RDS_IB_GID_LEN 16 150#define RDS_IB_GID_LEN 16
164struct rds_info_rdma_connection { 151struct rds_info_rdma_connection {
@@ -212,42 +199,69 @@ struct rds_info_rdma_connection {
212 * (so that the application does not have to worry about 199 * (so that the application does not have to worry about
213 * alignment). 200 * alignment).
214 */ 201 */
215typedef u_int64_t rds_rdma_cookie_t; 202typedef uint64_t rds_rdma_cookie_t;
216 203
217struct rds_iovec { 204struct rds_iovec {
218 u_int64_t addr; 205 uint64_t addr;
219 u_int64_t bytes; 206 uint64_t bytes;
220}; 207};
221 208
222struct rds_get_mr_args { 209struct rds_get_mr_args {
223 struct rds_iovec vec; 210 struct rds_iovec vec;
224 u_int64_t cookie_addr; 211 uint64_t cookie_addr;
225 uint64_t flags; 212 uint64_t flags;
226}; 213};
227 214
228struct rds_get_mr_for_dest_args { 215struct rds_get_mr_for_dest_args {
229 struct sockaddr_storage dest_addr; 216 struct sockaddr_storage dest_addr;
230 struct rds_iovec vec; 217 struct rds_iovec vec;
231 u_int64_t cookie_addr; 218 uint64_t cookie_addr;
232 uint64_t flags; 219 uint64_t flags;
233}; 220};
234 221
235struct rds_free_mr_args { 222struct rds_free_mr_args {
236 rds_rdma_cookie_t cookie; 223 rds_rdma_cookie_t cookie;
237 u_int64_t flags; 224 uint64_t flags;
238}; 225};
239 226
240struct rds_rdma_args { 227struct rds_rdma_args {
241 rds_rdma_cookie_t cookie; 228 rds_rdma_cookie_t cookie;
242 struct rds_iovec remote_vec; 229 struct rds_iovec remote_vec;
243 u_int64_t local_vec_addr; 230 uint64_t local_vec_addr;
244 u_int64_t nr_local; 231 uint64_t nr_local;
245 u_int64_t flags; 232 uint64_t flags;
246 u_int64_t user_token; 233 uint64_t user_token;
234};
235
236struct rds_atomic_args {
237 rds_rdma_cookie_t cookie;
238 uint64_t local_addr;
239 uint64_t remote_addr;
240 union {
241 struct {
242 uint64_t compare;
243 uint64_t swap;
244 } cswp;
245 struct {
246 uint64_t add;
247 } fadd;
248 struct {
249 uint64_t compare;
250 uint64_t swap;
251 uint64_t compare_mask;
252 uint64_t swap_mask;
253 } m_cswp;
254 struct {
255 uint64_t add;
256 uint64_t nocarry_mask;
257 } m_fadd;
258 };
259 uint64_t flags;
260 uint64_t user_token;
247}; 261};
248 262
249struct rds_rdma_notify { 263struct rds_rdma_notify {
250 u_int64_t user_token; 264 uint64_t user_token;
251 int32_t status; 265 int32_t status;
252}; 266};
253 267
@@ -266,5 +280,6 @@ struct rds_rdma_notify {
266#define RDS_RDMA_USE_ONCE 0x0008 /* free MR after use */ 280#define RDS_RDMA_USE_ONCE 0x0008 /* free MR after use */
267#define RDS_RDMA_DONTWAIT 0x0010 /* Don't wait in SET_BARRIER */ 281#define RDS_RDMA_DONTWAIT 0x0010 /* Don't wait in SET_BARRIER */
268#define RDS_RDMA_NOTIFY_ME 0x0020 /* Notify when operation completes */ 282#define RDS_RDMA_NOTIFY_ME 0x0020 /* Notify when operation completes */
283#define RDS_RDMA_SILENT 0x0040 /* Do not interrupt remote */
269 284
270#endif /* IB_RDS_H */ 285#endif /* IB_RDS_H */
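
The new atomic ops are requested as control messages on sendmsg(). A hedged
sketch (addresses and the cookie are placeholders; SOL_RDS is defined
elsewhere in this header) issuing a fetch-and-add of 1 on a remote 64-bit word:

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/rds.h>

    static ssize_t rds_fetch_add_one(int fd, struct sockaddr_in *dest,
                                     rds_rdma_cookie_t cookie,
                                     uint64_t local, uint64_t remote)
    {
            struct rds_atomic_args args;
            union {         /* aligned buffer for the cmsg */
                    char buf[CMSG_SPACE(sizeof(struct rds_atomic_args))];
                    struct cmsghdr align;
            } u;
            struct msghdr msg;
            struct cmsghdr *cmsg;

            memset(&args, 0, sizeof(args));
            args.cookie      = cookie;  /* MR cookie for the remote word */
            args.local_addr  = local;   /* where the previous value lands */
            args.remote_addr = remote;
            args.fadd.add    = 1;
            args.flags       = RDS_RDMA_NOTIFY_ME;

            memset(&msg, 0, sizeof(msg));
            msg.msg_name       = dest;
            msg.msg_namelen    = sizeof(*dest);
            msg.msg_control    = u.buf;
            msg.msg_controllen = sizeof(u.buf);

            cmsg = CMSG_FIRSTHDR(&msg);
            cmsg->cmsg_level = SOL_RDS;
            cmsg->cmsg_type  = RDS_CMSG_ATOMIC_FADD;
            cmsg->cmsg_len   = CMSG_LEN(sizeof(args));
            memcpy(CMSG_DATA(cmsg), &args, sizeof(args));

            return sendmsg(fd, &msg, 0);
    }
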
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 58d44491880f..68c436bddc88 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -749,6 +749,26 @@ extern int rtnl_is_locked(void);
749extern int lockdep_rtnl_is_held(void); 749extern int lockdep_rtnl_is_held(void);
750#endif /* #ifdef CONFIG_PROVE_LOCKING */ 750#endif /* #ifdef CONFIG_PROVE_LOCKING */
751 751
752/**
753 * rcu_dereference_rtnl - rcu_dereference with debug checking
754 * @p: The pointer to read, prior to dereferencing
755 *
756 * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
757 * or RTNL
758 */
759#define rcu_dereference_rtnl(p) \
760 rcu_dereference_check(p, rcu_read_lock_held() || \
761 lockdep_rtnl_is_held())
762
763/**
764 * rtnl_dereference - rcu_dereference with debug checking
765 * @p: The pointer to read, prior to dereferencing
766 *
767 * Do an rcu_dereference(p), but check caller holds RTNL
768 */
769#define rtnl_dereference(p) \
770 rcu_dereference_check(p, lockdep_rtnl_is_held())
771
752extern void rtnetlink_init(void); 772extern void rtnetlink_init(void);
753extern void __rtnl_unlock(void); 773extern void __rtnl_unlock(void);
754 774
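
A short sketch of how the two new helpers document locking intent;
struct foo_cfg and dev->foo_cfg are hypothetical:

    struct foo_cfg { u32 limit; };      /* hypothetical RCU-managed config */

    /* caller holds RTNL: lockdep verifies, no rcu_read_lock() needed */
    static struct foo_cfg *foo_get_cfg_rtnl(struct net_device *dev)
    {
            return rtnl_dereference(dev->foo_cfg);
    }

    /* reader path: either rcu_read_lock() or RTNL satisfies the check */
    static u32 foo_read_limit(struct net_device *dev)
    {
            struct foo_cfg *cfg;
            u32 limit;

            rcu_read_lock();
            cfg = rcu_dereference_rtnl(dev->foo_cfg);
            limit = cfg ? cfg->limit : 0;
            rcu_read_unlock();
            return limit;
    }
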
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 77eb60d2b496..0b53c43ac92e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -129,8 +129,13 @@ typedef struct skb_frag_struct skb_frag_t;
129 129
130struct skb_frag_struct { 130struct skb_frag_struct {
131 struct page *page; 131 struct page *page;
132#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
132 __u32 page_offset; 133 __u32 page_offset;
133 __u32 size; 134 __u32 size;
135#else
136 __u16 page_offset;
137 __u16 size;
138#endif
134}; 139};
135 140
136#define HAVE_HW_TIME_STAMP 141#define HAVE_HW_TIME_STAMP
@@ -163,26 +168,19 @@ struct skb_shared_hwtstamps {
163 ktime_t syststamp; 168 ktime_t syststamp;
164}; 169};
165 170
166/** 171/* Definitions for tx_flags in struct skb_shared_info */
167 * struct skb_shared_tx - instructions for time stamping of outgoing packets 172enum {
168 * @hardware: generate hardware time stamp 173 /* generate hardware time stamp */
169 * @software: generate software time stamp 174 SKBTX_HW_TSTAMP = 1 << 0,
170 * @in_progress: device driver is going to provide 175
171 * hardware time stamp 176 /* generate software time stamp */
172 * @prevent_sk_orphan: make sk reference available on driver level 177 SKBTX_SW_TSTAMP = 1 << 1,
173 * @flags: all shared_tx flags 178
174 * 179 /* device driver is going to provide hardware time stamp */
175 * These flags are attached to packets as part of the 180 SKBTX_IN_PROGRESS = 1 << 2,
176 * &skb_shared_info. Use skb_tx() to get a pointer. 181
177 */ 182 /* ensure the originating sk reference is available on driver level */
178union skb_shared_tx { 183 SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
179 struct {
180 __u8 hardware:1,
181 software:1,
182 in_progress:1,
183 prevent_sk_orphan:1;
184 };
185 __u8 flags;
186}; 184};
187 185
188/* This data is invariant across clones and lives at 186/* This data is invariant across clones and lives at
@@ -195,7 +193,7 @@ struct skb_shared_info {
195 unsigned short gso_segs; 193 unsigned short gso_segs;
196 unsigned short gso_type; 194 unsigned short gso_type;
197 __be32 ip6_frag_id; 195 __be32 ip6_frag_id;
198 union skb_shared_tx tx_flags; 196 __u8 tx_flags;
199 struct sk_buff *frag_list; 197 struct sk_buff *frag_list;
200 struct skb_shared_hwtstamps hwtstamps; 198 struct skb_shared_hwtstamps hwtstamps;
201 199
@@ -558,6 +556,15 @@ extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
558 unsigned int to, struct ts_config *config, 556 unsigned int to, struct ts_config *config,
559 struct ts_state *state); 557 struct ts_state *state);
560 558
559extern __u32 __skb_get_rxhash(struct sk_buff *skb);
560static inline __u32 skb_get_rxhash(struct sk_buff *skb)
561{
562 if (!skb->rxhash)
563 skb->rxhash = __skb_get_rxhash(skb);
564
565 return skb->rxhash;
566}
567
561#ifdef NET_SKBUFF_DATA_USES_OFFSET 568#ifdef NET_SKBUFF_DATA_USES_OFFSET
562static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 569static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
563{ 570{
@@ -578,11 +585,6 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
578 return &skb_shinfo(skb)->hwtstamps; 585 return &skb_shinfo(skb)->hwtstamps;
579} 586}
580 587
581static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
582{
583 return &skb_shinfo(skb)->tx_flags;
584}
585
586/** 588/**
587 * skb_queue_empty - check if a queue is empty 589 * skb_queue_empty - check if a queue is empty
588 * @list: queue head 590 * @list: queue head
@@ -604,7 +606,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
604static inline bool skb_queue_is_last(const struct sk_buff_head *list, 606static inline bool skb_queue_is_last(const struct sk_buff_head *list,
605 const struct sk_buff *skb) 607 const struct sk_buff *skb)
606{ 608{
607 return (skb->next == (struct sk_buff *) list); 609 return skb->next == (struct sk_buff *)list;
608} 610}
609 611
610/** 612/**
@@ -617,7 +619,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
617static inline bool skb_queue_is_first(const struct sk_buff_head *list, 619static inline bool skb_queue_is_first(const struct sk_buff_head *list,
618 const struct sk_buff *skb) 620 const struct sk_buff *skb)
619{ 621{
620 return (skb->prev == (struct sk_buff *) list); 622 return skb->prev == (struct sk_buff *)list;
621} 623}
622 624
623/** 625/**
@@ -1123,7 +1125,7 @@ extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
1123 int off, int size); 1125 int off, int size);
1124 1126
1125#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) 1127#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1126#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frags(skb)) 1128#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
1127#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) 1129#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1128 1130
1129#ifdef NET_SKBUFF_DATA_USES_OFFSET 1131#ifdef NET_SKBUFF_DATA_USES_OFFSET
@@ -1787,7 +1789,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
1787 skb = skb->prev) 1789 skb = skb->prev)
1788 1790
1789 1791
1790static inline bool skb_has_frags(const struct sk_buff *skb) 1792static inline bool skb_has_frag_list(const struct sk_buff *skb)
1791{ 1793{
1792 return skb_shinfo(skb)->frag_list != NULL; 1794 return skb_shinfo(skb)->frag_list != NULL;
1793} 1795}
@@ -1987,8 +1989,8 @@ extern void skb_tstamp_tx(struct sk_buff *orig_skb,
1987 1989
1988static inline void sw_tx_timestamp(struct sk_buff *skb) 1990static inline void sw_tx_timestamp(struct sk_buff *skb)
1989{ 1991{
1990 union skb_shared_tx *shtx = skb_tx(skb); 1992 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
1991 if (shtx->software && !shtx->in_progress) 1993 !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
1992 skb_tstamp_tx(skb, NULL); 1994 skb_tstamp_tx(skb, NULL);
1993} 1995}
1994 1996
@@ -2159,7 +2161,7 @@ static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
2159 2161
2160static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) 2162static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
2161{ 2163{
2162 return (skb->queue_mapping != 0); 2164 return skb->queue_mapping != 0;
2163} 2165}
2164 2166
2165extern u16 skb_tx_hash(const struct net_device *dev, 2167extern u16 skb_tx_hash(const struct net_device *dev,
@@ -2209,6 +2211,21 @@ static inline void skb_forward_csum(struct sk_buff *skb)
2209 skb->ip_summed = CHECKSUM_NONE; 2211 skb->ip_summed = CHECKSUM_NONE;
2210} 2212}
2211 2213
2214/**
2215 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
2216 * @skb: skb to check
2217 *
 2218 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
2219 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
2220 * use this helper, to document places where we make this assertion.
2221 */
2222static inline void skb_checksum_none_assert(struct sk_buff *skb)
2223{
2224#ifdef DEBUG
2225 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
2226#endif
2227}
2228
2212bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 2229bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2213#endif /* __KERNEL__ */ 2230#endif /* __KERNEL__ */
2214#endif /* _LINUX_SKBUFF_H */ 2231#endif /* _LINUX_SKBUFF_H */
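
With tx_flags now a plain bitmask, the old skb_tx() union accessor goes away
and drivers test and set the SKBTX_* bits directly. A sketch of the
hardware-timestamp pattern (foo_ names are placeholders):

    static void foo_tx_maybe_timestamp(struct sk_buff *skb)
    {
            if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
                    /* tell the stack a hardware timestamp is on its way */
                    skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                    /* ... arm timestamping on the TX descriptor here ... */
            }
    }
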
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index a6d5225b9275..11daf9c140e7 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -97,6 +97,7 @@
97#define SSB_TMSLOW_RESET 0x00000001 /* Reset */ 97#define SSB_TMSLOW_RESET 0x00000001 /* Reset */
98#define SSB_TMSLOW_REJECT_22 0x00000002 /* Reject (Backplane rev 2.2) */ 98#define SSB_TMSLOW_REJECT_22 0x00000002 /* Reject (Backplane rev 2.2) */
99#define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */ 99#define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */
100#define SSB_TMSLOW_PHYCLK 0x00000010 /* MAC PHY Clock Control Enable */
100#define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */ 101#define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */
101#define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ 102#define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */
102#define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */ 103#define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 632ff7c03280..d66c61774d95 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -32,10 +32,14 @@
32struct plat_stmmacenet_data { 32struct plat_stmmacenet_data {
33 int bus_id; 33 int bus_id;
34 int pbl; 34 int pbl;
35 int clk_csr;
35 int has_gmac; 36 int has_gmac;
36 int enh_desc; 37 int enh_desc;
38 int tx_coe;
39 int bugged_jumbo;
40 int pmt;
37 void (*fix_mac_speed)(void *priv, unsigned int speed); 41 void (*fix_mac_speed)(void *priv, unsigned int speed);
38 void (*bus_setup)(unsigned long ioaddr); 42 void (*bus_setup)(void __iomem *ioaddr);
39#ifdef CONFIG_STM_DRIVERS 43#ifdef CONFIG_STM_DRIVERS
40 struct stm_pad_config *pad_config; 44 struct stm_pad_config *pad_config;
41#endif 45#endif
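
A sketch of how platform code might populate the extended structure; all
values are illustrative and foo_fix_mac_speed() is a placeholder:

    static struct plat_stmmacenet_data foo_stmmac_data = {
            .bus_id        = 0,
            .pbl           = 32,
            .clk_csr       = 0,
            .has_gmac      = 1,
            .enh_desc      = 1,
            .tx_coe        = 1,  /* checksum offload done in hardware */
            .bugged_jumbo  = 0,  /* set if jumbo frames need the workaround */
            .pmt           = 1,  /* PMT block present: wake-on-LAN possible */
            .fix_mac_speed = foo_fix_mac_speed,
    };
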
diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild
index 76990937f4c9..67b501c302b2 100644
--- a/include/linux/tc_act/Kbuild
+++ b/include/linux/tc_act/Kbuild
@@ -4,3 +4,4 @@ header-y += tc_mirred.h
4header-y += tc_pedit.h 4header-y += tc_pedit.h
5header-y += tc_nat.h 5header-y += tc_nat.h
6header-y += tc_skbedit.h 6header-y += tc_skbedit.h
7header-y += tc_csum.h
diff --git a/include/linux/tc_act/tc_csum.h b/include/linux/tc_act/tc_csum.h
new file mode 100644
index 000000000000..a047c49a3153
--- /dev/null
+++ b/include/linux/tc_act/tc_csum.h
@@ -0,0 +1,32 @@
1#ifndef __LINUX_TC_CSUM_H
2#define __LINUX_TC_CSUM_H
3
4#include <linux/types.h>
5#include <linux/pkt_cls.h>
6
7#define TCA_ACT_CSUM 16
8
9enum {
10 TCA_CSUM_UNSPEC,
11 TCA_CSUM_PARMS,
12 TCA_CSUM_TM,
13 __TCA_CSUM_MAX
14};
15#define TCA_CSUM_MAX (__TCA_CSUM_MAX - 1)
16
17enum {
18 TCA_CSUM_UPDATE_FLAG_IPV4HDR = 1,
19 TCA_CSUM_UPDATE_FLAG_ICMP = 2,
20 TCA_CSUM_UPDATE_FLAG_IGMP = 4,
21 TCA_CSUM_UPDATE_FLAG_TCP = 8,
22 TCA_CSUM_UPDATE_FLAG_UDP = 16,
23 TCA_CSUM_UPDATE_FLAG_UDPLITE = 32
24};
25
26struct tc_csum {
27 tc_gen;
28
29 __u32 update_flags;
30};
31
32#endif /* __LINUX_TC_CSUM_H */
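
Userspace selects which checksums the action should recompute by OR-ing the
update flags into the parameter struct, which then travels inside a
TCA_CSUM_PARMS netlink attribute. A sketch:

    #include <linux/tc_act/tc_csum.h>

    /* recompute the IPv4 header and TCP checksums, then pass the packet on */
    static struct tc_csum csum_parm = {
            .action       = TC_ACT_OK,
            .update_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                            TCA_CSUM_UPDATE_FLAG_TCP,
    };
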
diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h
index 0864206ec1a3..7138962664f8 100644
--- a/include/linux/tc_ematch/tc_em_meta.h
+++ b/include/linux/tc_ematch/tc_em_meta.h
@@ -79,6 +79,7 @@ enum {
79 TCF_META_ID_SK_SENDMSG_OFF, 79 TCF_META_ID_SK_SENDMSG_OFF,
80 TCF_META_ID_SK_WRITE_PENDING, 80 TCF_META_ID_SK_WRITE_PENDING,
81 TCF_META_ID_VLAN_TAG, 81 TCF_META_ID_VLAN_TAG,
82 TCF_META_ID_RXHASH,
82 __TCF_META_ID_MAX 83 __TCF_META_ID_MAX
83}; 84};
84#define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1) 85#define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1)
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index a778ee024590..e64f4c67d0ef 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -105,6 +105,7 @@ enum {
105#define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */ 105#define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */
106#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ 106#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/
107#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */ 107#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */
108#define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */
108 109
109/* for TCP_INFO socket option */ 110/* for TCP_INFO socket option */
110#define TCPI_OPT_TIMESTAMPS 1 111#define TCPI_OPT_TIMESTAMPS 1
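
The new option takes an unsigned int of milliseconds: the maximum time
transmitted data may stay unacknowledged before the kernel force-closes the
connection. A minimal userspace sketch:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static int cap_unacked_time(int fd)
    {
            unsigned int timeout_ms = 30000;    /* give up after 30 s */

            return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
                              &timeout_ms, sizeof(timeout_ms));
    }
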
diff --git a/include/linux/spi/wl12xx.h b/include/linux/wl12xx.h
index a223ecbc71ef..95deae3968f4 100644
--- a/include/linux/spi/wl12xx.h
+++ b/include/linux/wl12xx.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009 Nokia Corporation
5 * 5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com> 6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
@@ -21,14 +21,18 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef _LINUX_SPI_WL12XX_H 24#ifndef _LINUX_WL12XX_H
25#define _LINUX_SPI_WL12XX_H 25#define _LINUX_WL12XX_H
26 26
27struct wl12xx_platform_data { 27struct wl12xx_platform_data {
28 void (*set_power)(bool enable); 28 void (*set_power)(bool enable);
29 /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */ 29 /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
30 int irq; 30 int irq;
31 bool use_eeprom; 31 bool use_eeprom;
32 int board_ref_clock;
32}; 33};
33 34
35int wl12xx_set_platform_data(const struct wl12xx_platform_data *data);
36const struct wl12xx_platform_data *wl12xx_get_platform_data(void);
37
34#endif 38#endif
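
Board code is expected to hand the data to the driver through the new setter;
a sketch with placeholder values:

    static struct wl12xx_platform_data foo_wlan_data = {
            .irq             = 42,  /* placeholder WLAN_IRQ number */
            .board_ref_clock = 2,   /* placeholder reference clock index */
    };

    static void __init foo_init_wlan(void)
    {
            if (wl12xx_set_platform_data(&foo_wlan_data))
                    pr_err("error setting wl12xx platform data\n");
    }
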
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 4d40c4d0230b..958d2749b7a9 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -175,20 +175,32 @@ extern int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
175extern int register_inet6addr_notifier(struct notifier_block *nb); 175extern int register_inet6addr_notifier(struct notifier_block *nb);
176extern int unregister_inet6addr_notifier(struct notifier_block *nb); 176extern int unregister_inet6addr_notifier(struct notifier_block *nb);
177 177
178static inline struct inet6_dev * 178/**
179__in6_dev_get(struct net_device *dev) 179 * __in6_dev_get - get inet6_dev pointer from netdevice
180 * @dev: network device
181 *
182 * Caller must hold rcu_read_lock or RTNL, because this function
183 * does not take a reference on the inet6_dev.
184 */
185static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev)
180{ 186{
181 return rcu_dereference_check(dev->ip6_ptr, 187 return rcu_dereference_rtnl(dev->ip6_ptr);
182 rcu_read_lock_held() ||
183 lockdep_rtnl_is_held());
184} 188}
185 189
186static inline struct inet6_dev * 190/**
187in6_dev_get(struct net_device *dev) 191 * in6_dev_get - get inet6_dev pointer from netdevice
192 * @dev: network device
193 *
194 * This version can be used in any context, and takes a reference
195 * on the inet6_dev. Callers must use in6_dev_put() later to
196 * release this reference.
197 */
198static inline struct inet6_dev *in6_dev_get(const struct net_device *dev)
188{ 199{
189 struct inet6_dev *idev = NULL; 200 struct inet6_dev *idev;
201
190 rcu_read_lock(); 202 rcu_read_lock();
191 idev = __in6_dev_get(dev); 203 idev = rcu_dereference(dev->ip6_ptr);
192 if (idev) 204 if (idev)
193 atomic_inc(&idev->refcnt); 205 atomic_inc(&idev->refcnt);
194 rcu_read_unlock(); 206 rcu_read_unlock();
@@ -197,16 +209,21 @@ in6_dev_get(struct net_device *dev)
197 209
198extern void in6_dev_finish_destroy(struct inet6_dev *idev); 210extern void in6_dev_finish_destroy(struct inet6_dev *idev);
199 211
200static inline void 212static inline void in6_dev_put(struct inet6_dev *idev)
201in6_dev_put(struct inet6_dev *idev)
202{ 213{
203 if (atomic_dec_and_test(&idev->refcnt)) 214 if (atomic_dec_and_test(&idev->refcnt))
204 in6_dev_finish_destroy(idev); 215 in6_dev_finish_destroy(idev);
205} 216}
206 217
207#define __in6_dev_put(idev) atomic_dec(&(idev)->refcnt) 218static inline void __in6_dev_put(struct inet6_dev *idev)
208#define in6_dev_hold(idev) atomic_inc(&(idev)->refcnt) 219{
220 atomic_dec(&idev->refcnt);
221}
209 222
223static inline void in6_dev_hold(struct inet6_dev *idev)
224{
225 atomic_inc(&idev->refcnt);
226}
210 227
211extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp); 228extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
212 229
@@ -216,9 +233,15 @@ static inline void in6_ifa_put(struct inet6_ifaddr *ifp)
216 inet6_ifa_finish_destroy(ifp); 233 inet6_ifa_finish_destroy(ifp);
217} 234}
218 235
219#define __in6_ifa_put(ifp) atomic_dec(&(ifp)->refcnt) 236static inline void __in6_ifa_put(struct inet6_ifaddr *ifp)
220#define in6_ifa_hold(ifp) atomic_inc(&(ifp)->refcnt) 237{
238 atomic_dec(&ifp->refcnt);
239}
221 240
241static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
242{
243 atomic_inc(&ifp->refcnt);
244}
222 245
223 246
224/* 247/*
@@ -241,23 +264,23 @@ static inline int ipv6_addr_is_multicast(const struct in6_addr *addr)
241 264
242static inline int ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr) 265static inline int ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
243{ 266{
244 return (((addr->s6_addr32[0] ^ htonl(0xff020000)) | 267 return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
245 addr->s6_addr32[1] | addr->s6_addr32[2] | 268 addr->s6_addr32[1] | addr->s6_addr32[2] |
246 (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0); 269 (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0;
247} 270}
248 271
249static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr) 272static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
250{ 273{
251 return (((addr->s6_addr32[0] ^ htonl(0xff020000)) | 274 return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
252 addr->s6_addr32[1] | addr->s6_addr32[2] | 275 addr->s6_addr32[1] | addr->s6_addr32[2] |
253 (addr->s6_addr32[3] ^ htonl(0x00000002))) == 0); 276 (addr->s6_addr32[3] ^ htonl(0x00000002))) == 0;
254} 277}
255 278
256extern int __ipv6_isatap_ifid(u8 *eui, __be32 addr); 279extern int __ipv6_isatap_ifid(u8 *eui, __be32 addr);
257 280
258static inline int ipv6_addr_is_isatap(const struct in6_addr *addr) 281static inline int ipv6_addr_is_isatap(const struct in6_addr *addr)
259{ 282{
260 return ((addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE)); 283 return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE);
261} 284}
262 285
263#ifdef CONFIG_PROC_FS 286#ifdef CONFIG_PROC_FS
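
The kerneldoc above spells out the contract; a sketch of the lockless
accessor used correctly (foo_ names are placeholders):

    static bool foo_dev_has_inet6(const struct net_device *dev)
    {
            struct inet6_dev *idev;
            bool ret;

            rcu_read_lock();        /* __in6_dev_get() takes no reference */
            idev = __in6_dev_get(dev);
            ret = idev != NULL;
            rcu_read_unlock();

            return ret;
    }
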
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 4568b938ca35..ebec8c9a929d 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -233,7 +233,7 @@ static inline void inquiry_cache_init(struct hci_dev *hdev)
233static inline int inquiry_cache_empty(struct hci_dev *hdev) 233static inline int inquiry_cache_empty(struct hci_dev *hdev)
234{ 234{
235 struct inquiry_cache *c = &hdev->inq_cache; 235 struct inquiry_cache *c = &hdev->inq_cache;
236 return (c->list == NULL); 236 return c->list == NULL;
237} 237}
238 238
239static inline long inquiry_cache_age(struct hci_dev *hdev) 239static inline long inquiry_cache_age(struct hci_dev *hdev)
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 6c241444f902..c819c8bf9b68 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -414,7 +414,7 @@ static inline int l2cap_tx_window_full(struct sock *sk)
414 if (sub < 0) 414 if (sub < 0)
415 sub += 64; 415 sub += 64;
416 416
417 return (sub == pi->remote_tx_win); 417 return sub == pi->remote_tx_win;
418} 418}
419 419
420#define __get_txseq(ctrl) ((ctrl) & L2CAP_CTRL_TXSEQ) >> 1 420#define __get_txseq(ctrl) ((ctrl) & L2CAP_CTRL_TXSEQ) >> 1
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 2fd06c60ffbb..a0613ff62c97 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -25,6 +25,43 @@
25#include <linux/wireless.h> 25#include <linux/wireless.h>
26 26
27 27
28/**
29 * DOC: Introduction
30 *
31 * cfg80211 is the configuration API for 802.11 devices in Linux. It bridges
32 * userspace and drivers, and offers some utility functionality associated
33 * with 802.11. cfg80211 must, directly or indirectly via mac80211, be used
34 * by all modern wireless drivers in Linux, so that they offer a consistent
35 * API through nl80211. For backward compatibility, cfg80211 also offers
36 * wireless extensions to userspace, but hides them from drivers completely.
37 *
38 * Additionally, cfg80211 contains code to help enforce regulatory spectrum
39 * use restrictions.
40 */
41
42
43/**
44 * DOC: Device registration
45 *
46 * In order for a driver to use cfg80211, it must register the hardware device
47 * with cfg80211. This happens through a number of hardware capability structs
48 * described below.
49 *
50 * The fundamental structure for each device is the 'wiphy', of which each
51 * instance describes a physical wireless device connected to the system. Each
52 * such wiphy can have zero, one, or many virtual interfaces associated with
53 * it, which need to be identified as such by pointing the network interface's
54 * @ieee80211_ptr pointer to a &struct wireless_dev which further describes
 55 * the wireless part of the interface. Normally this struct is embedded in the
 56 * network interface's private data area. Drivers can optionally allow creating
 57 * or destroying virtual interfaces on the fly, but a wireless device is not
 58 * useful without at least one interface or the ability to create one.
59 *
60 * Each wiphy structure contains device capability information, and also has
61 * a pointer to the various operations the driver offers. The definitions and
62 * structures here describe these capabilities in detail.
63 */
64
28/* 65/*
29 * wireless hardware capability structures 66 * wireless hardware capability structures
30 */ 67 */
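
A sketch of the registration flow this section describes; my_cfg80211_ops,
struct my_priv and my_2ghz_band are assumptions, not defined by this patch:

    static struct wiphy *foo_wiphy_register(struct device *parent)
    {
            struct wiphy *wiphy;

            /* allocate a wiphy with driver-private data appended */
            wiphy = wiphy_new(&my_cfg80211_ops, sizeof(struct my_priv));
            if (!wiphy)
                    return NULL;

            set_wiphy_dev(wiphy, parent);
            /* advertise hardware capabilities before registering */
            wiphy->bands[IEEE80211_BAND_2GHZ] = &my_2ghz_band;

            if (wiphy_register(wiphy)) {
                    wiphy_free(wiphy);
                    return NULL;
            }
            return wiphy;
    }
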
@@ -205,6 +242,21 @@ struct ieee80211_supported_band {
205 */ 242 */
206 243
207/** 244/**
245 * DOC: Actions and configuration
246 *
247 * Each wireless device and each virtual interface offer a set of configuration
248 * operations and other actions that are invoked by userspace. Each of these
249 * actions is described in the operations structure, and the parameters these
250 * operations use are described separately.
251 *
252 * Additionally, some operations are asynchronous and expect to get status
253 * information via some functions that drivers need to call.
254 *
255 * Scanning and BSS list handling with its associated functionality is described
256 * in a separate chapter.
257 */
258
259/**
208 * struct vif_params - describes virtual interface parameters 260 * struct vif_params - describes virtual interface parameters
209 * @mesh_id: mesh ID to use 261 * @mesh_id: mesh ID to use
210 * @mesh_id_len: length of the mesh ID 262 * @mesh_id_len: length of the mesh ID
@@ -570,8 +622,28 @@ struct ieee80211_txq_params {
570/* from net/wireless.h */ 622/* from net/wireless.h */
571struct wiphy; 623struct wiphy;
572 624
573/* from net/ieee80211.h */ 625/**
574struct ieee80211_channel; 626 * DOC: Scanning and BSS list handling
627 *
628 * The scanning process itself is fairly simple, but cfg80211 offers quite
629 * a bit of helper functionality. To start a scan, the scan operation will
630 * be invoked with a scan definition. This scan definition contains the
631 * channels to scan, and the SSIDs to send probe requests for (including the
632 * wildcard, if desired). A passive scan is indicated by having no SSIDs to
633 * probe. Additionally, a scan request may contain extra information elements
634 * that should be added to the probe request. The IEs are guaranteed to be
635 * well-formed, and will not exceed the maximum length the driver advertised
636 * in the wiphy structure.
637 *
638 * When scanning finds a BSS, cfg80211 needs to be notified of that, because
639 * it is responsible for maintaining the BSS list; the driver should not
640 * maintain a list itself. For this notification, various functions exist.
641 *
642 * Since drivers do not maintain a BSS list, there are also a number of
643 * functions to search for a BSS and obtain information about it from the
644 * BSS structure cfg80211 maintains. The BSS list is also made available
645 * to userspace.
646 */
575 647
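A hedged sketch of the flow just described; the foo_* plumbing and private fields are assumed. The scan operation only kicks the hardware off, each result is handed to cfg80211, and the driver signals the end of the scan:

	static int foo_scan(struct wiphy *wiphy, struct net_device *dev,
			    struct cfg80211_scan_request *req)
	{
		struct foo_priv *priv = wiphy_priv(wiphy);

		priv->scan_req = req;			/* kept for completion */
		return foo_hw_start_scan(priv, req);	/* assumed, asynchronous */
	}

	/* for every beacon/probe response received while scanning: */
	static void foo_rx_scan_result(struct foo_priv *priv,
				       struct ieee80211_channel *chan,
				       struct ieee80211_mgmt *mgmt,
				       size_t len, s32 signal)
	{
		struct cfg80211_bss *bss;

		bss = cfg80211_inform_bss_frame(priv->wdev.wiphy, chan,
						mgmt, len, signal, GFP_KERNEL);
		if (bss)
			cfg80211_put_bss(bss);
	}

	/* when the hardware reports that the scan has ended: */
	static void foo_scan_complete(struct foo_priv *priv, bool aborted)
	{
		cfg80211_scan_done(priv->scan_req, aborted);
	}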
576/** 648/**
577 * struct cfg80211_ssid - SSID description 649 * struct cfg80211_ssid - SSID description
@@ -691,6 +763,10 @@ const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie);
691 * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is 763 * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
692 * required to assume that the port is unauthorized until authorized by 764 * required to assume that the port is unauthorized until authorized by
693 * user space. Otherwise, port is marked authorized by default. 765 * user space. Otherwise, port is marked authorized by default.
766 * @control_port_ethertype: the control port protocol that should be
767 * allowed through even on unauthorized ports
768 * @control_port_no_encrypt: TRUE to prevent encryption of control port
769 * protocol frames.
694 */ 770 */
695struct cfg80211_crypto_settings { 771struct cfg80211_crypto_settings {
696 u32 wpa_versions; 772 u32 wpa_versions;
@@ -700,6 +776,8 @@ struct cfg80211_crypto_settings {
700 int n_akm_suites; 776 int n_akm_suites;
701 u32 akm_suites[NL80211_MAX_NR_AKM_SUITES]; 777 u32 akm_suites[NL80211_MAX_NR_AKM_SUITES];
702 bool control_port; 778 bool control_port;
779 __be16 control_port_ethertype;
780 bool control_port_no_encrypt;
703}; 781};
704 782
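Illustratively (driver code assumed, not part of this patch), a device advertising WIPHY_FLAG_CONTROL_PORT_PROTOCOL would stash these settings at connect time and honour them in its RX/TX paths:

	static int foo_connect(struct wiphy *wiphy, struct net_device *dev,
			       struct cfg80211_connect_params *sme)
	{
		struct foo_priv *priv = wiphy_priv(wiphy);

		/* e.g. ETH_P_PAE (0x888e) for ordinary EAPOL */
		priv->ctrl_ethertype = sme->crypto.control_port_ethertype;
		priv->ctrl_no_encrypt = sme->crypto.control_port_no_encrypt;
		/* ... program the hardware and start the association ... */
		return 0;
	}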
705/** 783/**
@@ -1020,7 +1098,7 @@ struct cfg80211_pmksa {
1020 * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation. 1098 * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation.
1021 * This allows the operation to be terminated prior to timeout based on 1099 * This allows the operation to be terminated prior to timeout based on
1022 * the duration value. 1100 * the duration value.
1023 * @action: Transmit an action frame 1101 * @mgmt_tx: Transmit a management frame
1024 * 1102 *
1025 * @testmode_cmd: run a test mode command 1103 * @testmode_cmd: run a test mode command
1026 * 1104 *
@@ -1172,7 +1250,7 @@ struct cfg80211_ops {
1172 struct net_device *dev, 1250 struct net_device *dev,
1173 u64 cookie); 1251 u64 cookie);
1174 1252
1175 int (*action)(struct wiphy *wiphy, struct net_device *dev, 1253 int (*mgmt_tx)(struct wiphy *wiphy, struct net_device *dev,
1176 struct ieee80211_channel *chan, 1254 struct ieee80211_channel *chan,
1177 enum nl80211_channel_type channel_type, 1255 enum nl80211_channel_type channel_type,
1178 bool channel_type_valid, 1256 bool channel_type_valid,
@@ -1221,21 +1299,29 @@ struct cfg80211_ops {
1221 * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station 1299 * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station
1222 * on a VLAN interface) 1300 * on a VLAN interface)
1223 * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station 1301 * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station
1302 * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the
1303 * control port protocol ethertype. The device also honours the
1304 * control_port_no_encrypt flag.
1224 */ 1305 */
1225enum wiphy_flags { 1306enum wiphy_flags {
1226 WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), 1307 WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
1227 WIPHY_FLAG_STRICT_REGULATORY = BIT(1), 1308 WIPHY_FLAG_STRICT_REGULATORY = BIT(1),
1228 WIPHY_FLAG_DISABLE_BEACON_HINTS = BIT(2), 1309 WIPHY_FLAG_DISABLE_BEACON_HINTS = BIT(2),
1229 WIPHY_FLAG_NETNS_OK = BIT(3), 1310 WIPHY_FLAG_NETNS_OK = BIT(3),
1230 WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4), 1311 WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4),
1231 WIPHY_FLAG_4ADDR_AP = BIT(5), 1312 WIPHY_FLAG_4ADDR_AP = BIT(5),
1232 WIPHY_FLAG_4ADDR_STATION = BIT(6), 1313 WIPHY_FLAG_4ADDR_STATION = BIT(6),
1314 WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
1233}; 1315};
1234 1316
1235struct mac_address { 1317struct mac_address {
1236 u8 addr[ETH_ALEN]; 1318 u8 addr[ETH_ALEN];
1237}; 1319};
1238 1320
1321struct ieee80211_txrx_stypes {
1322 u16 tx, rx;
1323};
1324
1239/** 1325/**
1240 * struct wiphy - wireless hardware description 1326 * struct wiphy - wireless hardware description
1241 * @reg_notifier: the driver's regulatory notification callback 1327 * @reg_notifier: the driver's regulatory notification callback
@@ -1286,6 +1372,10 @@ struct mac_address {
1286 * @privid: a pointer that drivers can use to identify if an arbitrary 1372 * @privid: a pointer that drivers can use to identify if an arbitrary
1287 * wiphy is theirs, e.g. in global notifiers 1373 * wiphy is theirs, e.g. in global notifiers
1288 * @bands: information about bands/channels supported by this device 1374 * @bands: information about bands/channels supported by this device
1375 *
1376 * @mgmt_stypes: bitmasks of frame subtypes that can be subscribed to or
1377 * transmitted through nl80211, points to an array indexed by interface
1378 * type
1289 */ 1379 */
1290struct wiphy { 1380struct wiphy {
1291 /* assign these fields before you register the wiphy */ 1381 /* assign these fields before you register the wiphy */
@@ -1294,9 +1384,12 @@ struct wiphy {
1294 u8 perm_addr[ETH_ALEN]; 1384 u8 perm_addr[ETH_ALEN];
1295 u8 addr_mask[ETH_ALEN]; 1385 u8 addr_mask[ETH_ALEN];
1296 1386
1297 u16 n_addresses;
1298 struct mac_address *addresses; 1387 struct mac_address *addresses;
1299 1388
1389 const struct ieee80211_txrx_stypes *mgmt_stypes;
1390
1391 u16 n_addresses;
1392
1300 /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */ 1393 /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
1301 u16 interface_modes; 1394 u16 interface_modes;
1302 1395
@@ -1492,8 +1585,8 @@ struct cfg80211_cached_keys;
1492 * set by driver (if supported) on add_interface BEFORE registering the 1585 * set by driver (if supported) on add_interface BEFORE registering the
1493 * netdev and may otherwise be used by driver read-only, will be updated 1586 * netdev and may otherwise be used by driver read-only, will be updated
1494 * by cfg80211 on change_interface 1587 * by cfg80211 on change_interface
1495 * @action_registrations: list of registrations for action frames 1588 * @mgmt_registrations: list of registrations for management frames
1496 * @action_registrations_lock: lock for the list 1589 * @mgmt_registrations_lock: lock for the list
1497 * @mtx: mutex used to lock data in this struct 1590 * @mtx: mutex used to lock data in this struct
1498 * @cleanup_work: work struct used for cleanup that can't be done directly 1591 * @cleanup_work: work struct used for cleanup that can't be done directly
1499 */ 1592 */
@@ -1505,8 +1598,8 @@ struct wireless_dev {
1505 struct list_head list; 1598 struct list_head list;
1506 struct net_device *netdev; 1599 struct net_device *netdev;
1507 1600
1508 struct list_head action_registrations; 1601 struct list_head mgmt_registrations;
1509 spinlock_t action_registrations_lock; 1602 spinlock_t mgmt_registrations_lock;
1510 1603
1511 struct mutex mtx; 1604 struct mutex mtx;
1512 1605
@@ -1563,8 +1656,10 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
1563 return wiphy_priv(wdev->wiphy); 1656 return wiphy_priv(wdev->wiphy);
1564} 1657}
1565 1658
1566/* 1659/**
1567 * Utility functions 1660 * DOC: Utility functions
1661 *
1662 * cfg80211 offers a number of utility functions that can be useful.
1568 */ 1663 */
1569 1664
1570/** 1665/**
@@ -1715,7 +1810,15 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
1715 * ieee80211_hdrlen - get header length in bytes from frame control 1810 * ieee80211_hdrlen - get header length in bytes from frame control
1716 * @fc: frame control field in little-endian format 1811 * @fc: frame control field in little-endian format
1717 */ 1812 */
1718unsigned int ieee80211_hdrlen(__le16 fc); 1813unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc);
1814
1815/**
1816 * DOC: Data path helpers
1817 *
1818 * In addition to generic utilities, cfg80211 also offers
1819 * functions that help implement the data path for devices
1820 * that do not do the 802.11/802.3 conversion on the device.
1821 */
1719 1822
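An illustrative RX path using these helpers (error handling trimmed; the surrounding driver is assumed):

	static void foo_rx_80211(struct net_device *dev, struct sk_buff *skb)
	{
		/* strip the 802.11 header and build an 802.3 one in place */
		if (ieee80211_data_to_8023(skb, dev->dev_addr,
					   dev->ieee80211_ptr->iftype)) {
			dev_kfree_skb(skb);
			return;
		}
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
	}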
1720/** 1823/**
1721 * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 1824 * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
@@ -1777,8 +1880,10 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb);
1777 */ 1880 */
1778const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len); 1881const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
1779 1882
1780/* 1883/**
1781 * Regulatory helper functions for wiphys 1884 * DOC: Regulatory enforcement infrastructure
1885 *
1886 * TODO
1782 */ 1887 */
1783 1888
1784/** 1889/**
@@ -2181,6 +2286,20 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
2181void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp); 2286void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp);
2182 2287
2183/** 2288/**
2289 * DOC: RFkill integration
2290 *
2291 * RFkill integration in cfg80211 is almost invisible to drivers,
2292 * as cfg80211 automatically registers an rfkill instance for each
2293 * wireless device it knows about. Soft kill is also translated
2294 * into disconnecting and turning all interfaces off; drivers are
2295 * expected to turn off the device when all interfaces are down.
2296 *
2297 * However, devices may have a hard RFkill line, in which case they
2298 * also need to interact with the rfkill subsystem, via cfg80211.
2299 * They can do this with a few helper functions documented here.
2300 */
2301
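A hedged sketch of the hard-kill case; the GPIO read is an assumed driver detail. Such a routine would typically be wired up as the .rfkill_poll operation:

	static void foo_rfkill_poll(struct wiphy *wiphy)
	{
		struct foo_priv *priv = wiphy_priv(wiphy);
		bool blocked = foo_read_rfkill_gpio(priv);	/* assumed */

		wiphy_rfkill_set_hw_state(wiphy, blocked);
	}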
2302/**
2184 * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state 2303 * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state
2185 * @wiphy: the wiphy 2304 * @wiphy: the wiphy
2186 * @blocked: block status 2305 * @blocked: block status
@@ -2201,6 +2320,17 @@ void wiphy_rfkill_stop_polling(struct wiphy *wiphy);
2201 2320
2202#ifdef CONFIG_NL80211_TESTMODE 2321#ifdef CONFIG_NL80211_TESTMODE
2203/** 2322/**
2323 * DOC: Test mode
2324 *
2325 * Test mode is a set of utility functions to allow drivers to
2326 * interact with driver-specific tools to aid, for instance,
2327 * factory programming.
2328 *
2329 * This chapter describes how drivers interact with it; for more
2330 * information, see the nl80211 book's chapter on it.
2331 */
2332
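An illustrative testmode_cmd handler; FOO_TM_ATTR_FW_VERSION and the attribute layout are made-up, driver-private details:

	static int foo_testmode_cmd(struct wiphy *wiphy, void *data, int len)
	{
		struct foo_priv *priv = wiphy_priv(wiphy);
		struct sk_buff *reply;

		/* parse the driver-specific attributes in data/len here ... */
		reply = cfg80211_testmode_alloc_reply_skb(wiphy, 16);
		if (!reply)
			return -ENOMEM;
		NLA_PUT_U32(reply, FOO_TM_ATTR_FW_VERSION, priv->fw_version);
		return cfg80211_testmode_reply(reply);

	nla_put_failure:
		kfree_skb(reply);
		return -ENOBUFS;
	}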
2333/**
2204 * cfg80211_testmode_alloc_reply_skb - allocate testmode reply 2334 * cfg80211_testmode_alloc_reply_skb - allocate testmode reply
2205 * @wiphy: the wiphy 2335 * @wiphy: the wiphy
2206 * @approxlen: an upper bound of the length of the data that will 2336 * @approxlen: an upper bound of the length of the data that will
@@ -2373,38 +2503,39 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
2373 struct station_info *sinfo, gfp_t gfp); 2503 struct station_info *sinfo, gfp_t gfp);
2374 2504
2375/** 2505/**
2376 * cfg80211_rx_action - notification of received, unprocessed Action frame 2506 * cfg80211_rx_mgmt - notification of received, unprocessed management frame
2377 * @dev: network device 2507 * @dev: network device
2378 * @freq: Frequency on which the frame was received in MHz 2508 * @freq: Frequency on which the frame was received in MHz
2379 * @buf: Action frame (header + body) 2509 * @buf: Management frame (header + body)
2380 * @len: length of the frame data 2510 * @len: length of the frame data
2381 * @gfp: context flags 2511 * @gfp: context flags
2382 * Returns %true if a user space application is responsible for rejecting the 2512 *
2383 * unrecognized Action frame; %false if no such application is registered 2513 * Returns %true if a user space application has registered for this frame.
2384 * (i.e., the driver is responsible for rejecting the unrecognized Action 2514 * For action frames, that makes it responsible for rejecting unrecognized
2385 * frame) 2515 * action frames; %false otherwise, in which case for action frames the
2516 * driver is responsible for rejecting the frame.
2386 * 2517 *
2387 * This function is called whenever an Action frame is received for a station 2518 * This function is called whenever an Action frame is received for a station
2388 * mode interface, but is not processed in the kernel. 2519 * mode interface, but is not processed in the kernel.
2389 */ 2520 */
2390bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, 2521bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
2391 size_t len, gfp_t gfp); 2522 size_t len, gfp_t gfp);
2392 2523
2393/** 2524/**
2394 * cfg80211_action_tx_status - notification of TX status for Action frame 2525 * cfg80211_mgmt_tx_status - notification of TX status for management frame
2395 * @dev: network device 2526 * @dev: network device
2396 * @cookie: Cookie returned by cfg80211_ops::action() 2527 * @cookie: Cookie returned by cfg80211_ops::mgmt_tx()
2397 * @buf: Action frame (header + body) 2528 * @buf: Management frame (header + body)
2398 * @len: length of the frame data 2529 * @len: length of the frame data
2399 * @ack: Whether frame was acknowledged 2530 * @ack: Whether frame was acknowledged
2400 * @gfp: context flags 2531 * @gfp: context flags
2401 * 2532 *
2402 * This function is called whenever an Action frame was requested to be 2533 * This function is called whenever a management frame was requested to be
2403 * transmitted with cfg80211_ops::action() to report the TX status of the 2534 * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
2404 * transmission attempt. 2535 * transmission attempt.
2405 */ 2536 */
2406void cfg80211_action_tx_status(struct net_device *dev, u64 cookie, 2537void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie,
2407 const u8 *buf, size_t len, bool ack, gfp_t gfp); 2538 const u8 *buf, size_t len, bool ack, gfp_t gfp);
2408 2539
2409 2540
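Hedged driver-side sketches of the renamed entry points; the foo_* hooks and the rejection helper are assumed:

	/* RX path, for frames not handled in the kernel: */
	static void foo_rx_unprocessed(struct net_device *dev, int freq,
				       struct sk_buff *skb)
	{
		if (!cfg80211_rx_mgmt(dev, freq, skb->data, skb->len,
				      GFP_ATOMIC))
			foo_reject_action_frame(dev, skb);	/* assumed */
		dev_kfree_skb(skb);
	}

	/* TX completion for a frame sent via the mgmt_tx operation: */
	static void foo_mgmt_tx_done(struct net_device *dev, u64 cookie,
				     const u8 *buf, size_t len, bool acked)
	{
		cfg80211_mgmt_tx_status(dev, cookie, buf, len, acked,
					GFP_ATOMIC);
	}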
2410/** 2541/**
@@ -2427,49 +2558,36 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
2427/* wiphy_printk helpers, similar to dev_printk */ 2558/* wiphy_printk helpers, similar to dev_printk */
2428 2559
2429#define wiphy_printk(level, wiphy, format, args...) \ 2560#define wiphy_printk(level, wiphy, format, args...) \
2430 printk(level "%s: " format, wiphy_name(wiphy), ##args) 2561 dev_printk(level, &(wiphy)->dev, format, ##args)
2431#define wiphy_emerg(wiphy, format, args...) \ 2562#define wiphy_emerg(wiphy, format, args...) \
2432 wiphy_printk(KERN_EMERG, wiphy, format, ##args) 2563 dev_emerg(&(wiphy)->dev, format, ##args)
2433#define wiphy_alert(wiphy, format, args...) \ 2564#define wiphy_alert(wiphy, format, args...) \
2434 wiphy_printk(KERN_ALERT, wiphy, format, ##args) 2565 dev_alert(&(wiphy)->dev, format, ##args)
2435#define wiphy_crit(wiphy, format, args...) \ 2566#define wiphy_crit(wiphy, format, args...) \
2436 wiphy_printk(KERN_CRIT, wiphy, format, ##args) 2567 dev_crit(&(wiphy)->dev, format, ##args)
2437#define wiphy_err(wiphy, format, args...) \ 2568#define wiphy_err(wiphy, format, args...) \
2438 wiphy_printk(KERN_ERR, wiphy, format, ##args) 2569 dev_err(&(wiphy)->dev, format, ##args)
2439#define wiphy_warn(wiphy, format, args...) \ 2570#define wiphy_warn(wiphy, format, args...) \
2440 wiphy_printk(KERN_WARNING, wiphy, format, ##args) 2571 dev_warn(&(wiphy)->dev, format, ##args)
2441#define wiphy_notice(wiphy, format, args...) \ 2572#define wiphy_notice(wiphy, format, args...) \
2442 wiphy_printk(KERN_NOTICE, wiphy, format, ##args) 2573 dev_notice(&(wiphy)->dev, format, ##args)
2443#define wiphy_info(wiphy, format, args...) \ 2574#define wiphy_info(wiphy, format, args...) \
2444 wiphy_printk(KERN_INFO, wiphy, format, ##args) 2575 dev_info(&(wiphy)->dev, format, ##args)
2445
2446int wiphy_debug(const struct wiphy *wiphy, const char *format, ...)
2447 __attribute__ ((format (printf, 2, 3)));
2448 2576
2449#if defined(DEBUG) 2577#define wiphy_debug(wiphy, format, args...) \
2450#define wiphy_dbg(wiphy, format, args...) \
2451 wiphy_printk(KERN_DEBUG, wiphy, format, ##args) 2578 wiphy_printk(KERN_DEBUG, wiphy, format, ##args)
2452#elif defined(CONFIG_DYNAMIC_DEBUG) 2579
2453#define wiphy_dbg(wiphy, format, args...) \ 2580#define wiphy_dbg(wiphy, format, args...) \
2454 dynamic_pr_debug("%s: " format, wiphy_name(wiphy), ##args) 2581 dev_dbg(&(wiphy)->dev, format, ##args)
2455#else
2456#define wiphy_dbg(wiphy, format, args...) \
2457({ \
2458 if (0) \
2459 wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \
2460 0; \
2461})
2462#endif
2463 2582
2464#if defined(VERBOSE_DEBUG) 2583#if defined(VERBOSE_DEBUG)
2465#define wiphy_vdbg wiphy_dbg 2584#define wiphy_vdbg wiphy_dbg
2466#else 2585#else
2467
2468#define wiphy_vdbg(wiphy, format, args...) \ 2586#define wiphy_vdbg(wiphy, format, args...) \
2469({ \ 2587({ \
2470 if (0) \ 2588 if (0) \
2471 wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \ 2589 wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \
2472 0; \ 2590 0; \
2473}) 2591})
2474#endif 2592#endif
2475 2593
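Call sites are unchanged by this conversion; messages simply gain the standard driver/device prefix from dev_printk(). For example (message text and variables illustrative):

	wiphy_info(wiphy, "firmware %u.%u loaded\n", major, minor);
	wiphy_dbg(wiphy, "RX ring %d stalled\n", ring);

Note that wiphy_dbg() now follows dev_dbg() semantics, so it is compiled out or gated by dynamic debug exactly like other device debug messages.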
diff --git a/include/net/gre.h b/include/net/gre.h
new file mode 100644
index 000000000000..82665474bcb7
--- /dev/null
+++ b/include/net/gre.h
@@ -0,0 +1,18 @@
1#ifndef __LINUX_GRE_H
2#define __LINUX_GRE_H
3
4#include <linux/skbuff.h>
5
6#define GREPROTO_CISCO 0
7#define GREPROTO_PPTP 1
8#define GREPROTO_MAX 2
9
10struct gre_protocol {
11 int (*handler)(struct sk_buff *skb);
12 void (*err_handler)(struct sk_buff *skb, u32 info);
13};
14
15int gre_add_protocol(const struct gre_protocol *proto, u8 version);
16int gre_del_protocol(const struct gre_protocol *proto, u8 version);
17
18#endif
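A hedged sketch of a consumer of this new header; the pptp_gre_* handler bodies are assumed, and the demultiplexer behind gre_add_protocol() is presumably provided elsewhere in this series:

	static const struct gre_protocol pptp_gre_protocol = {
		.handler	= pptp_gre_rcv,		/* assumed */
		.err_handler	= pptp_gre_err,		/* assumed */
	};

	static int __init pptp_init(void)
	{
		return gre_add_protocol(&pptp_gre_protocol, GREPROTO_PPTP);
	}

	static void __exit pptp_exit(void)
	{
		gre_del_protocol(&pptp_gre_protocol, GREPROTO_PPTP);
	}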
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index b6d3b55da19b..e4f494b42e06 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -125,6 +125,7 @@ struct inet_connection_sock {
125 int probe_size; 125 int probe_size;
126 } icsk_mtup; 126 } icsk_mtup;
127 u32 icsk_ca_priv[16]; 127 u32 icsk_ca_priv[16];
128 u32 icsk_user_timeout;
128#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32)) 129#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32))
129}; 130};
130 131
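The icsk_user_timeout field added above backs a TCP_USER_TIMEOUT socket option introduced by the same series (an inference from context; the option itself is not shown in this hunk). A userspace sketch:

	#include <netinet/tcp.h>
	#include <sys/socket.h>

	unsigned int tmo = 30000;	/* milliseconds of unacked data tolerated */

	if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &tmo, sizeof(tmo)))
		perror("setsockopt(TCP_USER_TIMEOUT)");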
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 9b5d08f4f6e8..88bdd010d65d 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -27,7 +27,7 @@ static inline int INET_ECN_is_not_ect(__u8 dsfield)
27 27
28static inline int INET_ECN_is_capable(__u8 dsfield) 28static inline int INET_ECN_is_capable(__u8 dsfield)
29{ 29{
30 return (dsfield & INET_ECN_ECT_0); 30 return dsfield & INET_ECN_ECT_0;
31} 31}
32 32
33static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) 33static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
diff --git a/include/net/ip.h b/include/net/ip.h
index 890f9725d681..dbee3fe260e1 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -53,7 +53,7 @@ struct ipcm_cookie {
53 __be32 addr; 53 __be32 addr;
54 int oif; 54 int oif;
55 struct ip_options *opt; 55 struct ip_options *opt;
56 union skb_shared_tx shtx; 56 __u8 tx_flags;
57}; 57};
58 58
59#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) 59#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
@@ -238,9 +238,9 @@ int ip_decrease_ttl(struct iphdr *iph)
238static inline 238static inline
239int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) 239int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
240{ 240{
241 return (inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO || 241 return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO ||
242 (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT && 242 (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT &&
243 !(dst_metric_locked(dst, RTAX_MTU)))); 243 !(dst_metric_locked(dst, RTAX_MTU)));
244} 244}
245 245
246extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more); 246extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 1f8412410998..4a3cd2cd2f5e 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -262,7 +262,7 @@ static inline int ipv6_addr_scope(const struct in6_addr *addr)
262 262
263static inline int __ipv6_addr_src_scope(int type) 263static inline int __ipv6_addr_src_scope(int type)
264{ 264{
265 return (type == IPV6_ADDR_ANY ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16)); 265 return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16);
266} 266}
267 267
268static inline int ipv6_addr_src_scope(const struct in6_addr *addr) 268static inline int ipv6_addr_src_scope(const struct in6_addr *addr)
@@ -279,10 +279,10 @@ static inline int
279ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, 279ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
280 const struct in6_addr *a2) 280 const struct in6_addr *a2)
281{ 281{
282 return (!!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) | 282 return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
283 ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) | 283 ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
284 ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) | 284 ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
285 ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]))); 285 ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
286} 286}
287 287
288static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2) 288static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2)
@@ -317,10 +317,10 @@ static inline void ipv6_addr_set(struct in6_addr *addr,
317static inline int ipv6_addr_equal(const struct in6_addr *a1, 317static inline int ipv6_addr_equal(const struct in6_addr *a1,
318 const struct in6_addr *a2) 318 const struct in6_addr *a2)
319{ 319{
320 return (((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | 320 return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
321 (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | 321 (a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
322 (a1->s6_addr32[2] ^ a2->s6_addr32[2]) | 322 (a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
323 (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0); 323 (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
324} 324}
325 325
326static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, 326static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2,
@@ -373,20 +373,20 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a);
373 373
374static inline int ipv6_addr_any(const struct in6_addr *a) 374static inline int ipv6_addr_any(const struct in6_addr *a)
375{ 375{
376 return ((a->s6_addr32[0] | a->s6_addr32[1] | 376 return (a->s6_addr32[0] | a->s6_addr32[1] |
377 a->s6_addr32[2] | a->s6_addr32[3] ) == 0); 377 a->s6_addr32[2] | a->s6_addr32[3]) == 0;
378} 378}
379 379
380static inline int ipv6_addr_loopback(const struct in6_addr *a) 380static inline int ipv6_addr_loopback(const struct in6_addr *a)
381{ 381{
382 return ((a->s6_addr32[0] | a->s6_addr32[1] | 382 return (a->s6_addr32[0] | a->s6_addr32[1] |
383 a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0); 383 a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0;
384} 384}
385 385
386static inline int ipv6_addr_v4mapped(const struct in6_addr *a) 386static inline int ipv6_addr_v4mapped(const struct in6_addr *a)
387{ 387{
388 return ((a->s6_addr32[0] | a->s6_addr32[1] | 388 return (a->s6_addr32[0] | a->s6_addr32[1] |
389 (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0); 389 (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0;
390} 390}
391 391
392/* 392/*
@@ -395,8 +395,7 @@ static inline int ipv6_addr_v4mapped(const struct in6_addr *a)
395 */ 395 */
396static inline int ipv6_addr_orchid(const struct in6_addr *a) 396static inline int ipv6_addr_orchid(const struct in6_addr *a)
397{ 397{
398 return ((a->s6_addr32[0] & htonl(0xfffffff0)) 398 return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
399 == htonl(0x20010010));
400} 399}
401 400
402static inline void ipv6_addr_set_v4mapped(const __be32 addr, 401static inline void ipv6_addr_set_v4mapped(const __be32 addr,
@@ -441,7 +440,7 @@ static inline int __ipv6_addr_diff(const void *token1, const void *token2, int a
441 * if returned value is greater than prefix length. 440 * if returned value is greater than prefix length.
442 * --ANK (980803) 441 * --ANK (980803)
443 */ 442 */
444 return (addrlen << 5); 443 return addrlen << 5;
445} 444}
446 445
447static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2) 446static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2)
diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h
index 73cacb3ac16c..0af8b8dfbc22 100644
--- a/include/net/irda/irlan_common.h
+++ b/include/net/irda/irlan_common.h
@@ -171,7 +171,6 @@ struct irlan_cb {
171 int magic; 171 int magic;
172 struct list_head dev_list; 172 struct list_head dev_list;
173 struct net_device *dev; /* Ethernet device structure*/ 173 struct net_device *dev; /* Ethernet device structure*/
174 struct net_device_stats stats;
175 174
176 __u32 saddr; /* Source device address */ 175 __u32 saddr; /* Source device address */
177 __u32 daddr; /* Destination device address */ 176 __u32 daddr; /* Destination device address */
diff --git a/include/net/irda/irlan_event.h b/include/net/irda/irlan_event.h
index 6d9539f05806..018b5a77e610 100644
--- a/include/net/irda/irlan_event.h
+++ b/include/net/irda/irlan_event.h
@@ -67,7 +67,7 @@ typedef enum {
67 IRLAN_WATCHDOG_TIMEOUT, 67 IRLAN_WATCHDOG_TIMEOUT,
68} IRLAN_EVENT; 68} IRLAN_EVENT;
69 69
70extern char *irlan_state[]; 70extern const char * const irlan_state[];
71 71
72void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, 72void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event,
73 struct sk_buff *skb); 73 struct sk_buff *skb);
diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h
index 9d0c78ea92f5..17fcd964f9d9 100644
--- a/include/net/irda/irlap.h
+++ b/include/net/irda/irlap.h
@@ -282,7 +282,7 @@ static inline int irlap_is_primary(struct irlap_cb *self)
282 default: 282 default:
283 ret = -1; 283 ret = -1;
284 } 284 }
285 return(ret); 285 return ret;
286} 286}
287 287
288/* Clear a pending IrLAP disconnect. - Jean II */ 288/* Clear a pending IrLAP disconnect. - Jean II */
diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h
index 3ffc1d0f93d6..fff11b7fe8a4 100644
--- a/include/net/irda/irlmp.h
+++ b/include/net/irda/irlmp.h
@@ -274,7 +274,7 @@ static inline int irlmp_lap_tx_queue_full(struct lsap_cb *self)
274 if (self->lap->irlap == NULL) 274 if (self->lap->irlap == NULL)
275 return 0; 275 return 0;
276 276
277 return(IRLAP_GET_TX_QUEUE_LEN(self->lap->irlap) >= LAP_HIGH_THRESHOLD); 277 return IRLAP_GET_TX_QUEUE_LEN(self->lap->irlap) >= LAP_HIGH_THRESHOLD;
278} 278}
279 279
280/* After doing a irlmp_dup(), this get one of the two socket back into 280/* After doing a irlmp_dup(), this get one of the two socket back into
diff --git a/include/net/irda/irttp.h b/include/net/irda/irttp.h
index 11aee7a2972a..af4b87721d13 100644
--- a/include/net/irda/irttp.h
+++ b/include/net/irda/irttp.h
@@ -204,7 +204,7 @@ static inline int irttp_is_primary(struct tsap_cb *self)
204 (self->lsap->lap == NULL) || 204 (self->lsap->lap == NULL) ||
205 (self->lsap->lap->irlap == NULL)) 205 (self->lsap->lap->irlap == NULL))
206 return -2; 206 return -2;
207 return(irlap_is_primary(self->lsap->lap->irlap)); 207 return irlap_is_primary(self->lsap->lap->irlap);
208} 208}
209 209
210#endif /* IRTTP_H */ 210#endif /* IRTTP_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index b0787a1dea90..12a49f0ba32c 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -149,6 +149,7 @@ struct ieee80211_low_level_stats {
149 * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed. 149 * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed.
150 * @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note 150 * @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note
151 * that it is only ever disabled for station mode. 151 * that it is only ever disabled for station mode.
152 * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
152 */ 153 */
153enum ieee80211_bss_change { 154enum ieee80211_bss_change {
154 BSS_CHANGED_ASSOC = 1<<0, 155 BSS_CHANGED_ASSOC = 1<<0,
@@ -165,6 +166,7 @@ enum ieee80211_bss_change {
165 BSS_CHANGED_IBSS = 1<<11, 166 BSS_CHANGED_IBSS = 1<<11,
166 BSS_CHANGED_ARP_FILTER = 1<<12, 167 BSS_CHANGED_ARP_FILTER = 1<<12,
167 BSS_CHANGED_QOS = 1<<13, 168 BSS_CHANGED_QOS = 1<<13,
169 BSS_CHANGED_IDLE = 1<<14,
168 170
169 /* when adding here, make sure to change ieee80211_reconfig */ 171 /* when adding here, make sure to change ieee80211_reconfig */
170}; 172};
@@ -223,6 +225,9 @@ enum ieee80211_bss_change {
223 * hardware must not perform any ARP filtering. Note, that the filter will 225 * hardware must not perform any ARP filtering. Note, that the filter will
224 * be enabled also in promiscuous mode. 226 * be enabled also in promiscuous mode.
225 * @qos: This is a QoS-enabled BSS. 227 * @qos: This is a QoS-enabled BSS.
228 * @idle: This interface is idle. There's also a global idle flag in the
229 * hardware config which may be more appropriate depending on what
230 * your driver/device needs to do.
226 */ 231 */
227struct ieee80211_bss_conf { 232struct ieee80211_bss_conf {
228 const u8 *bssid; 233 const u8 *bssid;
@@ -247,6 +252,7 @@ struct ieee80211_bss_conf {
247 u8 arp_addr_cnt; 252 u8 arp_addr_cnt;
248 bool arp_filter_enabled; 253 bool arp_filter_enabled;
249 bool qos; 254 bool qos;
255 bool idle;
250}; 256};
251 257
252/** 258/**
@@ -763,6 +769,8 @@ struct ieee80211_channel_switch {
763 * @bss_conf: BSS configuration for this interface, either our own 769 * @bss_conf: BSS configuration for this interface, either our own
764 * or the BSS we're associated to 770 * or the BSS we're associated to
765 * @addr: address of this interface 771 * @addr: address of this interface
772 * @p2p: indicates whether this AP or STA interface is a P2P
773 * interface, i.e. a GO or P2P client, respectively
766 * @drv_priv: data area for driver use, will always be aligned to 774 * @drv_priv: data area for driver use, will always be aligned to
767 * sizeof(void *). 775 * sizeof(void *).
768 */ 776 */
@@ -770,6 +778,7 @@ struct ieee80211_vif {
770 enum nl80211_iftype type; 778 enum nl80211_iftype type;
771 struct ieee80211_bss_conf bss_conf; 779 struct ieee80211_bss_conf bss_conf;
772 u8 addr[ETH_ALEN]; 780 u8 addr[ETH_ALEN];
781 bool p2p;
773 /* must be last */ 782 /* must be last */
774 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); 783 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
775}; 784};
@@ -783,20 +792,6 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
783} 792}
784 793
785/** 794/**
786 * enum ieee80211_key_alg - key algorithm
787 * @ALG_WEP: WEP40 or WEP104
788 * @ALG_TKIP: TKIP
789 * @ALG_CCMP: CCMP (AES)
790 * @ALG_AES_CMAC: AES-128-CMAC
791 */
792enum ieee80211_key_alg {
793 ALG_WEP,
794 ALG_TKIP,
795 ALG_CCMP,
796 ALG_AES_CMAC,
797};
798
799/**
800 * enum ieee80211_key_flags - key flags 795 * enum ieee80211_key_flags - key flags
801 * 796 *
802 * These flags are used for communication about keys between the driver 797 * These flags are used for communication about keys between the driver
@@ -833,7 +828,7 @@ enum ieee80211_key_flags {
833 * @hw_key_idx: To be set by the driver, this is the key index the driver 828 * @hw_key_idx: To be set by the driver, this is the key index the driver
834 * wants to be given when a frame is transmitted and needs to be 829 * wants to be given when a frame is transmitted and needs to be
835 * encrypted in hardware. 830 * encrypted in hardware.
836 * @alg: The key algorithm. 831 * @cipher: The key's cipher suite selector.
837 * @flags: key flags, see &enum ieee80211_key_flags. 832 * @flags: key flags, see &enum ieee80211_key_flags.
838 * @keyidx: the key index (0-3) 833 * @keyidx: the key index (0-3)
839 * @keylen: key material length 834 * @keylen: key material length
@@ -846,7 +841,7 @@ enum ieee80211_key_flags {
846 * @iv_len: The IV length for this key type 841 * @iv_len: The IV length for this key type
847 */ 842 */
848struct ieee80211_key_conf { 843struct ieee80211_key_conf {
849 enum ieee80211_key_alg alg; 844 u32 cipher;
850 u8 icv_len; 845 u8 icv_len;
851 u8 iv_len; 846 u8 iv_len;
852 u8 hw_key_idx; 847 u8 hw_key_idx;
@@ -1102,6 +1097,10 @@ enum ieee80211_hw_flags {
1102 * 1097 *
1103 * @max_rates: maximum number of alternate rate retry stages 1098 * @max_rates: maximum number of alternate rate retry stages
1104 * @max_rate_tries: maximum number of tries for each stage 1099 * @max_rate_tries: maximum number of tries for each stage
1100 *
1101 * @napi_weight: weight used for NAPI polling. You must specify an
1102 * appropriate value here if a napi_poll operation is provided
1103 * by your driver.
1105 */ 1104 */
1106struct ieee80211_hw { 1105struct ieee80211_hw {
1107 struct ieee80211_conf conf; 1106 struct ieee80211_conf conf;
@@ -1113,6 +1112,7 @@ struct ieee80211_hw {
1113 int channel_change_time; 1112 int channel_change_time;
1114 int vif_data_size; 1113 int vif_data_size;
1115 int sta_data_size; 1114 int sta_data_size;
1115 int napi_weight;
1116 u16 queues; 1116 u16 queues;
1117 u16 max_listen_interval; 1117 u16 max_listen_interval;
1118 s8 max_signal; 1118 s8 max_signal;
@@ -1245,8 +1245,8 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
1245 * %IEEE80211_CONF_PS flag enabled means that the powersave mode defined in 1245 * %IEEE80211_CONF_PS flag enabled means that the powersave mode defined in
1246 * IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused 1246 * IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused
1247 * with hardware wakeup and sleep states. Driver is responsible for waking 1247 * with hardware wakeup and sleep states. Driver is responsible for waking
1248 * up the hardware before issueing commands to the hardware and putting it 1248 * up the hardware before issuing commands to the hardware and putting it
1249 * back to sleep at approriate times. 1249 * back to sleep at appropriate times.
1250 * 1250 *
1251 * When PS is enabled, hardware needs to wakeup for beacons and receive the 1251 * When PS is enabled, hardware needs to wakeup for beacons and receive the
1252 * buffered multicast/broadcast frames after the beacon. Also it must be 1252 * buffered multicast/broadcast frames after the beacon. Also it must be
@@ -1267,7 +1267,7 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
1267 * there's data traffic and still saving significantly power in idle 1267 * there's data traffic and still saving significantly power in idle
1268 * periods. 1268 * periods.
1269 * 1269 *
1270 * Dynamic powersave is supported by simply mac80211 enabling and disabling 1270 * Dynamic powersave is simply supported by mac80211 enabling and disabling
1271 * PS based on traffic. Driver needs to only set %IEEE80211_HW_SUPPORTS_PS 1271 * PS based on traffic. Driver needs to only set %IEEE80211_HW_SUPPORTS_PS
1272 * flag and mac80211 will handle everything automatically. Additionally, 1272 * flag and mac80211 will handle everything automatically. Additionally,
1273 * hardware having support for the dynamic PS feature may set the 1273 * hardware having support for the dynamic PS feature may set the
@@ -1540,6 +1540,12 @@ enum ieee80211_ampdu_mlme_action {
1540 * negative error code (which will be seen in userspace.) 1540 * negative error code (which will be seen in userspace.)
1541 * Must be implemented and can sleep. 1541 * Must be implemented and can sleep.
1542 * 1542 *
1543 * @change_interface: Called when a netdevice changes type. This callback
1544 * is optional, but only if it is supported can interface types be
1545 * switched while the interface is UP. The callback may sleep.
1546 * Note that while an interface is being switched, it will not be
1547 * found by the interface iteration callbacks.
1548 *
1543 * @remove_interface: Notifies a driver that an interface is going down. 1549 * @remove_interface: Notifies a driver that an interface is going down.
1544 * The @stop callback is called after this if it is the last interface 1550 * The @stop callback is called after this if it is the last interface
1545 * and no monitor interfaces are present. 1551 * and no monitor interfaces are present.
@@ -1687,6 +1693,8 @@ enum ieee80211_ampdu_mlme_action {
1687 * switch operation for CSAs received from the AP may implement this 1693 * switch operation for CSAs received from the AP may implement this
1688 * callback. They must then call ieee80211_chswitch_done() to indicate 1694 * callback. They must then call ieee80211_chswitch_done() to indicate
1689 * completion of the channel switch. 1695 * completion of the channel switch.
1696 *
1697 * @napi_poll: Poll Rx queue for incoming data frames.
1690 */ 1698 */
1691struct ieee80211_ops { 1699struct ieee80211_ops {
1692 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); 1700 int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -1694,6 +1702,9 @@ struct ieee80211_ops {
1694 void (*stop)(struct ieee80211_hw *hw); 1702 void (*stop)(struct ieee80211_hw *hw);
1695 int (*add_interface)(struct ieee80211_hw *hw, 1703 int (*add_interface)(struct ieee80211_hw *hw,
1696 struct ieee80211_vif *vif); 1704 struct ieee80211_vif *vif);
1705 int (*change_interface)(struct ieee80211_hw *hw,
1706 struct ieee80211_vif *vif,
1707 enum nl80211_iftype new_type, bool p2p);
1697 void (*remove_interface)(struct ieee80211_hw *hw, 1708 void (*remove_interface)(struct ieee80211_hw *hw,
1698 struct ieee80211_vif *vif); 1709 struct ieee80211_vif *vif);
1699 int (*config)(struct ieee80211_hw *hw, u32 changed); 1710 int (*config)(struct ieee80211_hw *hw, u32 changed);
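A hedged sketch of a driver implementation of the new callback; the hardware reprogramming helper is assumed:

	static int foo_change_interface(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					enum nl80211_iftype new_type, bool p2p)
	{
		/* say the hardware can only switch between STA and AP: */
		if (new_type != NL80211_IFTYPE_STATION &&
		    new_type != NL80211_IFTYPE_AP)
			return -EOPNOTSUPP;

		return foo_hw_set_iftype(hw, vif, new_type, p2p);	/* assumed */
	}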
@@ -1752,6 +1763,7 @@ struct ieee80211_ops {
1752 void (*flush)(struct ieee80211_hw *hw, bool drop); 1763 void (*flush)(struct ieee80211_hw *hw, bool drop);
1753 void (*channel_switch)(struct ieee80211_hw *hw, 1764 void (*channel_switch)(struct ieee80211_hw *hw,
1754 struct ieee80211_channel_switch *ch_switch); 1765 struct ieee80211_channel_switch *ch_switch);
1766 int (*napi_poll)(struct ieee80211_hw *hw, int budget);
1755}; 1767};
1756 1768
1757/** 1769/**
@@ -1897,6 +1909,22 @@ void ieee80211_free_hw(struct ieee80211_hw *hw);
1897 */ 1909 */
1898void ieee80211_restart_hw(struct ieee80211_hw *hw); 1910void ieee80211_restart_hw(struct ieee80211_hw *hw);
1899 1911
1912/** ieee80211_napi_schedule - schedule NAPI poll
1913 *
1914 * Use this function to schedule NAPI polling on a device.
1915 *
1916 * @hw: the hardware to start polling
1917 */
1918void ieee80211_napi_schedule(struct ieee80211_hw *hw);
1919
1920/** ieee80211_napi_complete - complete NAPI polling
1921 *
1922 * Use this function to finish NAPI polling on a device.
1923 *
1924 * @hw: the hardware to stop polling
1925 */
1926void ieee80211_napi_complete(struct ieee80211_hw *hw);
1927
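A hedged sketch of the driver side: schedule from the interrupt handler and complete from the poll routine, with hw->napi_weight set (e.g. to 64) before ieee80211_register_hw(). The foo_* interrupt and RX helpers are assumed:

	static irqreturn_t foo_isr(int irq, void *data)
	{
		struct ieee80211_hw *hw = data;

		foo_disable_rx_interrupts(hw);		/* assumed */
		ieee80211_napi_schedule(hw);
		return IRQ_HANDLED;
	}

	static int foo_napi_poll(struct ieee80211_hw *hw, int budget)
	{
		int done = foo_rx_frames(hw, budget);	/* assumed */

		if (done < budget) {
			ieee80211_napi_complete(hw);
			foo_enable_rx_interrupts(hw);	/* assumed */
		}
		return done;
	}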
1900/** 1928/**
1901 * ieee80211_rx - receive frame 1929 * ieee80211_rx - receive frame
1902 * 1930 *
@@ -2252,7 +2280,8 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw);
2252 * 2280 *
2253 * When hardware scan offload is used (i.e. the hw_scan() callback is 2281 * When hardware scan offload is used (i.e. the hw_scan() callback is
2254 * assigned) this function needs to be called by the driver to notify 2282 * assigned) this function needs to be called by the driver to notify
2255 * mac80211 that the scan finished. 2283 * mac80211 that the scan finished. This function can be called from
2284 * any context, including hardirq context.
2256 * 2285 *
2257 * @hw: the hardware that finished the scan 2286 * @hw: the hardware that finished the scan
2258 * @aborted: set to true if scan was aborted 2287 * @aborted: set to true if scan was aborted
@@ -2267,6 +2296,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted);
2267 * This function allows the iterator function to sleep, when the iterator 2296 * This function allows the iterator function to sleep, when the iterator
2268 * function is atomic @ieee80211_iterate_active_interfaces_atomic can 2297 * function is atomic @ieee80211_iterate_active_interfaces_atomic can
2269 * be used. 2298 * be used.
2299 * Does not iterate over a new interface during add_interface()
2270 * 2300 *
2271 * @hw: the hardware struct of which the interfaces should be iterated over 2301 * @hw: the hardware struct of which the interfaces should be iterated over
2272 * @iterator: the iterator function to call 2302 * @iterator: the iterator function to call
@@ -2284,6 +2314,7 @@ void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
2284 * hardware that are currently active and calls the callback for them. 2314 * hardware that are currently active and calls the callback for them.
2285 * This function requires the iterator callback function to be atomic, 2315 * This function requires the iterator callback function to be atomic,
2286 * if that is not desired, use @ieee80211_iterate_active_interfaces instead. 2316 * if that is not desired, use @ieee80211_iterate_active_interfaces instead.
2317 * Does not iterate over a new interface during add_interface()
2287 * 2318 *
2288 * @hw: the hardware struct of which the interfaces should be iterated over 2319 * @hw: the hardware struct of which the interfaces should be iterated over
2289 * @iterator: the iterator function to call, cannot sleep 2320 * @iterator: the iterator function to call, cannot sleep
@@ -2442,7 +2473,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
2442 * 2473 *
2443 * @vif: &struct ieee80211_vif pointer from the add_interface callback. 2474 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2444 * 2475 *
2445 * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING and 2476 * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTER and
2446 * %IEEE80211_CONF_PS is set, the driver needs to inform whenever the 2477 * %IEEE80211_CONF_PS is set, the driver needs to inform whenever the
2447 * hardware is not receiving beacons with this function. 2478 * hardware is not receiving beacons with this function.
2448 */ 2479 */
@@ -2453,7 +2484,7 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif);
2453 * 2484 *
2454 * @vif: &struct ieee80211_vif pointer from the add_interface callback. 2485 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2455 * 2486 *
2456 * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING, and 2487 * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTER, and
2457 * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver 2488 * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver
2458 * needs to inform if the connection to the AP has been lost. 2489 * needs to inform if the connection to the AP has been lost.
2459 * 2490 *
@@ -2518,6 +2549,18 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
2518 */ 2549 */
2519void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success); 2550void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success);
2520 2551
2552/**
2553 * ieee80211_request_smps - request SM PS transition
2554 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2555 * @smps_mode: new SM PS mode
2556 *
2557 * This allows the driver to request an SM PS transition in managed
2558 * mode. This is useful when the driver has more information than
2559 * the stack about possible interference, for example from Bluetooth.
2560 */
2561void ieee80211_request_smps(struct ieee80211_vif *vif,
2562 enum ieee80211_smps_mode smps_mode);
2563
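Illustratively, a driver with a Bluetooth-coexistence signal might use it like this (the notification hook is assumed):

	static void foo_bt_coex_notify(struct ieee80211_vif *vif, bool bt_active)
	{
		ieee80211_request_smps(vif, bt_active ?
				       IEEE80211_SMPS_DYNAMIC :
				       IEEE80211_SMPS_AUTOMATIC);
	}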
2521/* Rate control API */ 2564/* Rate control API */
2522 2565
2523/** 2566/**
@@ -2681,4 +2724,26 @@ conf_is_ht(struct ieee80211_conf *conf)
2681 return conf->channel_type != NL80211_CHAN_NO_HT; 2724 return conf->channel_type != NL80211_CHAN_NO_HT;
2682} 2725}
2683 2726
2727static inline enum nl80211_iftype
2728ieee80211_iftype_p2p(enum nl80211_iftype type, bool p2p)
2729{
2730 if (p2p) {
2731 switch (type) {
2732 case NL80211_IFTYPE_STATION:
2733 return NL80211_IFTYPE_P2P_CLIENT;
2734 case NL80211_IFTYPE_AP:
2735 return NL80211_IFTYPE_P2P_GO;
2736 default:
2737 break;
2738 }
2739 }
2740 return type;
2741}
2742
2743static inline enum nl80211_iftype
2744ieee80211_vif_type_p2p(struct ieee80211_vif *vif)
2745{
2746 return ieee80211_iftype_p2p(vif->type, vif->p2p);
2747}
2748
2684#endif /* MAC80211_H */ 2749#endif /* MAC80211_H */
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index 35672b1cf44a..37f23dc05de8 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -77,6 +77,11 @@ static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
77#define MAX_PNPIPE_HEADER (MAX_PHONET_HEADER + 4) 77#define MAX_PNPIPE_HEADER (MAX_PHONET_HEADER + 4)
78 78
79enum { 79enum {
80 PNS_PIPE_CREATE_REQ = 0x00,
81 PNS_PIPE_CREATE_RESP,
82 PNS_PIPE_REMOVE_REQ,
83 PNS_PIPE_REMOVE_RESP,
84
80 PNS_PIPE_DATA = 0x20, 85 PNS_PIPE_DATA = 0x20,
81 PNS_PIPE_ALIGNED_DATA, 86 PNS_PIPE_ALIGNED_DATA,
82 87
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index 7b114079a51b..d5df797f9540 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -54,6 +54,11 @@ void pn_sock_hash(struct sock *sk);
54void pn_sock_unhash(struct sock *sk); 54void pn_sock_unhash(struct sock *sk);
55int pn_sock_get_port(struct sock *sk, unsigned short sport); 55int pn_sock_get_port(struct sock *sk, unsigned short sport);
56 56
57struct sock *pn_find_sock_by_res(struct net *net, u8 res);
58int pn_sock_bind_res(struct sock *sock, u8 res);
59int pn_sock_unbind_res(struct sock *sk, u8 res);
60void pn_sock_unbind_all_res(struct sock *sk);
61
57int pn_skb_send(struct sock *sk, struct sk_buff *skb, 62int pn_skb_send(struct sock *sk, struct sk_buff *skb,
58 const struct sockaddr_pn *target); 63 const struct sockaddr_pn *target);
59 64
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
index 2d16783d5e20..13649eb57413 100644
--- a/include/net/phonet/pn_dev.h
+++ b/include/net/phonet/pn_dev.h
@@ -57,5 +57,6 @@ struct net_device *phonet_route_output(struct net *net, u8 daddr);
57#define PN_NO_ADDR 0xff 57#define PN_NO_ADDR 0xff
58 58
59extern const struct file_operations pn_sock_seq_fops; 59extern const struct file_operations pn_sock_seq_fops;
60extern const struct file_operations pn_res_seq_fops;
60 61
61#endif 62#endif
diff --git a/include/net/raw.h b/include/net/raw.h
index 43c57502659b..42ce6fe7a2d5 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -45,7 +45,10 @@ struct raw_iter_state {
45 struct raw_hashinfo *h; 45 struct raw_hashinfo *h;
46}; 46};
47 47
48#define raw_seq_private(seq) ((struct raw_iter_state *)(seq)->private) 48static inline struct raw_iter_state *raw_seq_private(struct seq_file *seq)
49{
50 return seq->private;
51}
49void *raw_seq_start(struct seq_file *seq, loff_t *pos); 52void *raw_seq_start(struct seq_file *seq, loff_t *pos);
50void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos); 53void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos);
51void raw_seq_stop(struct seq_file *seq, void *v); 54void raw_seq_stop(struct seq_file *seq, void *v);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 3c8728aaab4e..eda8808fdacd 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -601,7 +601,7 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
601 slot = 0; 601 slot = 0;
602 slot >>= rtab->rate.cell_log; 602 slot >>= rtab->rate.cell_log;
603 if (slot > 255) 603 if (slot > 255)
604 return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]); 604 return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
605 return rtab->data[slot]; 605 return rtab->data[slot];
606} 606}
607 607
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 65946bc43d00..505845ddb0be 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -275,24 +275,35 @@ struct sctp_mib {
275/* Print debugging messages. */ 275/* Print debugging messages. */
276#if SCTP_DEBUG 276#if SCTP_DEBUG
277extern int sctp_debug_flag; 277extern int sctp_debug_flag;
278#define SCTP_DEBUG_PRINTK(whatever...) \ 278#define SCTP_DEBUG_PRINTK(fmt, args...) \
279 ((void) (sctp_debug_flag && printk(KERN_DEBUG whatever))) 279do { \
280#define SCTP_DEBUG_PRINTK_IPADDR(lead, trail, leadparm, saddr, otherparms...) \ 280 if (sctp_debug_flag) \
281 if (sctp_debug_flag) { \ 281 printk(KERN_DEBUG pr_fmt(fmt), ##args); \
282 if (saddr->sa.sa_family == AF_INET6) { \ 282} while (0)
283 printk(KERN_DEBUG \ 283#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) \
284 lead "%pI6" trail, \ 284do { \
285 leadparm, \ 285 if (sctp_debug_flag) \
286 &saddr->v6.sin6_addr, \ 286 pr_cont(fmt, ##args); \
287 otherparms); \ 287} while (0)
288 } else { \ 288#define SCTP_DEBUG_PRINTK_IPADDR(fmt_lead, fmt_trail, \
289 printk(KERN_DEBUG \ 289 args_lead, saddr, args_trail...) \
290 lead "%pI4" trail, \ 290do { \
291 leadparm, \ 291 if (sctp_debug_flag) { \
292 &saddr->v4.sin_addr.s_addr, \ 292 if (saddr->sa.sa_family == AF_INET6) { \
293 otherparms); \ 293 printk(KERN_DEBUG \
294 } \ 294 pr_fmt(fmt_lead "%pI6" fmt_trail), \
295 } 295 args_lead, \
296 &saddr->v6.sin6_addr, \
297 args_trail); \
298 } else { \
299 printk(KERN_DEBUG \
300 pr_fmt(fmt_lead "%pI4" fmt_trail), \
301 args_lead, \
302 &saddr->v4.sin_addr.s_addr, \
303 args_trail); \
304 } \
305 } \
306} while (0)
296#define SCTP_ENABLE_DEBUG { sctp_debug_flag = 1; } 307#define SCTP_ENABLE_DEBUG { sctp_debug_flag = 1; }
297#define SCTP_DISABLE_DEBUG { sctp_debug_flag = 0; } 308#define SCTP_DISABLE_DEBUG { sctp_debug_flag = 0; }
298 309
@@ -306,6 +317,7 @@ extern int sctp_debug_flag;
306#else /* SCTP_DEBUG */ 317#else /* SCTP_DEBUG */
307 318
308#define SCTP_DEBUG_PRINTK(whatever...) 319#define SCTP_DEBUG_PRINTK(whatever...)
320#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
309#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) 321#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
310#define SCTP_ENABLE_DEBUG 322#define SCTP_ENABLE_DEBUG
311#define SCTP_DISABLE_DEBUG 323#define SCTP_DISABLE_DEBUG
@@ -393,7 +405,7 @@ static inline void sctp_v6_del_protocol(void) { return; }
393/* Map an association to an assoc_id. */ 405/* Map an association to an assoc_id. */
394static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc) 406static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
395{ 407{
396 return (asoc?asoc->assoc_id:0); 408 return asoc ? asoc->assoc_id : 0;
397} 409}
398 410
399/* Look up the association by its id. */ 411/* Look up the association by its id. */
@@ -461,7 +473,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
461/* Tests if the list has one and only one entry. */ 473/* Tests if the list has one and only one entry. */
462static inline int sctp_list_single_entry(struct list_head *head) 474static inline int sctp_list_single_entry(struct list_head *head)
463{ 475{
464 return ((head->next != head) && (head->next == head->prev)); 476 return (head->next != head) && (head->next == head->prev);
465} 477}
466 478
467/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */ 479/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */
@@ -619,13 +631,13 @@ static inline int sctp_sanity_check(void)
619/* This is the hash function for the SCTP port hash table. */ 631/* This is the hash function for the SCTP port hash table. */
620static inline int sctp_phashfn(__u16 lport) 632static inline int sctp_phashfn(__u16 lport)
621{ 633{
622 return (lport & (sctp_port_hashsize - 1)); 634 return lport & (sctp_port_hashsize - 1);
623} 635}
624 636
625/* This is the hash function for the endpoint hash table. */ 637/* This is the hash function for the endpoint hash table. */
626static inline int sctp_ep_hashfn(__u16 lport) 638static inline int sctp_ep_hashfn(__u16 lport)
627{ 639{
628 return (lport & (sctp_ep_hashsize - 1)); 640 return lport & (sctp_ep_hashsize - 1);
629} 641}
630 642
631/* This is the hash function for the association hash table. */ 643/* This is the hash function for the association hash table. */
@@ -633,7 +645,7 @@ static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport)
633{ 645{
634 int h = (lport << 16) + rport; 646 int h = (lport << 16) + rport;
635 h ^= h>>8; 647 h ^= h>>8;
636 return (h & (sctp_assoc_hashsize - 1)); 648 return h & (sctp_assoc_hashsize - 1);
637} 649}
638 650
639/* This is the hash function for the association hash table. This is 651/* This is the hash function for the association hash table. This is
@@ -644,7 +656,7 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag)
644{ 656{
645 int h = (lport << 16) + rport; 657 int h = (lport << 16) + rport;
646 h ^= vtag; 658 h ^= vtag;
647 return (h & (sctp_assoc_hashsize-1)); 659 return h & (sctp_assoc_hashsize - 1);
648} 660}
649 661
650#define sctp_for_each_hentry(epb, node, head) \ 662#define sctp_for_each_hentry(epb, node, head) \
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 4088c89a9055..9352d12f02de 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -345,12 +345,12 @@ enum {
345 345
346static inline int TSN_lt(__u32 s, __u32 t) 346static inline int TSN_lt(__u32 s, __u32 t)
347{ 347{
348 return (((s) - (t)) & TSN_SIGN_BIT); 348 return ((s) - (t)) & TSN_SIGN_BIT;
349} 349}
350 350
351static inline int TSN_lte(__u32 s, __u32 t) 351static inline int TSN_lte(__u32 s, __u32 t)
352{ 352{
353 return (((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT)); 353 return ((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT);
354} 354}
355 355
356/* Compare two SSNs */ 356/* Compare two SSNs */
@@ -369,12 +369,12 @@ enum {
369 369
370static inline int SSN_lt(__u16 s, __u16 t) 370static inline int SSN_lt(__u16 s, __u16 t)
371{ 371{
372 return (((s) - (t)) & SSN_SIGN_BIT); 372 return ((s) - (t)) & SSN_SIGN_BIT;
373} 373}
374 374
375static inline int SSN_lte(__u16 s, __u16 t) 375static inline int SSN_lte(__u16 s, __u16 t)
376{ 376{
377 return (((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT)); 377 return ((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT);
378} 378}
379 379
380/* 380/*
@@ -388,7 +388,7 @@ enum {
388 388
389static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t) 389static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t)
390{ 390{
391 return (((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT)); 391 return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT);
392} 392}
393 393
394/* Check VTAG of the packet matches the sender's own tag. */ 394/* Check VTAG of the packet matches the sender's own tag. */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index f9e7473613bd..69fef4fb79c0 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -847,7 +847,7 @@ void sctp_packet_free(struct sctp_packet *);
847 847
848static inline int sctp_packet_empty(struct sctp_packet *packet) 848static inline int sctp_packet_empty(struct sctp_packet *packet)
849{ 849{
850 return (packet->size == packet->overhead); 850 return packet->size == packet->overhead;
851} 851}
852 852
853/* This represents a remote transport address. 853/* This represents a remote transport address.
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
index 4aabc5a96cf6..e7728bc14ccf 100644
--- a/include/net/sctp/tsnmap.h
+++ b/include/net/sctp/tsnmap.h
@@ -157,7 +157,7 @@ __u16 sctp_tsnmap_pending(struct sctp_tsnmap *map);
157/* Is there a gap in the TSN map? */ 157/* Is there a gap in the TSN map? */
158static inline int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map) 158static inline int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map)
159{ 159{
160 return (map->cumulative_tsn_ack_point != map->max_tsn_seen); 160 return map->cumulative_tsn_ack_point != map->max_tsn_seen;
161} 161}
162 162
163/* Mark a duplicate TSN. Note: limit the storage of duplicate TSN 163/* Mark a duplicate TSN. Note: limit the storage of duplicate TSN
diff --git a/include/net/sock.h b/include/net/sock.h
index adab9dc58183..73a4f9702a65 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1558,7 +1558,11 @@ static inline void sk_wake_async(struct sock *sk, int how, int band)
1558} 1558}
1559 1559
1560#define SOCK_MIN_SNDBUF 2048 1560#define SOCK_MIN_SNDBUF 2048
1561#define SOCK_MIN_RCVBUF 256 1561/*
1562 * Since sk_rmem_alloc sums skb->truesize, even a small frame might need
1563 * sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak
1564 */
1565#define SOCK_MIN_RCVBUF (2048 + sizeof(struct sk_buff))
1562 1566
1563static inline void sk_stream_moderate_sndbuf(struct sock *sk) 1567static inline void sk_stream_moderate_sndbuf(struct sock *sk)
1564{ 1568{
@@ -1670,17 +1674,13 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
1670 1674
1671/** 1675/**
1672 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped 1676 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
1673 * @msg: outgoing packet
1674 * @sk: socket sending this packet 1677 * @sk: socket sending this packet
1675 * @shtx: filled with instructions for time stamping 1678 * @tx_flags: filled with instructions for time stamping
1676 * 1679 *
1677 * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if 1680 * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if
1678 * parameters are invalid. 1681 * parameters are invalid.
1679 */ 1682 */
1680extern int sock_tx_timestamp(struct msghdr *msg, 1683extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
1681 struct sock *sk,
1682 union skb_shared_tx *shtx);
1683
1684 1684
1685/** 1685/**
1686 * sk_eat_skb - Release a skb if it is no longer needed 1686 * sk_eat_skb - Release a skb if it is no longer needed
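
The raised SOCK_MIN_RCVBUF above reflects that sk_rmem_alloc is charged skb->truesize, i.e. payload plus struct sk_buff and allocation padding, so the old 256-byte floor could not admit even one full-sized frame unless the driver copybreaks into a small buffer. A rough userspace illustration of the accounting, with an assumed overhead constant rather than the kernel's exact sizeof:

#include <stdio.h>

/* Illustrative stand-in for sizeof(struct sk_buff) plus allocator
 * padding; the real value is architecture-dependent. */
#define SKB_OVERHEAD 256
#define MTU          1500

int main(void)
{
	unsigned int truesize  = SKB_OVERHEAD + MTU;	/* charge per frame */
	unsigned int old_floor = 256;
	unsigned int new_floor = 2048 + SKB_OVERHEAD;

	printf("one frame charges ~%u bytes\n", truesize);
	printf("old floor %4u admits %u full frame(s)\n",
	       old_floor, old_floor / truesize);
	printf("new floor %4u admits %u full frame(s)\n",
	       new_floor, new_floor / truesize);
	return 0;
}
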
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
new file mode 100644
index 000000000000..9e8710be7a04
--- /dev/null
+++ b/include/net/tc_act/tc_csum.h
@@ -0,0 +1,15 @@
1#ifndef __NET_TC_CSUM_H
2#define __NET_TC_CSUM_H
3
4#include <linux/types.h>
5#include <net/act_api.h>
6
7struct tcf_csum {
8 struct tcf_common common;
9
10 u32 update_flags;
11};
12#define to_tcf_csum(pc) \
13 container_of(pc, struct tcf_csum, common)
14
15#endif /* __NET_TC_CSUM_H */
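
to_tcf_csum() is the standard container_of() downcast: given a pointer to the embedded tcf_common, it recovers the enclosing tcf_csum by subtracting the member offset. A self-contained sketch of the same pattern, with the struct fields trimmed to what the example needs:

#include <stdio.h>
#include <stddef.h>

struct tcf_common { int refcnt; };	/* trimmed stand-in */

struct tcf_csum {
	struct tcf_common common;
	unsigned int update_flags;
};

#define to_tcf_csum(pc) \
	((struct tcf_csum *)((char *)(pc) - offsetof(struct tcf_csum, common)))

int main(void)
{
	struct tcf_csum c = { .update_flags = 0x1 };
	struct tcf_common *pc = &c.common;

	/* Recovers &c from the embedded-member pointer. */
	printf("flags = 0x%x\n", to_tcf_csum(pc)->update_flags);
	return 0;
}
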
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3e4b33e36602..914a60c7ad62 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -803,6 +803,15 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
803/* Use define here intentionally to get WARN_ON location shown at the caller */ 803/* Use define here intentionally to get WARN_ON location shown at the caller */
804#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out) 804#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
805 805
806/*
807 * Convert RFC 3390 larger initial window into an equivalent number of packets.
808 * This is based on the numbers specified in RFC 5681, 3.1.
809 */
810static inline u32 rfc3390_bytes_to_packets(const u32 smss)
811{
812 return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
813}
814
806extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); 815extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
807extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); 816extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
808 817
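
The helper encodes the RFC 5681 section 3.1 table: an initial window of 4 segments for SMSS <= 1095 bytes, 3 for 1096..2190, and 2 above that. A quick check of the mapping at the boundary values:

#include <stdio.h>
#include <stdint.h>

/* Same mapping as the helper above (RFC 5681, section 3.1). */
static uint32_t rfc3390_bytes_to_packets(uint32_t smss)
{
	return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
}

int main(void)
{
	uint32_t mss[] = { 536, 1095, 1096, 1460, 2190, 2191, 9000 };

	for (unsigned int i = 0; i < sizeof(mss) / sizeof(mss[0]); i++)
		printf("SMSS %4u -> IW of %u segments\n",
		       mss[i], rfc3390_bytes_to_packets(mss[i]));
	return 0;
}
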
diff --git a/include/net/tipc/tipc_msg.h b/include/net/tipc/tipc_msg.h
index 2e159a812f83..ffe50b4e7b93 100644
--- a/include/net/tipc/tipc_msg.h
+++ b/include/net/tipc/tipc_msg.h
@@ -107,7 +107,7 @@ static inline u32 msg_hdr_sz(struct tipc_msg *m)
107 107
108static inline int msg_short(struct tipc_msg *m) 108static inline int msg_short(struct tipc_msg *m)
109{ 109{
110 return (msg_hdr_sz(m) == 24); 110 return msg_hdr_sz(m) == 24;
111} 111}
112 112
113static inline u32 msg_size(struct tipc_msg *m) 113static inline u32 msg_size(struct tipc_msg *m)
@@ -117,7 +117,7 @@ static inline u32 msg_size(struct tipc_msg *m)
117 117
118static inline u32 msg_data_sz(struct tipc_msg *m) 118static inline u32 msg_data_sz(struct tipc_msg *m)
119{ 119{
120 return (msg_size(m) - msg_hdr_sz(m)); 120 return msg_size(m) - msg_hdr_sz(m);
121} 121}
122 122
123static inline unchar *msg_data(struct tipc_msg *m) 123static inline unchar *msg_data(struct tipc_msg *m)
@@ -132,17 +132,17 @@ static inline u32 msg_type(struct tipc_msg *m)
132 132
133static inline u32 msg_named(struct tipc_msg *m) 133static inline u32 msg_named(struct tipc_msg *m)
134{ 134{
135 return (msg_type(m) == TIPC_NAMED_MSG); 135 return msg_type(m) == TIPC_NAMED_MSG;
136} 136}
137 137
138static inline u32 msg_mcast(struct tipc_msg *m) 138static inline u32 msg_mcast(struct tipc_msg *m)
139{ 139{
140 return (msg_type(m) == TIPC_MCAST_MSG); 140 return msg_type(m) == TIPC_MCAST_MSG;
141} 141}
142 142
143static inline u32 msg_connected(struct tipc_msg *m) 143static inline u32 msg_connected(struct tipc_msg *m)
144{ 144{
145 return (msg_type(m) == TIPC_CONN_MSG); 145 return msg_type(m) == TIPC_CONN_MSG;
146} 146}
147 147
148static inline u32 msg_errcode(struct tipc_msg *m) 148static inline u32 msg_errcode(struct tipc_msg *m)
diff --git a/net/802/fc.c b/net/802/fc.c
index 34cf1ee014b8..1e49f2d4ea96 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -70,7 +70,7 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev,
70 if(daddr) 70 if(daddr)
71 { 71 {
72 memcpy(fch->daddr,daddr,dev->addr_len); 72 memcpy(fch->daddr,daddr,dev->addr_len);
73 return(hdr_len); 73 return hdr_len;
74 } 74 }
75 return -hdr_len; 75 return -hdr_len;
76} 76}
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 3ef0ab0a543a..94b3ad08f39a 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -82,10 +82,10 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev,
82 if (daddr != NULL) 82 if (daddr != NULL)
83 { 83 {
84 memcpy(fddi->daddr, daddr, dev->addr_len); 84 memcpy(fddi->daddr, daddr, dev->addr_len);
85 return(hl); 85 return hl;
86 } 86 }
87 87
88 return(-hl); 88 return -hl;
89} 89}
90 90
91 91
@@ -108,7 +108,7 @@ static int fddi_rebuild_header(struct sk_buff *skb)
108 { 108 {
109 printk("%s: Don't know how to resolve type %04X addresses.\n", 109 printk("%s: Don't know how to resolve type %04X addresses.\n",
110 skb->dev->name, ntohs(fddi->hdr.llc_snap.ethertype)); 110 skb->dev->name, ntohs(fddi->hdr.llc_snap.ethertype));
111 return(0); 111 return 0;
112 } 112 }
113} 113}
114 114
@@ -162,7 +162,7 @@ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev)
162 162
163 /* Assume 802.2 SNAP frames, for now */ 163 /* Assume 802.2 SNAP frames, for now */
164 164
165 return(type); 165 return type;
166} 166}
167 167
168EXPORT_SYMBOL(fddi_type_trans); 168EXPORT_SYMBOL(fddi_type_trans);
@@ -170,9 +170,9 @@ EXPORT_SYMBOL(fddi_type_trans);
170int fddi_change_mtu(struct net_device *dev, int new_mtu) 170int fddi_change_mtu(struct net_device *dev, int new_mtu)
171{ 171{
172 if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN)) 172 if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN))
173 return(-EINVAL); 173 return -EINVAL;
174 dev->mtu = new_mtu; 174 dev->mtu = new_mtu;
175 return(0); 175 return 0;
176} 176}
177EXPORT_SYMBOL(fddi_change_mtu); 177EXPORT_SYMBOL(fddi_change_mtu);
178 178
diff --git a/net/802/hippi.c b/net/802/hippi.c
index cd3e8e929529..91aca8780fd0 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -152,7 +152,7 @@ int hippi_change_mtu(struct net_device *dev, int new_mtu)
152 if ((new_mtu < 68) || (new_mtu > 65280)) 152 if ((new_mtu < 68) || (new_mtu > 65280))
153 return -EINVAL; 153 return -EINVAL;
154 dev->mtu = new_mtu; 154 dev->mtu = new_mtu;
155 return(0); 155 return 0;
156} 156}
157EXPORT_SYMBOL(hippi_change_mtu); 157EXPORT_SYMBOL(hippi_change_mtu);
158 158
diff --git a/net/802/tr.c b/net/802/tr.c
index 1c6e596074df..5e20cf8a074b 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -145,7 +145,7 @@ static int tr_header(struct sk_buff *skb, struct net_device *dev,
145 { 145 {
146 memcpy(trh->daddr,daddr,dev->addr_len); 146 memcpy(trh->daddr,daddr,dev->addr_len);
147 tr_source_route(skb, trh, dev); 147 tr_source_route(skb, trh, dev);
148 return(hdr_len); 148 return hdr_len;
149 } 149 }
150 150
151 return -hdr_len; 151 return -hdr_len;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a2ad15250575..2c6c2bd6e4a9 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -525,6 +525,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
525 break; 525 break;
526 526
527 case NETDEV_UNREGISTER: 527 case NETDEV_UNREGISTER:
528 /* twiddle thumbs on netns device moves */
529 if (dev->reg_state != NETREG_UNREGISTERING)
530 break;
531
528 /* Delete all VLANs for this dev. */ 532 /* Delete all VLANs for this dev. */
529 grp->killall = 1; 533 grp->killall = 1;
530 534
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 8d9503ad01da..b26ce343072c 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -25,6 +25,7 @@ struct vlan_priority_tci_mapping {
25 * @rx_multicast: number of received multicast packets 25 * @rx_multicast: number of received multicast packets
26 * @syncp: synchronization point for 64bit counters 26 * @syncp: synchronization point for 64bit counters
27 * @rx_errors: number of errors 27 * @rx_errors: number of errors
28 * @rx_dropped: number of dropped packets
28 */ 29 */
29struct vlan_rx_stats { 30struct vlan_rx_stats {
30 u64 rx_packets; 31 u64 rx_packets;
@@ -32,6 +33,7 @@ struct vlan_rx_stats {
32 u64 rx_multicast; 33 u64 rx_multicast;
33 struct u64_stats_sync syncp; 34 struct u64_stats_sync syncp;
34 unsigned long rx_errors; 35 unsigned long rx_errors;
36 unsigned long rx_dropped;
35}; 37};
36 38
37/** 39/**
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 01ddb0472f86..0eb486d342dc 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -27,7 +27,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
27 else if (vlan_id) 27 else if (vlan_id)
28 goto drop; 28 goto drop;
29 29
30 return (polling ? netif_receive_skb(skb) : netif_rx(skb)); 30 return polling ? netif_receive_skb(skb) : netif_rx(skb);
31 31
32drop: 32drop:
33 dev_kfree_skb_any(skb); 33 dev_kfree_skb_any(skb);
@@ -35,12 +35,12 @@ drop:
35} 35}
36EXPORT_SYMBOL(__vlan_hwaccel_rx); 36EXPORT_SYMBOL(__vlan_hwaccel_rx);
37 37
38int vlan_hwaccel_do_receive(struct sk_buff *skb) 38void vlan_hwaccel_do_receive(struct sk_buff *skb)
39{ 39{
40 struct net_device *dev = skb->dev; 40 struct net_device *dev = skb->dev;
41 struct vlan_rx_stats *rx_stats; 41 struct vlan_rx_stats *rx_stats;
42 42
43 skb->dev = vlan_dev_info(dev)->real_dev; 43 skb->dev = vlan_dev_real_dev(dev);
44 netif_nit_deliver(skb); 44 netif_nit_deliver(skb);
45 45
46 skb->dev = dev; 46 skb->dev = dev;
@@ -69,7 +69,6 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
69 break; 69 break;
70 } 70 }
71 u64_stats_update_end(&rx_stats->syncp); 71 u64_stats_update_end(&rx_stats->syncp);
72 return 0;
73} 72}
74 73
75struct net_device *vlan_dev_real_dev(const struct net_device *dev) 74struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -106,9 +105,12 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
106 goto drop; 105 goto drop;
107 106
108 for (p = napi->gro_list; p; p = p->next) { 107 for (p = napi->gro_list; p; p = p->next) {
109 NAPI_GRO_CB(p)->same_flow = 108 unsigned long diffs;
110 p->dev == skb->dev && !compare_ether_header( 109
111 skb_mac_header(p), skb_gro_mac_header(skb)); 110 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
111 diffs |= compare_ether_header(skb_mac_header(p),
112 skb_gro_mac_header(skb));
113 NAPI_GRO_CB(p)->same_flow = !diffs;
112 NAPI_GRO_CB(p)->flush = 0; 114 NAPI_GRO_CB(p)->flush = 0;
113 } 115 }
114 116
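
The rewritten same_flow test accumulates every distinguishing bit into one diffs word, XORing the device pointers and OR-ing in the header comparison, so a single !diffs decides the flow match without short-circuit branches. A userspace sketch of the idea, where cmp_hdr() stands in for compare_ether_header(), which likewise returns nonzero on mismatch:

#include <stdio.h>
#include <string.h>

/* Stand-in for compare_ether_header(): nonzero when the 14-byte
 * Ethernet headers differ. */
static unsigned long cmp_hdr(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, 14) != 0;
}

int main(void)
{
	const void *dev_a = (void *)0x1000, *dev_b = (void *)0x2000;
	unsigned char h1[14] = "same-header!!";
	unsigned char h2[14] = "same-header!!";
	unsigned long diffs;

	/* Same device, same header: no bit set, so the flows match. */
	diffs  = (unsigned long)dev_a ^ (unsigned long)dev_a;
	diffs |= cmp_hdr(h1, h2);
	printf("same_flow = %d\n", !diffs);	/* 1 */

	/* Different device: any set bit makes the flows distinct. */
	diffs  = (unsigned long)dev_a ^ (unsigned long)dev_b;
	diffs |= cmp_hdr(h1, h2);
	printf("same_flow = %d\n", !diffs);	/* 0 */
	return 0;
}
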
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 3bccdd12a264..94a1feddeb49 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -225,7 +225,10 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
225 } 225 }
226 } 226 }
227 227
228 netif_rx(skb); 228 if (unlikely(netif_rx(skb) == NET_RX_DROP)) {
229 if (rx_stats)
230 rx_stats->rx_dropped++;
231 }
229 rcu_read_unlock(); 232 rcu_read_unlock();
230 return NET_RX_SUCCESS; 233 return NET_RX_SUCCESS;
231 234
@@ -843,13 +846,15 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
843 accum.rx_packets += rxpackets; 846 accum.rx_packets += rxpackets;
844 accum.rx_bytes += rxbytes; 847 accum.rx_bytes += rxbytes;
845 accum.rx_multicast += rxmulticast; 848 accum.rx_multicast += rxmulticast;
846 /* rx_errors is an ulong, not protected by syncp */ 849 /* rx_errors, rx_dropped are ulong, not protected by syncp */
847 accum.rx_errors += p->rx_errors; 850 accum.rx_errors += p->rx_errors;
851 accum.rx_dropped += p->rx_dropped;
848 } 852 }
849 stats->rx_packets = accum.rx_packets; 853 stats->rx_packets = accum.rx_packets;
850 stats->rx_bytes = accum.rx_bytes; 854 stats->rx_bytes = accum.rx_bytes;
851 stats->rx_errors = accum.rx_errors; 855 stats->rx_errors = accum.rx_errors;
852 stats->multicast = accum.rx_multicast; 856 stats->multicast = accum.rx_multicast;
857 stats->rx_dropped = accum.rx_dropped;
853 } 858 }
854 return stats; 859 return stats;
855} 860}
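
The fold above reads per-CPU 64-bit counters under u64_stats_sync, a seqcount: a reader snapshots the values and retries if a writer was active meanwhile, while the plain ulong fields (rx_errors, rx_dropped) are summed without it. A single-threaded sketch of the read-retry shape; the real API adds memory barriers and a write-side sequence bump:

#include <stdio.h>

struct rx_stats {
	unsigned int seq;		/* even = idle, odd = writer active */
	unsigned long long rx_packets;
	unsigned long long rx_bytes;
};

static void stats_read(const struct rx_stats *s,
		       unsigned long long *pkts, unsigned long long *bytes)
{
	unsigned int start;

	do {
		start = s->seq;			/* u64_stats_fetch_begin() */
		*pkts = s->rx_packets;
		*bytes = s->rx_bytes;
	} while ((start & 1) || start != s->seq); /* ..._fetch_retry() */
}

int main(void)
{
	struct rx_stats s = { .seq = 2, .rx_packets = 10, .rx_bytes = 15140 };
	unsigned long long p, b;

	stats_read(&s, &p, &b);
	printf("%llu packets, %llu bytes\n", p, b);
	return 0;
}
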
diff --git a/net/9p/client.c b/net/9p/client.c
index dc6f2f26d023..f34b9f510818 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -61,13 +61,13 @@ static const match_table_t tokens = {
61 61
62inline int p9_is_proto_dotl(struct p9_client *clnt) 62inline int p9_is_proto_dotl(struct p9_client *clnt)
63{ 63{
64 return (clnt->proto_version == p9_proto_2000L); 64 return clnt->proto_version == p9_proto_2000L;
65} 65}
66EXPORT_SYMBOL(p9_is_proto_dotl); 66EXPORT_SYMBOL(p9_is_proto_dotl);
67 67
68inline int p9_is_proto_dotu(struct p9_client *clnt) 68inline int p9_is_proto_dotu(struct p9_client *clnt)
69{ 69{
70 return (clnt->proto_version == p9_proto_2000u); 70 return clnt->proto_version == p9_proto_2000u;
71} 71}
72EXPORT_SYMBOL(p9_is_proto_dotu); 72EXPORT_SYMBOL(p9_is_proto_dotu);
73 73
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index c85109d809ca..078eb162d9bf 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -222,7 +222,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
222 } 222 }
223} 223}
224 224
225static unsigned int 225static int
226p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt) 226p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
227{ 227{
228 int ret, n; 228 int ret, n;
diff --git a/net/atm/common.c b/net/atm/common.c
index 940404a73b3d..1b9c52a02cd3 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -792,7 +792,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
792 default: 792 default:
793 if (level == SOL_SOCKET) 793 if (level == SOL_SOCKET)
794 return -EINVAL; 794 return -EINVAL;
795 break; 795 break;
796 } 796 }
797 if (!vcc->dev || !vcc->dev->ops->getsockopt) 797 if (!vcc->dev || !vcc->dev->ops->getsockopt)
798 return -EINVAL; 798 return -EINVAL;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index d98bde1a0ac8..181d70c73d70 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -220,7 +220,6 @@ static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
220static int lec_open(struct net_device *dev) 220static int lec_open(struct net_device *dev)
221{ 221{
222 netif_start_queue(dev); 222 netif_start_queue(dev);
223 memset(&dev->stats, 0, sizeof(struct net_device_stats));
224 223
225 return 0; 224 return 0;
226} 225}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index cfdfd7e2a172..26eaebf4aaa9 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1103,7 +1103,7 @@ done:
1103out: 1103out:
1104 release_sock(sk); 1104 release_sock(sk);
1105 1105
1106 return 0; 1106 return err;
1107} 1107}
1108 1108
1109/* 1109/*
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 7805945a5fd6..a1690845dc6e 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -412,7 +412,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
412{ 412{
413 ax25_uid_assoc *user; 413 ax25_uid_assoc *user;
414 ax25_route *ax25_rt; 414 ax25_route *ax25_rt;
415 int err; 415 int err = 0;
416 416
417 if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL) 417 if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
418 return -EHOSTUNREACH; 418 return -EHOSTUNREACH;
@@ -453,7 +453,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
453put: 453put:
454 ax25_put_route(ax25_rt); 454 ax25_put_route(ax25_rt);
455 455
456 return 0; 456 return err;
457} 457}
458 458
459struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src, 459struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 421c45bd1b95..ed0f22f57668 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -297,13 +297,12 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
297 mask |= POLLERR; 297 mask |= POLLERR;
298 298
299 if (sk->sk_shutdown & RCV_SHUTDOWN) 299 if (sk->sk_shutdown & RCV_SHUTDOWN)
300 mask |= POLLRDHUP; 300 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
301 301
302 if (sk->sk_shutdown == SHUTDOWN_MASK) 302 if (sk->sk_shutdown == SHUTDOWN_MASK)
303 mask |= POLLHUP; 303 mask |= POLLHUP;
304 304
305 if (!skb_queue_empty(&sk->sk_receive_queue) || 305 if (!skb_queue_empty(&sk->sk_receive_queue))
306 (sk->sk_shutdown & RCV_SHUTDOWN))
307 mask |= POLLIN | POLLRDNORM; 306 mask |= POLLIN | POLLRDNORM;
308 307
309 if (sk->sk_state == BT_CLOSED) 308 if (sk->sk_state == BT_CLOSED)
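
The poll fix reports POLLIN | POLLRDNORM whenever the receive side is shut down, not only when data is queued, so a blocked reader wakes up and can observe EOF. A compact sketch of the resulting mask logic (the shutdown flag values mirror the kernel's SHUTDOWN_MASK convention):

#define _GNU_SOURCE		/* for POLLRDHUP */
#include <stdio.h>
#include <poll.h>

#define RCV_SHUTDOWN  1		/* mirror the kernel's shutdown bits */
#define SEND_SHUTDOWN 2
#define SHUTDOWN_MASK (RCV_SHUTDOWN | SEND_SHUTDOWN)

static int poll_mask(int shutdown, int queue_empty)
{
	int mask = 0;

	if (shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (!queue_empty)
		mask |= POLLIN | POLLRDNORM;
	return mask;
}

int main(void)
{
	/* Half-closed and empty: readable anyway, so read() returns 0 (EOF). */
	printf("0x%x\n", poll_mask(RCV_SHUTDOWN, 1));
	printf("0x%x\n", poll_mask(0, 0));
	return 0;
}
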
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 7dca91bb8c57..15ea84ba344e 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -179,13 +179,13 @@ static unsigned char rfcomm_crc_table[256] = {
179/* FCS on 2 bytes */ 179/* FCS on 2 bytes */
180static inline u8 __fcs(u8 *data) 180static inline u8 __fcs(u8 *data)
181{ 181{
182 return (0xff - __crc(data)); 182 return 0xff - __crc(data);
183} 183}
184 184
185/* FCS on 3 bytes */ 185/* FCS on 3 bytes */
186static inline u8 __fcs2(u8 *data) 186static inline u8 __fcs2(u8 *data)
187{ 187{
188 return (0xff - rfcomm_crc_table[__crc(data) ^ data[2]]); 188 return 0xff - rfcomm_crc_table[__crc(data) ^ data[2]];
189} 189}
190 190
191/* Check FCS */ 191/* Check FCS */
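
The FCS helpers above chain table lookups: rfcomm_crc_table[] is a precomputed reflected CRC-8 over the TS 07.10 polynomial x^8 + x^2 + x + 1, and 0xff - crc is the one's complement (identical to 0xff ^ crc, since subtracting from all-ones never borrows). A sketch that regenerates the table at runtime; the reflected polynomial 0xE0 and the 0xff seed are stated assumptions that reproduce the kernel table (table[1] comes out 0x91, matching rfcomm_crc_table[1]):

#include <stdio.h>
#include <stdint.h>

static uint8_t crc_table[256];

/* Rebuild the reflected CRC-8 table for x^8 + x^2 + x + 1
 * (reflected polynomial 0xE0). */
static void build_table(void)
{
	for (int i = 0; i < 256; i++) {
		uint8_t crc = (uint8_t)i;

		for (int b = 0; b < 8; b++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xE0 : crc >> 1;
		crc_table[i] = crc;
	}
}

/* Chained lookups over two header bytes, seeded with 0xff. */
static uint8_t crc2(const uint8_t *data)
{
	return crc_table[crc_table[0xff ^ data[0]] ^ data[1]];
}

static uint8_t fcs2(const uint8_t *data)
{
	return 0xff - crc2(data);	/* same as 0xff ^ crc2(data) */
}

int main(void)
{
	uint8_t hdr[2] = { 0x03, 0x3f };	/* illustrative header bytes */

	build_table();
	printf("table[1] = 0x%02x\n", crc_table[1]);	/* 0x91 */
	printf("fcs      = 0x%02x\n", fcs2(hdr));
	return 0;
}
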
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index c03d2c3ff03e..89ad25a76202 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -61,30 +61,27 @@ static int port_cost(struct net_device *dev)
61} 61}
62 62
63 63
64/* 64/* Check for port carrier transitions. */
65 * Check for port carrier transitions.
66 * Called from work queue to allow for calling functions that
67 * might sleep (such as speed check), and to debounce.
68 */
69void br_port_carrier_check(struct net_bridge_port *p) 65void br_port_carrier_check(struct net_bridge_port *p)
70{ 66{
71 struct net_device *dev = p->dev; 67 struct net_device *dev = p->dev;
72 struct net_bridge *br = p->br; 68 struct net_bridge *br = p->br;
73 69
74 if (netif_carrier_ok(dev)) 70 if (netif_running(dev) && netif_carrier_ok(dev))
75 p->path_cost = port_cost(dev); 71 p->path_cost = port_cost(dev);
76 72
77 if (netif_running(br->dev)) { 73 if (!netif_running(br->dev))
78 spin_lock_bh(&br->lock); 74 return;
79 if (netif_carrier_ok(dev)) { 75
80 if (p->state == BR_STATE_DISABLED) 76 spin_lock_bh(&br->lock);
81 br_stp_enable_port(p); 77 if (netif_running(dev) && netif_carrier_ok(dev)) {
82 } else { 78 if (p->state == BR_STATE_DISABLED)
83 if (p->state != BR_STATE_DISABLED) 79 br_stp_enable_port(p);
84 br_stp_disable_port(p); 80 } else {
85 } 81 if (p->state != BR_STATE_DISABLED)
86 spin_unlock_bh(&br->lock); 82 br_stp_disable_port(p);
87 } 83 }
84 spin_unlock_bh(&br->lock);
88} 85}
89 86
90static void release_nbp(struct kobject *kobj) 87static void release_nbp(struct kobject *kobj)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 826cd5221536..6d04cfdf4541 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -141,7 +141,7 @@ struct sk_buff *br_handle_frame(struct sk_buff *skb)
141 const unsigned char *dest = eth_hdr(skb)->h_dest; 141 const unsigned char *dest = eth_hdr(skb)->h_dest;
142 int (*rhook)(struct sk_buff *skb); 142 int (*rhook)(struct sk_buff *skb);
143 143
144 if (skb->pkt_type == PACKET_LOOPBACK) 144 if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
145 return skb; 145 return skb;
146 146
147 if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) 147 if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 137f23259a93..77f7b5fda45a 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -209,6 +209,72 @@ static inline void nf_bridge_update_protocol(struct sk_buff *skb)
209 skb->protocol = htons(ETH_P_PPP_SES); 209 skb->protocol = htons(ETH_P_PPP_SES);
210} 210}
211 211
212/* When handing a packet over to the IP layer
213 * check whether we have a skb that is in the
214 * expected format
215 */
216
217int br_parse_ip_options(struct sk_buff *skb)
218{
219 struct ip_options *opt;
220 struct iphdr *iph;
221 struct net_device *dev = skb->dev;
222 u32 len;
223
224 iph = ip_hdr(skb);
225 opt = &(IPCB(skb)->opt);
226
227 /* Basic sanity checks */
228 if (iph->ihl < 5 || iph->version != 4)
229 goto inhdr_error;
230
231 if (!pskb_may_pull(skb, iph->ihl*4))
232 goto inhdr_error;
233
234 iph = ip_hdr(skb);
235 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
236 goto inhdr_error;
237
238 len = ntohs(iph->tot_len);
239 if (skb->len < len) {
240 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
241 goto drop;
242 } else if (len < (iph->ihl*4))
243 goto inhdr_error;
244
245 if (pskb_trim_rcsum(skb, len)) {
246 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
247 goto drop;
248 }
249
250 /* Zero out the CB buffer if no options present */
251 if (iph->ihl == 5) {
252 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
253 return 0;
254 }
255
256 opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
257 if (ip_options_compile(dev_net(dev), opt, skb))
258 goto inhdr_error;
259
260 /* Check correct handling of SRR option */
261 if (unlikely(opt->srr)) {
262 struct in_device *in_dev = __in_dev_get_rcu(dev);
263 if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
264 goto drop;
265
266 if (ip_options_rcv_srr(skb))
267 goto drop;
268 }
269
270 return 0;
271
272inhdr_error:
273 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
274drop:
275 return -1;
276}
277
212/* Fill in the header for fragmented IP packets handled by 278/* Fill in the header for fragmented IP packets handled by
213 * the IPv4 connection tracking code. 279 * the IPv4 connection tracking code.
214 */ 280 */
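
br_parse_ip_options() above front-loads the usual IPv4 sanity checks before the bridge hands a frame to netfilter/IP: version and IHL bounds, header checksum, and tot_len against the bytes actually present. A userspace sketch of the same checks over a raw buffer (field offsets per RFC 791; the test header is fabricated):

#include <stdio.h>
#include <stdint.h>

/* RFC 1071 one's-complement sum over 16-bit words, read big-endian;
 * a header whose checksum field is correct re-sums to 0. */
static uint16_t ip_csum(const uint8_t *buf, unsigned int len)
{
	uint32_t sum = 0;

	for (unsigned int i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(buf[i] << 8 | buf[i + 1]);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static int parse_ip_header(const uint8_t *pkt, unsigned int caplen)
{
	unsigned int ihl, len;

	if (caplen < 20 || (pkt[0] >> 4) != 4)
		return -1;			/* not IPv4 / truncated */
	ihl = (pkt[0] & 0x0f) * 4;
	if (ihl < 20 || caplen < ihl)
		return -1;			/* bad IHL */
	if (ip_csum(pkt, ihl) != 0)
		return -1;			/* checksum mismatch */
	len = (unsigned int)(pkt[2] << 8 | pkt[3]);
	if (len < ihl || caplen < len)
		return -1;			/* tot_len inconsistent */
	return 0;
}

int main(void)
{
	uint8_t hdr[20] = { 0x45, 0, 0, 20, 0, 0, 0, 0, 64, 6,
			    0, 0, 10, 0, 0, 1, 10, 0, 0, 2 };
	uint16_t c = ip_csum(hdr, 20);	/* checksum bytes 10..11 still zero */

	hdr[10] = c >> 8;
	hdr[11] = c & 0xff;
	printf("header valid: %s\n", parse_ip_header(hdr, 20) ? "no" : "yes");
	return 0;
}
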
@@ -549,7 +615,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
549{ 615{
550 struct net_bridge_port *p; 616 struct net_bridge_port *p;
551 struct net_bridge *br; 617 struct net_bridge *br;
552 struct iphdr *iph;
553 __u32 len = nf_bridge_encap_header_len(skb); 618 __u32 len = nf_bridge_encap_header_len(skb);
554 619
555 if (unlikely(!pskb_may_pull(skb, len))) 620 if (unlikely(!pskb_may_pull(skb, len)))
@@ -578,28 +643,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
578 643
579 nf_bridge_pull_encap_header_rcsum(skb); 644 nf_bridge_pull_encap_header_rcsum(skb);
580 645
581 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 646 if (br_parse_ip_options(skb))
582 goto inhdr_error; 647 /* Drop invalid packet */
583 648 goto out;
584 iph = ip_hdr(skb);
585 if (iph->ihl < 5 || iph->version != 4)
586 goto inhdr_error;
587
588 if (!pskb_may_pull(skb, 4 * iph->ihl))
589 goto inhdr_error;
590
591 iph = ip_hdr(skb);
592 if (ip_fast_csum((__u8 *) iph, iph->ihl) != 0)
593 goto inhdr_error;
594
595 len = ntohs(iph->tot_len);
596 if (skb->len < len || len < 4 * iph->ihl)
597 goto inhdr_error;
598
599 pskb_trim_rcsum(skb, len);
600
601 /* BUG: Should really parse the IP options here. */
602 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
603 649
604 nf_bridge_put(skb->nf_bridge); 650 nf_bridge_put(skb->nf_bridge);
605 if (!nf_bridge_alloc(skb)) 651 if (!nf_bridge_alloc(skb))
@@ -614,8 +660,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
614 660
615 return NF_STOLEN; 661 return NF_STOLEN;
616 662
617inhdr_error:
618// IP_INC_STATS_BH(IpInHdrErrors);
619out: 663out:
620 return NF_DROP; 664 return NF_DROP;
621} 665}
@@ -759,14 +803,19 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
759#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE) 803#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
760static int br_nf_dev_queue_xmit(struct sk_buff *skb) 804static int br_nf_dev_queue_xmit(struct sk_buff *skb)
761{ 805{
806 int ret;
807
762 if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && 808 if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
763 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && 809 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
764 !skb_is_gso(skb)) { 810 !skb_is_gso(skb)) {
765 /* BUG: Should really parse the IP options here. */ 811 if (br_parse_ip_options(skb))
766 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 812 /* Drop invalid packet */
767 return ip_fragment(skb, br_dev_queue_push_xmit); 813 return NF_DROP;
814 ret = ip_fragment(skb, br_dev_queue_push_xmit);
768 } else 815 } else
769 return br_dev_queue_push_xmit(skb); 816 ret = br_dev_queue_push_xmit(skb);
817
818 return ret;
770} 819}
771#else 820#else
772static int br_nf_dev_queue_xmit(struct sk_buff *skb) 821static int br_nf_dev_queue_xmit(struct sk_buff *skb)
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 0b586e9d1378..b99369a055d1 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -9,6 +9,8 @@
9 * and Sakari Ailus <sakari.ailus@nokia.com> 9 * and Sakari Ailus <sakari.ailus@nokia.com>
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
13
12#include <linux/version.h> 14#include <linux/version.h>
13#include <linux/module.h> 15#include <linux/module.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
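
The logging conversions below all lean on this pr_fmt definition: pr_warn() and friends paste pr_fmt(fmt) in front of every format string at preprocessing time, which is why the per-call "CAIF: %s():" prefixes can be dropped. A userspace sketch of the mechanism, where printf stands in for printk and "caif" for KBUILD_MODNAME (##__VA_ARGS__ is the GNU extension the kernel relies on):

#include <stdio.h>

#define pr_fmt(fmt) "caif" ":%s(): " fmt, __func__
#define pr_warn(fmt, ...) printf("warning: " pr_fmt(fmt), ##__VA_ARGS__)

static void probe(void)
{
	/* Expands to: printf("warning: caif:%s(): Out of memory\n", __func__); */
	pr_warn("Out of memory\n");
	pr_warn("cancel req :%d\n", 42);
}

int main(void)
{
	probe();
	return 0;
}
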
@@ -171,7 +173,7 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
171 net = dev_net(dev); 173 net = dev_net(dev);
172 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb); 174 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
173 caifd = caif_get(dev); 175 caifd = caif_get(dev);
174 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) 176 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
175 return NET_RX_DROP; 177 return NET_RX_DROP;
176 178
177 if (caifd->layer.up->receive(caifd->layer.up, pkt)) 179 if (caifd->layer.up->receive(caifd->layer.up, pkt))
@@ -214,7 +216,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
214 216
215 switch (what) { 217 switch (what) {
216 case NETDEV_REGISTER: 218 case NETDEV_REGISTER:
217 pr_info("CAIF: %s():register %s\n", __func__, dev->name); 219 netdev_info(dev, "register\n");
218 caifd = caif_device_alloc(dev); 220 caifd = caif_device_alloc(dev);
219 if (caifd == NULL) 221 if (caifd == NULL)
220 break; 222 break;
@@ -225,14 +227,13 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
225 break; 227 break;
226 228
227 case NETDEV_UP: 229 case NETDEV_UP:
228 pr_info("CAIF: %s(): up %s\n", __func__, dev->name); 230 netdev_info(dev, "up\n");
229 caifd = caif_get(dev); 231 caifd = caif_get(dev);
230 if (caifd == NULL) 232 if (caifd == NULL)
231 break; 233 break;
232 caifdev = netdev_priv(dev); 234 caifdev = netdev_priv(dev);
233 if (atomic_read(&caifd->state) == NETDEV_UP) { 235 if (atomic_read(&caifd->state) == NETDEV_UP) {
234 pr_info("CAIF: %s():%s already up\n", 236 netdev_info(dev, "already up\n");
235 __func__, dev->name);
236 break; 237 break;
237 } 238 }
238 atomic_set(&caifd->state, what); 239 atomic_set(&caifd->state, what);
@@ -273,7 +274,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
273 caifd = caif_get(dev); 274 caifd = caif_get(dev);
274 if (caifd == NULL) 275 if (caifd == NULL)
275 break; 276 break;
276 pr_info("CAIF: %s():going down %s\n", __func__, dev->name); 277 netdev_info(dev, "going down\n");
277 278
278 if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN || 279 if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
279 atomic_read(&caifd->state) == NETDEV_DOWN) 280 atomic_read(&caifd->state) == NETDEV_DOWN)
@@ -295,11 +296,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
295 caifd = caif_get(dev); 296 caifd = caif_get(dev);
296 if (caifd == NULL) 297 if (caifd == NULL)
297 break; 298 break;
298 pr_info("CAIF: %s(): down %s\n", __func__, dev->name); 299 netdev_info(dev, "down\n");
299 if (atomic_read(&caifd->in_use)) 300 if (atomic_read(&caifd->in_use))
300 pr_warning("CAIF: %s(): " 301 netdev_warn(dev,
301 "Unregistering an active CAIF device: %s\n", 302 "Unregistering an active CAIF device\n");
302 __func__, dev->name);
303 cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer); 303 cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
304 dev_put(dev); 304 dev_put(dev);
305 atomic_set(&caifd->state, what); 305 atomic_set(&caifd->state, what);
@@ -307,7 +307,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
307 307
308 case NETDEV_UNREGISTER: 308 case NETDEV_UNREGISTER:
309 caifd = caif_get(dev); 309 caifd = caif_get(dev);
310 pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name); 310 netdev_info(dev, "unregister\n");
311 atomic_set(&caifd->state, what); 311 atomic_set(&caifd->state, what);
312 caif_device_destroy(dev); 312 caif_device_destroy(dev);
313 break; 313 break;
@@ -391,7 +391,7 @@ static int __init caif_device_init(void)
391 int result; 391 int result;
392 cfg = cfcnfg_create(); 392 cfg = cfcnfg_create();
393 if (!cfg) { 393 if (!cfg) {
394 pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__); 394 pr_warn("can't create cfcnfg\n");
395 goto err_cfcnfg_create_failed; 395 goto err_cfcnfg_create_failed;
396 } 396 }
397 result = register_pernet_device(&caif_net_ops); 397 result = register_pernet_device(&caif_net_ops);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 8ce904786116..4d918f8f4e67 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/fs.h> 9#include <linux/fs.h>
8#include <linux/init.h> 10#include <linux/init.h>
9#include <linux/module.h> 11#include <linux/module.h>
@@ -28,9 +30,6 @@
28MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
29MODULE_ALIAS_NETPROTO(AF_CAIF); 31MODULE_ALIAS_NETPROTO(AF_CAIF);
30 32
31#define CAIF_DEF_SNDBUF (4096*10)
32#define CAIF_DEF_RCVBUF (4096*100)
33
34/* 33/*
35 * CAIF state is re-using the TCP socket states. 34 * CAIF state is re-using the TCP socket states.
36 * caif_states stored in sk_state reflect the state as reported by 35 * caif_states stored in sk_state reflect the state as reported by
@@ -157,9 +156,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
157 156
158 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 157 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
159 (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { 158 (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
160 trace_printk("CAIF: %s():" 159 pr_debug("sending flow OFF (queue len = %d %d)\n",
161 " sending flow OFF (queue len = %d %d)\n",
162 __func__,
163 atomic_read(&cf_sk->sk.sk_rmem_alloc), 160 atomic_read(&cf_sk->sk.sk_rmem_alloc),
164 sk_rcvbuf_lowwater(cf_sk)); 161 sk_rcvbuf_lowwater(cf_sk));
165 set_rx_flow_off(cf_sk); 162 set_rx_flow_off(cf_sk);
@@ -172,9 +169,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
172 return err; 169 return err;
173 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) { 170 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
174 set_rx_flow_off(cf_sk); 171 set_rx_flow_off(cf_sk);
175 trace_printk("CAIF: %s():" 172 pr_debug("sending flow OFF due to rmem_schedule\n");
176 " sending flow OFF due to rmem_schedule\n",
177 __func__);
178 dbfs_atomic_inc(&cnt.num_rx_flow_off); 173 dbfs_atomic_inc(&cnt.num_rx_flow_off);
179 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); 174 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
180 } 175 }
@@ -275,8 +270,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
275 break; 270 break;
276 271
277 default: 272 default:
278 pr_debug("CAIF: %s(): Unexpected flow command %d\n", 273 pr_debug("Unexpected flow command %d\n", flow);
279 __func__, flow);
280 } 274 }
281} 275}
282 276
@@ -536,8 +530,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
536 530
537 /* Slight paranoia, probably not needed. */ 531 /* Slight paranoia, probably not needed. */
538 if (unlikely(loopcnt++ > 1000)) { 532 if (unlikely(loopcnt++ > 1000)) {
539 pr_warning("CAIF: %s(): transmit retries failed," 533 pr_warn("transmit retries failed, error = %d\n", ret);
540 " error = %d\n", __func__, ret);
541 break; 534 break;
542 } 535 }
543 536
@@ -902,8 +895,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
902 cf_sk->maxframe = dev->mtu - (headroom + tailroom); 895 cf_sk->maxframe = dev->mtu - (headroom + tailroom);
903 dev_put(dev); 896 dev_put(dev);
904 if (cf_sk->maxframe < 1) { 897 if (cf_sk->maxframe < 1) {
905 pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n", 898 pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
906 __func__, dev->mtu);
907 err = -ENODEV; 899 err = -ENODEV;
908 goto out; 900 goto out;
909 } 901 }
@@ -1123,10 +1115,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
1123 /* Store the protocol */ 1115 /* Store the protocol */
1124 sk->sk_protocol = (unsigned char) protocol; 1116 sk->sk_protocol = (unsigned char) protocol;
1125 1117
1126 /* Sendbuf dictates the amount of outbound packets not yet sent */
1127 sk->sk_sndbuf = CAIF_DEF_SNDBUF;
1128 sk->sk_rcvbuf = CAIF_DEF_RCVBUF;
1129
1130 /* 1118 /*
1131 * Lock in order to try to stop someone from opening the socket 1119 * Lock in order to try to stop someone from opening the socket
1132 * too early. 1120 * too early.
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 1c29189b344d..41adafd18914 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -3,6 +3,9 @@
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
6#include <linux/kernel.h> 9#include <linux/kernel.h>
7#include <linux/stddef.h> 10#include <linux/stddef.h>
8#include <linux/slab.h> 11#include <linux/slab.h>
@@ -78,7 +81,7 @@ struct cfcnfg *cfcnfg_create(void)
78 /* Initiate this layer */ 81 /* Initiate this layer */
79 this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC); 82 this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
80 if (!this) { 83 if (!this) {
81 pr_warning("CAIF: %s(): Out of memory\n", __func__); 84 pr_warn("Out of memory\n");
82 return NULL; 85 return NULL;
83 } 86 }
84 this->mux = cfmuxl_create(); 87 this->mux = cfmuxl_create();
@@ -106,7 +109,7 @@ struct cfcnfg *cfcnfg_create(void)
106 layer_set_up(this->ctrl, this); 109 layer_set_up(this->ctrl, this);
107 return this; 110 return this;
108out_of_mem: 111out_of_mem:
109 pr_warning("CAIF: %s(): Out of memory\n", __func__); 112 pr_warn("Out of memory\n");
110 kfree(this->mux); 113 kfree(this->mux);
111 kfree(this->ctrl); 114 kfree(this->ctrl);
112 kfree(this); 115 kfree(this);
@@ -194,7 +197,7 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
194 caif_assert(adap_layer != NULL); 197 caif_assert(adap_layer != NULL);
195 channel_id = adap_layer->id; 198 channel_id = adap_layer->id;
196 if (adap_layer->dn == NULL || channel_id == 0) { 199 if (adap_layer->dn == NULL || channel_id == 0) {
197 pr_err("CAIF: %s():adap_layer->id is 0\n", __func__); 200 pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
198 ret = -ENOTCONN; 201 ret = -ENOTCONN;
199 goto end; 202 goto end;
200 } 203 }
@@ -204,9 +207,8 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
204 layer_set_up(servl, NULL); 207 layer_set_up(servl, NULL);
205 ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); 208 ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
206 if (servl == NULL) { 209 if (servl == NULL) {
207 pr_err("CAIF: %s(): PROTOCOL ERROR " 210 pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
208 "- Error removing service_layer Channel_Id(%d)", 211 channel_id);
209 __func__, channel_id);
210 ret = -EINVAL; 212 ret = -EINVAL;
211 goto end; 213 goto end;
212 } 214 }
@@ -216,18 +218,14 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
216 218
217 phyinfo = cfcnfg_get_phyinfo(cnfg, phyid); 219 phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
218 if (phyinfo == NULL) { 220 if (phyinfo == NULL) {
219 pr_warning("CAIF: %s(): " 221 pr_warn("No interface to send disconnect to\n");
220 "No interface to send disconnect to\n",
221 __func__);
222 ret = -ENODEV; 222 ret = -ENODEV;
223 goto end; 223 goto end;
224 } 224 }
225 if (phyinfo->id != phyid || 225 if (phyinfo->id != phyid ||
226 phyinfo->phy_layer->id != phyid || 226 phyinfo->phy_layer->id != phyid ||
227 phyinfo->frm_layer->id != phyid) { 227 phyinfo->frm_layer->id != phyid) {
228 pr_err("CAIF: %s(): " 228 pr_err("Inconsistency in phy registration\n");
229 "Inconsistency in phy registration\n",
230 __func__);
231 ret = -EINVAL; 229 ret = -EINVAL;
232 goto end; 230 goto end;
233 } 231 }
@@ -276,21 +274,20 @@ int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
276{ 274{
277 struct cflayer *frml; 275 struct cflayer *frml;
278 if (adap_layer == NULL) { 276 if (adap_layer == NULL) {
279 pr_err("CAIF: %s(): adap_layer is zero", __func__); 277 pr_err("adap_layer is zero\n");
280 return -EINVAL; 278 return -EINVAL;
281 } 279 }
282 if (adap_layer->receive == NULL) { 280 if (adap_layer->receive == NULL) {
283 pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__); 281 pr_err("adap_layer->receive is NULL\n");
284 return -EINVAL; 282 return -EINVAL;
285 } 283 }
286 if (adap_layer->ctrlcmd == NULL) { 284 if (adap_layer->ctrlcmd == NULL) {
287 pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__); 285 pr_err("adap_layer->ctrlcmd == NULL\n");
288 return -EINVAL; 286 return -EINVAL;
289 } 287 }
290 frml = cnfg->phy_layers[param->phyid].frm_layer; 288 frml = cnfg->phy_layers[param->phyid].frm_layer;
291 if (frml == NULL) { 289 if (frml == NULL) {
292 pr_err("CAIF: %s(): Specified PHY type does not exist!", 290 pr_err("Specified PHY type does not exist!\n");
293 __func__);
294 return -ENODEV; 291 return -ENODEV;
295 } 292 }
296 caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id); 293 caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
@@ -330,9 +327,7 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
330 struct net_device *netdev; 327 struct net_device *netdev;
331 328
332 if (adapt_layer == NULL) { 329 if (adapt_layer == NULL) {
333 pr_debug("CAIF: %s(): link setup response " 330 pr_debug("link setup response but no client exist, send linkdown back\n");
334 "but no client exist, send linkdown back\n",
335 __func__);
336 cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL); 331 cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
337 return; 332 return;
338 } 333 }
@@ -374,13 +369,11 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
374 servicel = cfdbgl_create(channel_id, &phyinfo->dev_info); 369 servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
375 break; 370 break;
376 default: 371 default:
377 pr_err("CAIF: %s(): Protocol error. " 372 pr_err("Protocol error. Link setup response - unknown channel type\n");
378 "Link setup response - unknown channel type\n",
379 __func__);
380 return; 373 return;
381 } 374 }
382 if (!servicel) { 375 if (!servicel) {
383 pr_warning("CAIF: %s(): Out of memory\n", __func__); 376 pr_warn("Out of memory\n");
384 return; 377 return;
385 } 378 }
386 layer_set_dn(servicel, cnfg->mux); 379 layer_set_dn(servicel, cnfg->mux);
@@ -418,7 +411,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
418 } 411 }
419 } 412 }
420 if (*phyid == 0) { 413 if (*phyid == 0) {
421 pr_err("CAIF: %s(): No Available PHY ID\n", __func__); 414 pr_err("No Available PHY ID\n");
422 return; 415 return;
423 } 416 }
424 417
@@ -427,7 +420,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
427 phy_driver = 420 phy_driver =
428 cfserl_create(CFPHYTYPE_FRAG, *phyid, stx); 421 cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
429 if (!phy_driver) { 422 if (!phy_driver) {
430 pr_warning("CAIF: %s(): Out of memory\n", __func__); 423 pr_warn("Out of memory\n");
431 return; 424 return;
432 } 425 }
433 426
@@ -436,7 +429,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
436 phy_driver = NULL; 429 phy_driver = NULL;
437 break; 430 break;
438 default: 431 default:
439 pr_err("CAIF: %s(): %d", __func__, phy_type); 432 pr_err("%d\n", phy_type);
440 return; 433 return;
441 break; 434 break;
442 } 435 }
@@ -455,7 +448,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
455 phy_layer->type = phy_type; 448 phy_layer->type = phy_type;
456 frml = cffrml_create(*phyid, fcs); 449 frml = cffrml_create(*phyid, fcs);
457 if (!frml) { 450 if (!frml) {
458 pr_warning("CAIF: %s(): Out of memory\n", __func__); 451 pr_warn("Out of memory\n");
459 return; 452 return;
460 } 453 }
461 cnfg->phy_layers[*phyid].frm_layer = frml; 454 cnfg->phy_layers[*phyid].frm_layer = frml;
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 563145fdc4c3..08f267a109aa 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/spinlock.h> 10#include <linux/spinlock.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -36,7 +38,7 @@ struct cflayer *cfctrl_create(void)
36 struct cfctrl *this = 38 struct cfctrl *this =
37 kmalloc(sizeof(struct cfctrl), GFP_ATOMIC); 39 kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
38 if (!this) { 40 if (!this) {
39 pr_warning("CAIF: %s(): Out of memory\n", __func__); 41 pr_warn("Out of memory\n");
40 return NULL; 42 return NULL;
41 } 43 }
42 caif_assert(offsetof(struct cfctrl, serv.layer) == 0); 44 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
@@ -132,9 +134,7 @@ struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
132 list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 134 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
133 if (cfctrl_req_eq(req, p)) { 135 if (cfctrl_req_eq(req, p)) {
134 if (p != first) 136 if (p != first)
135 pr_warning("CAIF: %s(): Requests are not " 137 pr_warn("Requests are not received in order\n");
136 "received in order\n",
137 __func__);
138 138
139 atomic_set(&ctrl->rsp_seq_no, 139 atomic_set(&ctrl->rsp_seq_no,
140 p->sequence_no); 140 p->sequence_no);
@@ -177,7 +177,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
177 int ret; 177 int ret;
178 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 178 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
179 if (!pkt) { 179 if (!pkt) {
180 pr_warning("CAIF: %s(): Out of memory\n", __func__); 180 pr_warn("Out of memory\n");
181 return; 181 return;
182 } 182 }
183 caif_assert(offsetof(struct cfctrl, serv.layer) == 0); 183 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
@@ -189,8 +189,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
189 ret = 189 ret =
190 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 190 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
191 if (ret < 0) { 191 if (ret < 0) {
192 pr_err("CAIF: %s(): Could not transmit enum message\n", 192 pr_err("Could not transmit enum message\n");
193 __func__);
194 cfpkt_destroy(pkt); 193 cfpkt_destroy(pkt);
195 } 194 }
196} 195}
@@ -208,7 +207,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
208 char utility_name[16]; 207 char utility_name[16];
209 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 208 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
210 if (!pkt) { 209 if (!pkt) {
211 pr_warning("CAIF: %s(): Out of memory\n", __func__); 210 pr_warn("Out of memory\n");
212 return -ENOMEM; 211 return -ENOMEM;
213 } 212 }
214 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP); 213 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
@@ -253,13 +252,13 @@ int cfctrl_linkup_request(struct cflayer *layer,
253 param->u.utility.paramlen); 252 param->u.utility.paramlen);
254 break; 253 break;
255 default: 254 default:
256 pr_warning("CAIF: %s():Request setup of bad link type = %d\n", 255 pr_warn("Request setup of bad link type = %d\n",
257 __func__, param->linktype); 256 param->linktype);
258 return -EINVAL; 257 return -EINVAL;
259 } 258 }
260 req = kzalloc(sizeof(*req), GFP_KERNEL); 259 req = kzalloc(sizeof(*req), GFP_KERNEL);
261 if (!req) { 260 if (!req) {
262 pr_warning("CAIF: %s(): Out of memory\n", __func__); 261 pr_warn("Out of memory\n");
263 return -ENOMEM; 262 return -ENOMEM;
264 } 263 }
265 req->client_layer = user_layer; 264 req->client_layer = user_layer;
@@ -276,8 +275,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
276 ret = 275 ret =
277 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 276 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
278 if (ret < 0) { 277 if (ret < 0) {
279 pr_err("CAIF: %s(): Could not transmit linksetup request\n", 278 pr_err("Could not transmit linksetup request\n");
280 __func__);
281 cfpkt_destroy(pkt); 279 cfpkt_destroy(pkt);
282 return -ENODEV; 280 return -ENODEV;
283 } 281 }
@@ -291,7 +289,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
291 struct cfctrl *cfctrl = container_obj(layer); 289 struct cfctrl *cfctrl = container_obj(layer);
292 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 290 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
293 if (!pkt) { 291 if (!pkt) {
294 pr_warning("CAIF: %s(): Out of memory\n", __func__); 292 pr_warn("Out of memory\n");
295 return -ENOMEM; 293 return -ENOMEM;
296 } 294 }
297 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); 295 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
@@ -300,8 +298,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
300 ret = 298 ret =
301 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 299 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
302 if (ret < 0) { 300 if (ret < 0) {
303 pr_err("CAIF: %s(): Could not transmit link-down request\n", 301 pr_err("Could not transmit link-down request\n");
304 __func__);
305 cfpkt_destroy(pkt); 302 cfpkt_destroy(pkt);
306 } 303 }
307 return ret; 304 return ret;
@@ -313,7 +310,7 @@ void cfctrl_sleep_req(struct cflayer *layer)
313 struct cfctrl *cfctrl = container_obj(layer); 310 struct cfctrl *cfctrl = container_obj(layer);
314 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 311 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
315 if (!pkt) { 312 if (!pkt) {
316 pr_warning("CAIF: %s(): Out of memory\n", __func__); 313 pr_warn("Out of memory\n");
317 return; 314 return;
318 } 315 }
319 cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP); 316 cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
@@ -330,7 +327,7 @@ void cfctrl_wake_req(struct cflayer *layer)
330 struct cfctrl *cfctrl = container_obj(layer); 327 struct cfctrl *cfctrl = container_obj(layer);
331 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 328 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
332 if (!pkt) { 329 if (!pkt) {
333 pr_warning("CAIF: %s(): Out of memory\n", __func__); 330 pr_warn("Out of memory\n");
334 return; 331 return;
335 } 332 }
336 cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE); 333 cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
@@ -347,7 +344,7 @@ void cfctrl_getstartreason_req(struct cflayer *layer)
347 struct cfctrl *cfctrl = container_obj(layer); 344 struct cfctrl *cfctrl = container_obj(layer);
348 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 345 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
349 if (!pkt) { 346 if (!pkt) {
350 pr_warning("CAIF: %s(): Out of memory\n", __func__); 347 pr_warn("Out of memory\n");
351 return; 348 return;
352 } 349 }
353 cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON); 350 cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
@@ -364,12 +361,11 @@ void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
364 struct cfctrl_request_info *p, *tmp; 361 struct cfctrl_request_info *p, *tmp;
365 struct cfctrl *ctrl = container_obj(layr); 362 struct cfctrl *ctrl = container_obj(layr);
366 spin_lock(&ctrl->info_list_lock); 363 spin_lock(&ctrl->info_list_lock);
367 pr_warning("CAIF: %s(): enter\n", __func__); 364 pr_warn("enter\n");
368 365
369 list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 366 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
370 if (p->client_layer == adap_layer) { 367 if (p->client_layer == adap_layer) {
371 pr_warning("CAIF: %s(): cancel req :%d\n", __func__, 368 pr_warn("cancel req :%d\n", p->sequence_no);
372 p->sequence_no);
373 list_del(&p->list); 369 list_del(&p->list);
374 kfree(p); 370 kfree(p);
375 } 371 }
@@ -520,9 +516,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
520 cfpkt_extr_head(pkt, &param, len); 516 cfpkt_extr_head(pkt, &param, len);
521 break; 517 break;
522 default: 518 default:
523 pr_warning("CAIF: %s(): Request setup " 519 pr_warn("Request setup - invalid link type (%d)\n",
524 "- invalid link type (%d)", 520 serv);
525 __func__, serv);
526 goto error; 521 goto error;
527 } 522 }
528 523
@@ -532,9 +527,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
532 527
533 if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) || 528 if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
534 cfpkt_erroneous(pkt)) { 529 cfpkt_erroneous(pkt)) {
535 pr_err("CAIF: %s(): Invalid O/E bit or parse " 530 pr_err("Invalid O/E bit or parse error on CAIF control channel\n");
536 "error on CAIF control channel",
537 __func__);
538 cfctrl->res.reject_rsp(cfctrl->serv.layer.up, 531 cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
539 0, 532 0,
540 req ? req->client_layer 533 req ? req->client_layer
@@ -556,8 +549,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
556 cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid); 549 cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
557 break; 550 break;
558 case CFCTRL_CMD_LINK_ERR: 551 case CFCTRL_CMD_LINK_ERR:
559 pr_err("CAIF: %s(): Frame Error Indication received\n", 552 pr_err("Frame Error Indication received\n");
560 __func__);
561 cfctrl->res.linkerror_ind(); 553 cfctrl->res.linkerror_ind();
562 break; 554 break;
563 case CFCTRL_CMD_ENUM: 555 case CFCTRL_CMD_ENUM:
@@ -576,7 +568,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
576 cfctrl->res.radioset_rsp(); 568 cfctrl->res.radioset_rsp();
577 break; 569 break;
578 default: 570 default:
579 pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__); 571 pr_err("Unrecognized Control Frame\n");
580 goto error; 572 goto error;
581 break; 573 break;
582 } 574 }
@@ -595,8 +587,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
595 case CAIF_CTRLCMD_FLOW_OFF_IND: 587 case CAIF_CTRLCMD_FLOW_OFF_IND:
596 spin_lock(&this->info_list_lock); 588 spin_lock(&this->info_list_lock);
597 if (!list_empty(&this->list)) { 589 if (!list_empty(&this->list)) {
598 pr_debug("CAIF: %s(): Received flow off in " 590 pr_debug("Received flow off in control layer\n");
599 "control layer", __func__);
600 } 591 }
601 spin_unlock(&this->info_list_lock); 592 spin_unlock(&this->info_list_lock);
602 break; 593 break;
@@ -620,7 +611,7 @@ static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
620 if (!ctrl->loop_linkused[linkid]) 611 if (!ctrl->loop_linkused[linkid])
621 goto found; 612 goto found;
622 spin_unlock(&ctrl->loop_linkid_lock); 613 spin_unlock(&ctrl->loop_linkid_lock);
623 pr_err("CAIF: %s(): Out of link-ids\n", __func__); 614 pr_err("Out of link-ids\n");
624 return -EINVAL; 615 return -EINVAL;
625found: 616found:
626 if (!ctrl->loop_linkused[linkid]) 617 if (!ctrl->loop_linkused[linkid])
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 676648cac8dd..496fda9ac66f 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/slab.h> 10#include <linux/slab.h>
9#include <net/caif/caif_layer.h> 11#include <net/caif/caif_layer.h>
@@ -17,7 +19,7 @@ struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
17{ 19{
18 struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 20 struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
19 if (!dbg) { 21 if (!dbg) {
20 pr_warning("CAIF: %s(): Out of memory\n", __func__); 22 pr_warn("Out of memory\n");
21 return NULL; 23 return NULL;
22 } 24 }
23 caif_assert(offsetof(struct cfsrvl, layer) == 0); 25 caif_assert(offsetof(struct cfsrvl, layer) == 0);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index ed9d53aff280..d3ed264ad6c4 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/spinlock.h> 10#include <linux/spinlock.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -26,7 +28,7 @@ struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
26{ 28{
27 struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 29 struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
28 if (!dgm) { 30 if (!dgm) {
29 pr_warning("CAIF: %s(): Out of memory\n", __func__); 31 pr_warn("Out of memory\n");
30 return NULL; 32 return NULL;
31 } 33 }
32 caif_assert(offsetof(struct cfsrvl, layer) == 0); 34 caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -49,14 +51,14 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
49 caif_assert(layr->ctrlcmd != NULL); 51 caif_assert(layr->ctrlcmd != NULL);
50 52
51 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { 53 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
52 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 54 pr_err("Packet is erroneous!\n");
53 cfpkt_destroy(pkt); 55 cfpkt_destroy(pkt);
54 return -EPROTO; 56 return -EPROTO;
55 } 57 }
56 58
57 if ((cmd & DGM_CMD_BIT) == 0) { 59 if ((cmd & DGM_CMD_BIT) == 0) {
58 if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) { 60 if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
59 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 61 pr_err("Packet is erroneous!\n");
60 cfpkt_destroy(pkt); 62 cfpkt_destroy(pkt);
61 return -EPROTO; 63 return -EPROTO;
62 } 64 }
@@ -75,8 +77,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
75 return 0; 77 return 0;
76 default: 78 default:
77 cfpkt_destroy(pkt); 79 cfpkt_destroy(pkt);
78 pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n", 80 pr_info("Unknown datagram control %d (0x%x)\n", cmd, cmd);
79 __func__, cmd, cmd);
80 return -EPROTO; 81 return -EPROTO;
81 } 82 }
82} 83}
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index e86a4ca3b217..a445043931ae 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -6,6 +6,8 @@
6 * License terms: GNU General Public License (GPL) version 2 6 * License terms: GNU General Public License (GPL) version 2
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
10
9#include <linux/stddef.h> 11#include <linux/stddef.h>
10#include <linux/spinlock.h> 12#include <linux/spinlock.h>
11#include <linux/slab.h> 13#include <linux/slab.h>
@@ -32,7 +34,7 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
32{ 34{
33 struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC); 35 struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
34 if (!this) { 36 if (!this) {
35 pr_warning("CAIF: %s(): Out of memory\n", __func__); 37 pr_warn("Out of memory\n");
36 return NULL; 38 return NULL;
37 } 39 }
38 caif_assert(offsetof(struct cffrml, layer) == 0); 40 caif_assert(offsetof(struct cffrml, layer) == 0);
@@ -83,7 +85,7 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
83 85
84 if (cfpkt_setlen(pkt, len) < 0) { 86 if (cfpkt_setlen(pkt, len) < 0) {
85 ++cffrml_rcv_error; 87 ++cffrml_rcv_error;
86 pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len); 88 pr_err("Framing length error (%d)\n", len);
87 cfpkt_destroy(pkt); 89 cfpkt_destroy(pkt);
88 return -EPROTO; 90 return -EPROTO;
89 } 91 }
@@ -99,14 +101,14 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
99 cfpkt_add_trail(pkt, &tmp, 2); 101 cfpkt_add_trail(pkt, &tmp, 2);
100 ++cffrml_rcv_error; 102 ++cffrml_rcv_error;
101 ++cffrml_rcv_checsum_error; 103 ++cffrml_rcv_checsum_error;
102 pr_info("CAIF: %s(): Frame checksum error " 104 pr_info("Frame checksum error (0x%x != 0x%x)\n",
103 "(0x%x != 0x%x)\n", __func__, hdrchks, pktchks); 105 hdrchks, pktchks);
104 return -EILSEQ; 106 return -EILSEQ;
105 } 107 }
106 } 108 }
107 if (cfpkt_erroneous(pkt)) { 109 if (cfpkt_erroneous(pkt)) {
108 ++cffrml_rcv_error; 110 ++cffrml_rcv_error;
109 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 111 pr_err("Packet is erroneous!\n");
110 cfpkt_destroy(pkt); 112 cfpkt_destroy(pkt);
111 return -EPROTO; 113 return -EPROTO;
112 } 114 }
@@ -132,7 +134,7 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
132 cfpkt_add_head(pkt, &tmp, 2); 134 cfpkt_add_head(pkt, &tmp, 2);
133 cfpkt_info(pkt)->hdr_len += 2; 135 cfpkt_info(pkt)->hdr_len += 2;
134 if (cfpkt_erroneous(pkt)) { 136 if (cfpkt_erroneous(pkt)) {
135 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 137 pr_err("Packet is erroneous!\n");
136 return -EPROTO; 138 return -EPROTO;
137 } 139 }
138 ret = layr->dn->transmit(layr->dn, pkt); 140 ret = layr->dn->transmit(layr->dn, pkt);
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 80c8d332b258..46f34b2e0478 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -3,6 +3,9 @@
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com 3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
6#include <linux/stddef.h> 9#include <linux/stddef.h>
7#include <linux/spinlock.h> 10#include <linux/spinlock.h>
8#include <linux/slab.h> 11#include <linux/slab.h>
@@ -190,7 +193,7 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
190 u8 id; 193 u8 id;
191 struct cflayer *up; 194 struct cflayer *up;
192 if (cfpkt_extr_head(pkt, &id, 1) < 0) { 195 if (cfpkt_extr_head(pkt, &id, 1) < 0) {
193 pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__); 196 pr_err("erroneous Caif Packet\n");
194 cfpkt_destroy(pkt); 197 cfpkt_destroy(pkt);
195 return -EPROTO; 198 return -EPROTO;
196 } 199 }
@@ -199,8 +202,8 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
199 up = get_up(muxl, id); 202 up = get_up(muxl, id);
200 spin_unlock(&muxl->receive_lock); 203 spin_unlock(&muxl->receive_lock);
201 if (up == NULL) { 204 if (up == NULL) {
202 pr_info("CAIF: %s():Received data on unknown link ID = %d " 205 pr_info("Received data on unknown link ID = %d (0x%x) up == NULL",
203 "(0x%x) up == NULL", __func__, id, id); 206 id, id);
204 cfpkt_destroy(pkt); 207 cfpkt_destroy(pkt);
205 /* 208 /*
206 * Don't return ERROR, since modem misbehaves and sends out 209 * Don't return ERROR, since modem misbehaves and sends out
@@ -223,9 +226,8 @@ static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
223 struct caif_payload_info *info = cfpkt_info(pkt); 226 struct caif_payload_info *info = cfpkt_info(pkt);
224 dn = get_dn(muxl, cfpkt_info(pkt)->dev_info); 227 dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
225 if (dn == NULL) { 228 if (dn == NULL) {
226 pr_warning("CAIF: %s(): Send data on unknown phy " 229 pr_warn("Send data on unknown phy ID = %d (0x%x)\n",
227 "ID = %d (0x%x)\n", 230 info->dev_info->id, info->dev_info->id);
228 __func__, info->dev_info->id, info->dev_info->id);
229 return -ENOTCONN; 231 return -ENOTCONN;
230 } 232 }
231 info->hdr_len += 1; 233 info->hdr_len += 1;
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index c49a6695793a..d7e865e2ff65 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/string.h> 9#include <linux/string.h>
8#include <linux/skbuff.h> 10#include <linux/skbuff.h>
9#include <linux/hardirq.h> 11#include <linux/hardirq.h>
@@ -12,11 +14,12 @@
12#define PKT_PREFIX 48 14#define PKT_PREFIX 48
13#define PKT_POSTFIX 2 15#define PKT_POSTFIX 2
14#define PKT_LEN_WHEN_EXTENDING 128 16#define PKT_LEN_WHEN_EXTENDING 128
15#define PKT_ERROR(pkt, errmsg) do { \ 17#define PKT_ERROR(pkt, errmsg) \
16 cfpkt_priv(pkt)->erronous = true; \ 18do { \
17 skb_reset_tail_pointer(&pkt->skb); \ 19 cfpkt_priv(pkt)->erronous = true; \
18 pr_warning("CAIF: " errmsg);\ 20 skb_reset_tail_pointer(&pkt->skb); \
19 } while (0) 21 pr_warn(errmsg); \
22} while (0)
20 23
21struct cfpktq { 24struct cfpktq {
22 struct sk_buff_head head; 25 struct sk_buff_head head;
@@ -130,13 +133,13 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
130 return -EPROTO; 133 return -EPROTO;
131 134
132 if (unlikely(len > skb->len)) { 135 if (unlikely(len > skb->len)) {
133 PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n"); 136 PKT_ERROR(pkt, "read beyond end of packet\n");
134 return -EPROTO; 137 return -EPROTO;
135 } 138 }
136 139
137 if (unlikely(len > skb_headlen(skb))) { 140 if (unlikely(len > skb_headlen(skb))) {
138 if (unlikely(skb_linearize(skb) != 0)) { 141 if (unlikely(skb_linearize(skb) != 0)) {
139 PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n"); 142 PKT_ERROR(pkt, "linearize failed\n");
140 return -EPROTO; 143 return -EPROTO;
141 } 144 }
142 } 145 }
@@ -156,11 +159,11 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
156 return -EPROTO; 159 return -EPROTO;
157 160
158 if (unlikely(skb_linearize(skb) != 0)) { 161 if (unlikely(skb_linearize(skb) != 0)) {
159 PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n"); 162 PKT_ERROR(pkt, "linearize failed\n");
160 return -EPROTO; 163 return -EPROTO;
161 } 164 }
162 if (unlikely(skb->data + len > skb_tail_pointer(skb))) { 165 if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
163 PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n"); 166 PKT_ERROR(pkt, "read beyond end of packet\n");
164 return -EPROTO; 167 return -EPROTO;
165 } 168 }
166 from = skb_tail_pointer(skb) - len; 169 from = skb_tail_pointer(skb) - len;
@@ -202,7 +205,7 @@ int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
202 205
203 /* Make sure data is writable */ 206 /* Make sure data is writable */
204 if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) { 207 if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
205 PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n"); 208 PKT_ERROR(pkt, "cow failed\n");
206 return -EPROTO; 209 return -EPROTO;
207 } 210 }
208 /* 211 /*
@@ -211,8 +214,7 @@ int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
211 * lengths of the top SKB. 214 * lengths of the top SKB.
212 */ 215 */
213 if (lastskb != skb) { 216 if (lastskb != skb) {
214 pr_warning("CAIF: %s(): Packet is non-linear\n", 217 pr_warn("Packet is non-linear\n");
215 __func__);
216 skb->len += len; 218 skb->len += len;
217 skb->data_len += len; 219 skb->data_len += len;
218 } 220 }
@@ -242,14 +244,14 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
242 if (unlikely(is_erronous(pkt))) 244 if (unlikely(is_erronous(pkt)))
243 return -EPROTO; 245 return -EPROTO;
244 if (unlikely(skb_headroom(skb) < len)) { 246 if (unlikely(skb_headroom(skb) < len)) {
245 PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n"); 247 PKT_ERROR(pkt, "no headroom\n");
246 return -EPROTO; 248 return -EPROTO;
247 } 249 }
248 250
249 /* Make sure data is writable */ 251 /* Make sure data is writable */
250 ret = skb_cow_data(skb, 0, &lastskb); 252 ret = skb_cow_data(skb, 0, &lastskb);
251 if (unlikely(ret < 0)) { 253 if (unlikely(ret < 0)) {
252 PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n"); 254 PKT_ERROR(pkt, "cow failed\n");
253 return ret; 255 return ret;
254 } 256 }
255 257
@@ -283,7 +285,7 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
283 if (unlikely(is_erronous(pkt))) 285 if (unlikely(is_erronous(pkt)))
284 return -EPROTO; 286 return -EPROTO;
285 if (unlikely(skb_linearize(&pkt->skb) != 0)) { 287 if (unlikely(skb_linearize(&pkt->skb) != 0)) {
286 PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n"); 288 PKT_ERROR(pkt, "linearize failed\n");
287 return -EPROTO; 289 return -EPROTO;
288 } 290 }
289 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); 291 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
@@ -309,7 +311,7 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
309 311
310 /* Need to expand SKB */ 312 /* Need to expand SKB */
311 if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len))) 313 if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len)))
312 PKT_ERROR(pkt, "cfpkt_setlen: skb_pad_trail failed\n"); 314 PKT_ERROR(pkt, "skb_pad_trail failed\n");
313 315
314 return cfpkt_getlen(pkt); 316 return cfpkt_getlen(pkt);
315} 317}
@@ -380,8 +382,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
380 return NULL; 382 return NULL;
381 383
382 if (skb->data + pos > skb_tail_pointer(skb)) { 384 if (skb->data + pos > skb_tail_pointer(skb)) {
383 PKT_ERROR(pkt, 385 PKT_ERROR(pkt, "trying to split beyond end of packet\n");
384 "cfpkt_split: trying to split beyond end of packet");
385 return NULL; 386 return NULL;
386 } 387 }
387 388
@@ -455,17 +456,17 @@ int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
455 return -EPROTO; 456 return -EPROTO;
456 /* Make sure SKB is writable */ 457 /* Make sure SKB is writable */
457 if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { 458 if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
458 PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n"); 459 PKT_ERROR(pkt, "skb_cow_data failed\n");
459 return -EPROTO; 460 return -EPROTO;
460 } 461 }
461 462
462 if (unlikely(skb_linearize(skb) != 0)) { 463 if (unlikely(skb_linearize(skb) != 0)) {
463 PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n"); 464 PKT_ERROR(pkt, "linearize failed\n");
464 return -EPROTO; 465 return -EPROTO;
465 } 466 }
466 467
467 if (unlikely(skb_tailroom(skb) < buflen)) { 468 if (unlikely(skb_tailroom(skb) < buflen)) {
468 PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n"); 469 PKT_ERROR(pkt, "buffer too short - failed\n");
469 return -EPROTO; 470 return -EPROTO;
470 } 471 }
471 472
@@ -483,14 +484,13 @@ int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
483 return -EPROTO; 484 return -EPROTO;
484 485
485 if (unlikely(buflen > skb->len)) { 486 if (unlikely(buflen > skb->len)) {
486 PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large " 487 PKT_ERROR(pkt, "buflen too large - failed\n");
487 "- failed\n");
488 return -EPROTO; 488 return -EPROTO;
489 } 489 }
490 490
491 if (unlikely(buflen > skb_headlen(skb))) { 491 if (unlikely(buflen > skb_headlen(skb))) {
492 if (unlikely(skb_linearize(skb) != 0)) { 492 if (unlikely(skb_linearize(skb) != 0)) {
493 PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n"); 493 PKT_ERROR(pkt, "linearize failed\n");
494 return -EPROTO; 494 return -EPROTO;
495 } 495 }
496 } 496 }
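
The PKT_ERROR rewrite above is a standard multi-statement macro cleanup: wrapping the statements in do { ... } while (0) turns the expansion into a single statement, so the macro composes safely with unbraced if/else. A small sketch of why the wrapper matters (names are illustrative):

#include <stdio.h>

/* Without the do/while(0) wrapper, only the first statement of the
 * macro body would be guarded by the if below; the rest would run
 * unconditionally and the else would not even parse. */
#define PKT_ERROR_DEMO(msg)			\
do {						\
	puts("marking packet erroneous");	\
	puts(msg);				\
} while (0)

int main(void)
{
	int bad = 0;

	if (bad)
		PKT_ERROR_DEMO("read beyond end of packet");
	else
		puts("packet ok");
	return 0;
}
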
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 9a699242d104..bde8481e8d25 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/spinlock.h> 10#include <linux/spinlock.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -48,7 +50,7 @@ struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
48 kzalloc(sizeof(struct cfrfml), GFP_ATOMIC); 50 kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
49 51
50 if (!this) { 52 if (!this) {
51 pr_warning("CAIF: %s(): Out of memory\n", __func__); 53 pr_warn("Out of memory\n");
52 return NULL; 54 return NULL;
53 } 55 }
54 56
@@ -178,9 +180,7 @@ out:
178 cfpkt_destroy(rfml->incomplete_frm); 180 cfpkt_destroy(rfml->incomplete_frm);
179 rfml->incomplete_frm = NULL; 181 rfml->incomplete_frm = NULL;
180 182
181 pr_info("CAIF: %s(): " 183 pr_info("Connection error %d triggered on RFM link\n", err);
182 "Connection error %d triggered on RFM link\n",
183 __func__, err);
184 184
185 /* Trigger connection error upon failure.*/ 185 /* Trigger connection error upon failure.*/
186 layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 186 layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
@@ -280,9 +280,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
280out: 280out:
281 281
282 if (err != 0) { 282 if (err != 0) {
283 pr_info("CAIF: %s(): " 283 pr_info("Connection error %d triggered on RFM link\n", err);
284 "Connection error %d triggered on RFM link\n",
285 __func__, err);
286 /* Trigger connection error upon failure.*/ 284 /* Trigger connection error upon failure.*/
287 285
288 layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 286 layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index a11fbd68a13d..9297f7dea9d8 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/spinlock.h> 10#include <linux/spinlock.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -34,7 +36,7 @@ struct cflayer *cfserl_create(int type, int instance, bool use_stx)
34{ 36{
35 struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC); 37 struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
36 if (!this) { 38 if (!this) {
37 pr_warning("CAIF: %s(): Out of memory\n", __func__); 39 pr_warn("Out of memory\n");
38 return NULL; 40 return NULL;
39 } 41 }
40 caif_assert(offsetof(struct cfserl, layer) == 0); 42 caif_assert(offsetof(struct cfserl, layer) == 0);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index f40939a91211..ab5e542526bf 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/kernel.h> 9#include <linux/kernel.h>
8#include <linux/types.h> 10#include <linux/types.h>
9#include <linux/errno.h> 11#include <linux/errno.h>
@@ -79,8 +81,7 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
79 layr->up->ctrlcmd(layr->up, ctrl, phyid); 81 layr->up->ctrlcmd(layr->up, ctrl, phyid);
80 break; 82 break;
81 default: 83 default:
82 pr_warning("CAIF: %s(): " 84 pr_warn("Unexpected ctrl in cfsrvl (%d)\n", ctrl);
83 "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl);
84 /* We have both modem and phy flow on, send flow on */ 85 /* We have both modem and phy flow on, send flow on */
85 layr->up->ctrlcmd(layr->up, ctrl, phyid); 86 layr->up->ctrlcmd(layr->up, ctrl, phyid);
86 service->phy_flow_on = true; 87 service->phy_flow_on = true;
@@ -107,14 +108,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
107 u8 flow_on = SRVL_FLOW_ON; 108 u8 flow_on = SRVL_FLOW_ON;
108 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); 109 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
109 if (!pkt) { 110 if (!pkt) {
110 pr_warning("CAIF: %s(): Out of memory\n", 111 pr_warn("Out of memory\n");
111 __func__);
112 return -ENOMEM; 112 return -ENOMEM;
113 } 113 }
114 114
115 if (cfpkt_add_head(pkt, &flow_on, 1) < 0) { 115 if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
116 pr_err("CAIF: %s(): Packet is erroneous!\n", 116 pr_err("Packet is erroneous!\n");
117 __func__);
118 cfpkt_destroy(pkt); 117 cfpkt_destroy(pkt);
119 return -EPROTO; 118 return -EPROTO;
120 } 119 }
@@ -131,14 +130,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
131 u8 flow_off = SRVL_FLOW_OFF; 130 u8 flow_off = SRVL_FLOW_OFF;
132 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); 131 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
133 if (!pkt) { 132 if (!pkt) {
134 pr_warning("CAIF: %s(): Out of memory\n", 133 pr_warn("Out of memory\n");
135 __func__);
136 return -ENOMEM; 134 return -ENOMEM;
137 } 135 }
138 136
139 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { 137 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
140 pr_err("CAIF: %s(): Packet is erroneous!\n", 138 pr_err("Packet is erroneous!\n");
141 __func__);
142 cfpkt_destroy(pkt); 139 cfpkt_destroy(pkt);
143 return -EPROTO; 140 return -EPROTO;
144 } 141 }
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 02795aff57a4..efad410e4c82 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/kernel.h> 9#include <linux/kernel.h>
8#include <linux/types.h> 10#include <linux/types.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -26,7 +28,7 @@ struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
26{ 28{
27 struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 29 struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
28 if (!util) { 30 if (!util) {
29 pr_warning("CAIF: %s(): Out of memory\n", __func__); 31 pr_warn("Out of memory\n");
30 return NULL; 32 return NULL;
31 } 33 }
32 caif_assert(offsetof(struct cfsrvl, layer) == 0); 34 caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -47,7 +49,7 @@ static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
47 caif_assert(layr->up->receive != NULL); 49 caif_assert(layr->up->receive != NULL);
48 caif_assert(layr->up->ctrlcmd != NULL); 50 caif_assert(layr->up->ctrlcmd != NULL);
49 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { 51 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
50 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 52 pr_err("Packet is erroneous!\n");
51 cfpkt_destroy(pkt); 53 cfpkt_destroy(pkt);
52 return -EPROTO; 54 return -EPROTO;
53 } 55 }
@@ -64,16 +66,14 @@ static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
64 cfpkt_destroy(pkt); 66 cfpkt_destroy(pkt);
65 return 0; 67 return 0;
66 case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */ 68 case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */
67 pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n", 69 pr_err("REMOTE SHUTDOWN REQUEST RECEIVED\n");
68 __func__);
69 layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0); 70 layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0);
70 service->open = false; 71 service->open = false;
71 cfpkt_destroy(pkt); 72 cfpkt_destroy(pkt);
72 return 0; 73 return 0;
73 default: 74 default:
74 cfpkt_destroy(pkt); 75 cfpkt_destroy(pkt);
75 pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n", 76 pr_warn("Unknown service control %d (0x%x)\n", cmd, cmd);
76 __func__, cmd, cmd);
77 return -EPROTO; 77 return -EPROTO;
78 } 78 }
79} 79}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 77cc09faac9a..3b425b189a99 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/stddef.h> 9#include <linux/stddef.h>
8#include <linux/slab.h> 10#include <linux/slab.h>
9#include <net/caif/caif_layer.h> 11#include <net/caif/caif_layer.h>
@@ -25,7 +27,7 @@ struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
25{ 27{
26 struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 28 struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
27 if (!vei) { 29 if (!vei) {
28 pr_warning("CAIF: %s(): Out of memory\n", __func__); 30 pr_warn("Out of memory\n");
29 return NULL; 31 return NULL;
30 } 32 }
31 caif_assert(offsetof(struct cfsrvl, layer) == 0); 33 caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -47,7 +49,7 @@ static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
47 49
48 50
49 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) { 51 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
50 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 52 pr_err("Packet is erroneous!\n");
51 cfpkt_destroy(pkt); 53 cfpkt_destroy(pkt);
52 return -EPROTO; 54 return -EPROTO;
53 } 55 }
@@ -67,8 +69,7 @@ static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
67 cfpkt_destroy(pkt); 69 cfpkt_destroy(pkt);
68 return 0; 70 return 0;
69 default: /* SET RS232 PIN */ 71 default: /* SET RS232 PIN */
70 pr_warning("CAIF: %s():Unknown VEI control packet %d (0x%x)!\n", 72 pr_warn("Unknown VEI control packet %d (0x%x)!\n", cmd, cmd);
71 __func__, cmd, cmd);
72 cfpkt_destroy(pkt); 73 cfpkt_destroy(pkt);
73 return -EPROTO; 74 return -EPROTO;
74 } 75 }
@@ -86,7 +87,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
86 caif_assert(layr->dn->transmit != NULL); 87 caif_assert(layr->dn->transmit != NULL);
87 88
88 if (cfpkt_add_head(pkt, &tmp, 1) < 0) { 89 if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
89 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 90 pr_err("Packet is erroneous!\n");
90 return -EPROTO; 91 return -EPROTO;
91 } 92 }
92 93
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index ada6ee2d48f5..bf6fef2a0eff 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -4,6 +4,8 @@
4 * License terms: GNU General Public License (GPL) version 2 4 * License terms: GNU General Public License (GPL) version 2
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
8
7#include <linux/kernel.h> 9#include <linux/kernel.h>
8#include <linux/types.h> 10#include <linux/types.h>
9#include <linux/slab.h> 11#include <linux/slab.h>
@@ -21,7 +23,7 @@ struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
21{ 23{
22 struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC); 24 struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
23 if (!vid) { 25 if (!vid) {
24 pr_warning("CAIF: %s(): Out of memory\n", __func__); 26 pr_warn("Out of memory\n");
25 return NULL; 27 return NULL;
26 } 28 }
27 caif_assert(offsetof(struct cfsrvl, layer) == 0); 29 caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -38,7 +40,7 @@ static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt)
38{ 40{
39 u32 videoheader; 41 u32 videoheader;
40 if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) { 42 if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) {
41 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__); 43 pr_err("Packet is erroneous!\n");
42 cfpkt_destroy(pkt); 44 cfpkt_destroy(pkt);
43 return -EPROTO; 45 return -EPROTO;
44 } 46 }
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 4293e190ec53..84a422c98941 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -5,6 +5,8 @@
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 */ 6 */
7 7
8#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
9
8#include <linux/version.h> 10#include <linux/version.h>
9#include <linux/fs.h> 11#include <linux/fs.h>
10#include <linux/init.h> 12#include <linux/init.h>
@@ -28,9 +30,6 @@
28#define CONNECT_TIMEOUT (5 * HZ) 30#define CONNECT_TIMEOUT (5 * HZ)
29#define CAIF_NET_DEFAULT_QUEUE_LEN 500 31#define CAIF_NET_DEFAULT_QUEUE_LEN 500
30 32
31#undef pr_debug
32#define pr_debug pr_warning
33
34/*This list is protected by the rtnl lock. */ 33/*This list is protected by the rtnl lock. */
35static LIST_HEAD(chnl_net_list); 34static LIST_HEAD(chnl_net_list);
36 35
@@ -142,8 +141,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
142 int phyid) 141 int phyid)
143{ 142{
144 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); 143 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
145 pr_debug("CAIF: %s(): NET flowctrl func called flow: %s\n", 144 pr_debug("NET flowctrl func called flow: %s\n",
146 __func__,
147 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" : 145 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
148 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" : 146 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :
149 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" : 147 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
@@ -196,12 +194,12 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
196 priv = netdev_priv(dev); 194 priv = netdev_priv(dev);
197 195
198 if (skb->len > priv->netdev->mtu) { 196 if (skb->len > priv->netdev->mtu) {
199 pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__); 197 pr_warn("Size of skb exceeded MTU\n");
200 return -ENOSPC; 198 return -ENOSPC;
201 } 199 }
202 200
203 if (!priv->flowenabled) { 201 if (!priv->flowenabled) {
204 pr_debug("CAIF: %s(): dropping packets flow off\n", __func__); 202 pr_debug("dropping packets flow off\n");
205 return NETDEV_TX_BUSY; 203 return NETDEV_TX_BUSY;
206 } 204 }
207 205
@@ -237,7 +235,7 @@ static int chnl_net_open(struct net_device *dev)
237 ASSERT_RTNL(); 235 ASSERT_RTNL();
238 priv = netdev_priv(dev); 236 priv = netdev_priv(dev);
239 if (!priv) { 237 if (!priv) {
240 pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__); 238 pr_debug("chnl_net_open: no priv\n");
241 return -ENODEV; 239 return -ENODEV;
242 } 240 }
243 241
@@ -246,18 +244,17 @@ static int chnl_net_open(struct net_device *dev)
246 result = caif_connect_client(&priv->conn_req, &priv->chnl, 244 result = caif_connect_client(&priv->conn_req, &priv->chnl,
247 &llifindex, &headroom, &tailroom); 245 &llifindex, &headroom, &tailroom);
248 if (result != 0) { 246 if (result != 0) {
249 pr_debug("CAIF: %s(): err: " 247 pr_debug("err: "
250 "Unable to register and open device," 248 "Unable to register and open device,"
251 " Err:%d\n", 249 " Err:%d\n",
252 __func__, 250 result);
253 result);
254 goto error; 251 goto error;
255 } 252 }
256 253
257 lldev = dev_get_by_index(dev_net(dev), llifindex); 254 lldev = dev_get_by_index(dev_net(dev), llifindex);
258 255
259 if (lldev == NULL) { 256 if (lldev == NULL) {
260 pr_debug("CAIF: %s(): no interface?\n", __func__); 257 pr_debug("no interface?\n");
261 result = -ENODEV; 258 result = -ENODEV;
262 goto error; 259 goto error;
263 } 260 }
@@ -279,9 +276,7 @@ static int chnl_net_open(struct net_device *dev)
279 dev_put(lldev); 276 dev_put(lldev);
280 277
281 if (mtu < 100) { 278 if (mtu < 100) {
282 pr_warning("CAIF: %s(): " 279 pr_warn("CAIF Interface MTU too small (%d)\n", mtu);
283 "CAIF Interface MTU too small (%d)\n",
284 __func__, mtu);
285 result = -ENODEV; 280 result = -ENODEV;
286 goto error; 281 goto error;
287 } 282 }
@@ -296,33 +291,32 @@ static int chnl_net_open(struct net_device *dev)
296 rtnl_lock(); 291 rtnl_lock();
297 292
298 if (result == -ERESTARTSYS) { 293 if (result == -ERESTARTSYS) {
299 pr_debug("CAIF: %s(): wait_event_interruptible" 294 pr_debug("wait_event_interruptible woken by a signal\n");
300 " woken by a signal\n", __func__);
301 result = -ERESTARTSYS; 295 result = -ERESTARTSYS;
302 goto error; 296 goto error;
303 } 297 }
304 298
305 if (result == 0) { 299 if (result == 0) {
306 pr_debug("CAIF: %s(): connect timeout\n", __func__); 300 pr_debug("connect timeout\n");
307 caif_disconnect_client(&priv->chnl); 301 caif_disconnect_client(&priv->chnl);
308 priv->state = CAIF_DISCONNECTED; 302 priv->state = CAIF_DISCONNECTED;
309 pr_debug("CAIF: %s(): state disconnected\n", __func__); 303 pr_debug("state disconnected\n");
310 result = -ETIMEDOUT; 304 result = -ETIMEDOUT;
311 goto error; 305 goto error;
312 } 306 }
313 307
314 if (priv->state != CAIF_CONNECTED) { 308 if (priv->state != CAIF_CONNECTED) {
315 pr_debug("CAIF: %s(): connect failed\n", __func__); 309 pr_debug("connect failed\n");
316 result = -ECONNREFUSED; 310 result = -ECONNREFUSED;
317 goto error; 311 goto error;
318 } 312 }
319 pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__); 313 pr_debug("CAIF Netdevice connected\n");
320 return 0; 314 return 0;
321 315
322error: 316error:
323 caif_disconnect_client(&priv->chnl); 317 caif_disconnect_client(&priv->chnl);
324 priv->state = CAIF_DISCONNECTED; 318 priv->state = CAIF_DISCONNECTED;
325 pr_debug("CAIF: %s(): state disconnected\n", __func__); 319 pr_debug("state disconnected\n");
326 return result; 320 return result;
327 321
328} 322}
@@ -413,7 +407,7 @@ static void caif_netlink_parms(struct nlattr *data[],
413 struct caif_connect_request *conn_req) 407 struct caif_connect_request *conn_req)
414{ 408{
415 if (!data) { 409 if (!data) {
416 pr_warning("CAIF: %s: no params data found\n", __func__); 410 pr_warn("no params data found\n");
417 return; 411 return;
418 } 412 }
419 if (data[IFLA_CAIF_IPV4_CONNID]) 413 if (data[IFLA_CAIF_IPV4_CONNID])
@@ -442,8 +436,7 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
442 436
443 ret = register_netdevice(dev); 437 ret = register_netdevice(dev);
444 if (ret) 438 if (ret)
445 pr_warning("CAIF: %s(): device rtml registration failed\n", 439 pr_warn("device rtml registration failed\n");
446 __func__);
447 return ret; 440 return ret;
448} 441}
449 442
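
The two lines deleted from chnl_net.c were a local hack that redefined pr_debug as pr_warning, forcing every debug message in the file to print at warning severity. With the file-wide pr_fmt in place there is no reason to keep it, and pr_debug regains its normal semantics (compiled out, or gated by dynamic debug, depending on the build). A sketch of the pattern being removed (macros illustrative, not the kernel's definitions):

#include <stdio.h>

#define pr_warn(fmt, ...) fprintf(stderr, "warn: " fmt, ##__VA_ARGS__)

#ifdef FORCE_DEBUG_AS_WARNING
#define pr_debug pr_warn			/* the removed hack */
#else
#define pr_debug(fmt, ...) do { } while (0)	/* normal: compiled out */
#endif

int main(void)
{
	pr_debug("state disconnected\n");	/* silent unless forced */
	pr_warn("Size of skb exceeded MTU\n");
	return 0;
}
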
diff --git a/net/can/raw.c b/net/can/raw.c
index a10e3338f084..7d77e67e57af 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -647,12 +647,12 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
647 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); 647 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
648 if (err < 0) 648 if (err < 0)
649 goto free_skb; 649 goto free_skb;
650 err = sock_tx_timestamp(msg, sk, skb_tx(skb)); 650 err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
651 if (err < 0) 651 if (err < 0)
652 goto free_skb; 652 goto free_skb;
653 653
654 /* to be able to check the received tx sock reference in raw_rcv() */ 654 /* to be able to check the received tx sock reference in raw_rcv() */
655 skb_tx(skb)->prevent_sk_orphan = 1; 655 skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
656 656
657 skb->dev = dev; 657 skb->dev = dev;
658 skb->sk = sk; 658 skb->sk = sk;
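
raw_sendmsg now reaches the transmit flags through skb_shinfo(skb)->tx_flags and sets a named bit, SKBTX_DRV_NEEDS_SK_REF, instead of poking a bitfield behind the old skb_tx() accessor; sock_tx_timestamp() correspondingly takes a pointer to the flags word rather than the old accessor struct. A sketch of the bitfield-to-flags-word conversion (the bit values are illustrative, not the kernel's SKBTX_* definitions):

#include <stdio.h>

/* One flags byte with named bits replaces per-purpose bitfields. */
enum {
	SKBTX_HW_TSTAMP_DEMO        = 1 << 0,
	SKBTX_SW_TSTAMP_DEMO        = 1 << 1,
	SKBTX_DRV_NEEDS_SK_REF_DEMO = 1 << 2,
};

struct shinfo_demo {
	unsigned char tx_flags;
};

int main(void)
{
	struct shinfo_demo si = { 0 };

	/* replaces the old ->prevent_sk_orphan = 1 bitfield store */
	si.tx_flags |= SKBTX_DRV_NEEDS_SK_REF_DEMO;
	if (si.tx_flags & SKBTX_DRV_NEEDS_SK_REF_DEMO)
		puts("driver keeps the sk reference");
	return 0;
}
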
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 251997a95483..4df1b7a6c1bf 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -746,13 +746,12 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
746 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 746 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
747 mask |= POLLERR; 747 mask |= POLLERR;
748 if (sk->sk_shutdown & RCV_SHUTDOWN) 748 if (sk->sk_shutdown & RCV_SHUTDOWN)
749 mask |= POLLRDHUP; 749 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
750 if (sk->sk_shutdown == SHUTDOWN_MASK) 750 if (sk->sk_shutdown == SHUTDOWN_MASK)
751 mask |= POLLHUP; 751 mask |= POLLHUP;
752 752
753 /* readable? */ 753 /* readable? */
754 if (!skb_queue_empty(&sk->sk_receive_queue) || 754 if (!skb_queue_empty(&sk->sk_receive_queue))
755 (sk->sk_shutdown & RCV_SHUTDOWN))
756 mask |= POLLIN | POLLRDNORM; 755 mask |= POLLIN | POLLRDNORM;
757 756
758 /* Connection-based need to check for termination and startup */ 757 /* Connection-based need to check for termination and startup */
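
The datagram_poll change reports POLLIN | POLLRDNORM together with POLLRDHUP as soon as the receive side is shut down, instead of folding the shutdown test into the "readable?" check further down; the resulting mask is unchanged, but a read-side shutdown is now handled in one place, reflecting that a recv() would return 0 immediately without blocking. A sketch of the resulting mask logic (constants defined locally for the demo):

#include <stdio.h>

#define DEMO_POLLIN       0x001
#define DEMO_POLLRDNORM   0x040
#define DEMO_POLLRDHUP    0x2000
#define DEMO_RCV_SHUTDOWN 1

static unsigned int rx_mask(int shutdown, int queue_empty)
{
	unsigned int mask = 0;

	/* read-side shutdown is both "peer hung up" and "readable" */
	if (shutdown & DEMO_RCV_SHUTDOWN)
		mask |= DEMO_POLLRDHUP | DEMO_POLLIN | DEMO_POLLRDNORM;
	if (!queue_empty)
		mask |= DEMO_POLLIN | DEMO_POLLRDNORM;
	return mask;
}

int main(void)
{
	printf("shutdown, empty queue -> 0x%x\n",
	       rx_mask(DEMO_RCV_SHUTDOWN, 1));
	return 0;
}
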
diff --git a/net/core/dev.c b/net/core/dev.c
index 660dd41aaaa6..42b200fdf12e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -129,6 +129,7 @@
129#include <linux/random.h> 129#include <linux/random.h>
130#include <trace/events/napi.h> 130#include <trace/events/napi.h>
131#include <linux/pci.h> 131#include <linux/pci.h>
132#include <linux/inetdevice.h>
132 133
133#include "net-sysfs.h" 134#include "net-sysfs.h"
134 135
@@ -371,6 +372,14 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
371 * --ANK (980803) 372 * --ANK (980803)
372 */ 373 */
373 374
375static inline struct list_head *ptype_head(const struct packet_type *pt)
376{
377 if (pt->type == htons(ETH_P_ALL))
378 return &ptype_all;
379 else
380 return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
381}
382
374/** 383/**
375 * dev_add_pack - add packet handler 384 * dev_add_pack - add packet handler
376 * @pt: packet type declaration 385 * @pt: packet type declaration
@@ -386,16 +395,11 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
386 395
387void dev_add_pack(struct packet_type *pt) 396void dev_add_pack(struct packet_type *pt)
388{ 397{
389 int hash; 398 struct list_head *head = ptype_head(pt);
390 399
391 spin_lock_bh(&ptype_lock); 400 spin_lock(&ptype_lock);
392 if (pt->type == htons(ETH_P_ALL)) 401 list_add_rcu(&pt->list, head);
393 list_add_rcu(&pt->list, &ptype_all); 402 spin_unlock(&ptype_lock);
394 else {
395 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
396 list_add_rcu(&pt->list, &ptype_base[hash]);
397 }
398 spin_unlock_bh(&ptype_lock);
399} 403}
400EXPORT_SYMBOL(dev_add_pack); 404EXPORT_SYMBOL(dev_add_pack);
401 405
@@ -414,15 +418,10 @@ EXPORT_SYMBOL(dev_add_pack);
414 */ 418 */
415void __dev_remove_pack(struct packet_type *pt) 419void __dev_remove_pack(struct packet_type *pt)
416{ 420{
417 struct list_head *head; 421 struct list_head *head = ptype_head(pt);
418 struct packet_type *pt1; 422 struct packet_type *pt1;
419 423
420 spin_lock_bh(&ptype_lock); 424 spin_lock(&ptype_lock);
421
422 if (pt->type == htons(ETH_P_ALL))
423 head = &ptype_all;
424 else
425 head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
426 425
427 list_for_each_entry(pt1, head, list) { 426 list_for_each_entry(pt1, head, list) {
428 if (pt == pt1) { 427 if (pt == pt1) {
@@ -433,7 +432,7 @@ void __dev_remove_pack(struct packet_type *pt)
433 432
434 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt); 433 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
435out: 434out:
436 spin_unlock_bh(&ptype_lock); 435 spin_unlock(&ptype_lock);
437} 436}
438EXPORT_SYMBOL(__dev_remove_pack); 437EXPORT_SYMBOL(__dev_remove_pack);
439 438
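
dev_add_pack() and __dev_remove_pack() previously duplicated the "which list does this packet type live on?" decision; the new ptype_head() helper computes the bucket once and both paths share it, so add and remove can no longer disagree. The spin_lock_bh() pairs also become plain spin_lock(), which is safe only on the assumption that these registration paths run in process context and the lock is never taken from bottom-half context; readers traverse the lists under RCU and take no lock at all. A sketch of the shared-bucket helper (sizes and the ETH_P_ALL stand-in are illustrative):

#include <stdio.h>

#define DEMO_NBUCKETS  16
#define DEMO_HASH_MASK (DEMO_NBUCKETS - 1)
#define DEMO_TYPE_ALL  0xffff	/* stand-in for htons(ETH_P_ALL) */

struct demo_list_head { struct demo_list_head *next, *prev; };

static struct demo_list_head ptype_all;
static struct demo_list_head ptype_base[DEMO_NBUCKETS];

/* Single source of truth for the bucket computation. */
static struct demo_list_head *demo_ptype_head(unsigned int type)
{
	if (type == DEMO_TYPE_ALL)
		return &ptype_all;
	return &ptype_base[type & DEMO_HASH_MASK];
}

int main(void)
{
	printf("type 0x0800 -> bucket %ld\n",
	       (long)(demo_ptype_head(0x0800) - ptype_base));
	return 0;
}
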
@@ -1902,14 +1901,14 @@ static int dev_gso_segment(struct sk_buff *skb)
1902 1901
1903/* 1902/*
1904 * Try to orphan skb early, right before transmission by the device. 1903 * Try to orphan skb early, right before transmission by the device.
1905 * We cannot orphan skb if tx timestamp is requested, since 1904 * We cannot orphan skb if tx timestamp is requested or the sk-reference
1906 * drivers need to call skb_tstamp_tx() to send the timestamp. 1905 * is needed on driver level for other reasons, e.g. see net/can/raw.c
1907 */ 1906 */
1908static inline void skb_orphan_try(struct sk_buff *skb) 1907static inline void skb_orphan_try(struct sk_buff *skb)
1909{ 1908{
1910 struct sock *sk = skb->sk; 1909 struct sock *sk = skb->sk;
1911 1910
1912 if (sk && !skb_tx(skb)->flags) { 1911 if (sk && !skb_shinfo(skb)->tx_flags) {
1913 /* skb_tx_hash() wont be able to get sk. 1912 /* skb_tx_hash() wont be able to get sk.
1914 * We copy sk_hash into skb->rxhash 1913 * We copy sk_hash into skb->rxhash
1915 */ 1914 */
@@ -1930,7 +1929,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
1930 struct net_device *dev) 1929 struct net_device *dev)
1931{ 1930{
1932 return skb_is_nonlinear(skb) && 1931 return skb_is_nonlinear(skb) &&
1933 ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) || 1932 ((skb_has_frag_list(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
1934 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) || 1933 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
1935 illegal_highdma(dev, skb)))); 1934 illegal_highdma(dev, skb))));
1936} 1935}
@@ -2259,69 +2258,44 @@ static inline void ____napi_schedule(struct softnet_data *sd,
2259 __raise_softirq_irqoff(NET_RX_SOFTIRQ); 2258 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2260} 2259}
2261 2260
2262#ifdef CONFIG_RPS
2263
2264/* One global table that all flow-based protocols share. */
2265struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2266EXPORT_SYMBOL(rps_sock_flow_table);
2267
2268/* 2261/*
2269 * get_rps_cpu is called from netif_receive_skb and returns the target 2262 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2270 * CPU from the RPS map of the receiving queue for a given skb. 2263 * and src/dst port numbers. Returns a non-zero hash number on success
2271 * rcu_read_lock must be held on entry. 2264 * and 0 on failure.
2272 */ 2265 */
2273static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, 2266__u32 __skb_get_rxhash(struct sk_buff *skb)
2274 struct rps_dev_flow **rflowp)
2275{ 2267{
2268 int nhoff, hash = 0, poff;
2276 struct ipv6hdr *ip6; 2269 struct ipv6hdr *ip6;
2277 struct iphdr *ip; 2270 struct iphdr *ip;
2278 struct netdev_rx_queue *rxqueue;
2279 struct rps_map *map;
2280 struct rps_dev_flow_table *flow_table;
2281 struct rps_sock_flow_table *sock_flow_table;
2282 int cpu = -1;
2283 u8 ip_proto; 2271 u8 ip_proto;
2284 u16 tcpu;
2285 u32 addr1, addr2, ihl; 2272 u32 addr1, addr2, ihl;
2286 union { 2273 union {
2287 u32 v32; 2274 u32 v32;
2288 u16 v16[2]; 2275 u16 v16[2];
2289 } ports; 2276 } ports;
2290 2277
2291 if (skb_rx_queue_recorded(skb)) { 2278 nhoff = skb_network_offset(skb);
2292 u16 index = skb_get_rx_queue(skb);
2293 if (unlikely(index >= dev->num_rx_queues)) {
2294 WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
2295 "on queue %u, but number of RX queues is %u\n",
2296 dev->name, index, dev->num_rx_queues);
2297 goto done;
2298 }
2299 rxqueue = dev->_rx + index;
2300 } else
2301 rxqueue = dev->_rx;
2302
2303 if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
2304 goto done;
2305
2306 if (skb->rxhash)
2307 goto got_hash; /* Skip hash computation on packet header */
2308 2279
2309 switch (skb->protocol) { 2280 switch (skb->protocol) {
2310 case __constant_htons(ETH_P_IP): 2281 case __constant_htons(ETH_P_IP):
2311 if (!pskb_may_pull(skb, sizeof(*ip))) 2282 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2312 goto done; 2283 goto done;
2313 2284
2314 ip = (struct iphdr *) skb->data; 2285 ip = (struct iphdr *) (skb->data + nhoff);
2315 ip_proto = ip->protocol; 2286 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2287 ip_proto = 0;
2288 else
2289 ip_proto = ip->protocol;
2316 addr1 = (__force u32) ip->saddr; 2290 addr1 = (__force u32) ip->saddr;
2317 addr2 = (__force u32) ip->daddr; 2291 addr2 = (__force u32) ip->daddr;
2318 ihl = ip->ihl; 2292 ihl = ip->ihl;
2319 break; 2293 break;
2320 case __constant_htons(ETH_P_IPV6): 2294 case __constant_htons(ETH_P_IPV6):
2321 if (!pskb_may_pull(skb, sizeof(*ip6))) 2295 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2322 goto done; 2296 goto done;
2323 2297
2324 ip6 = (struct ipv6hdr *) skb->data; 2298 ip6 = (struct ipv6hdr *) (skb->data + nhoff);
2325 ip_proto = ip6->nexthdr; 2299 ip_proto = ip6->nexthdr;
2326 addr1 = (__force u32) ip6->saddr.s6_addr32[3]; 2300 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2327 addr2 = (__force u32) ip6->daddr.s6_addr32[3]; 2301 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -2330,33 +2304,80 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2330 default: 2304 default:
2331 goto done; 2305 goto done;
2332 } 2306 }
2333 switch (ip_proto) { 2307
2334 case IPPROTO_TCP: 2308 ports.v32 = 0;
2335 case IPPROTO_UDP: 2309 poff = proto_ports_offset(ip_proto);
2336 case IPPROTO_DCCP: 2310 if (poff >= 0) {
2337 case IPPROTO_ESP: 2311 nhoff += ihl * 4 + poff;
2338 case IPPROTO_AH: 2312 if (pskb_may_pull(skb, nhoff + 4)) {
2339 case IPPROTO_SCTP: 2313 ports.v32 = * (__force u32 *) (skb->data + nhoff);
2340 case IPPROTO_UDPLITE:
2341 if (pskb_may_pull(skb, (ihl * 4) + 4)) {
2342 ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
2343 if (ports.v16[1] < ports.v16[0]) 2314 if (ports.v16[1] < ports.v16[0])
2344 swap(ports.v16[0], ports.v16[1]); 2315 swap(ports.v16[0], ports.v16[1]);
2345 break;
2346 } 2316 }
2347 default:
2348 ports.v32 = 0;
2349 break;
2350 } 2317 }
2351 2318
2352 /* get a consistent hash (same value on both flow directions) */ 2319 /* get a consistent hash (same value on both flow directions) */
2353 if (addr2 < addr1) 2320 if (addr2 < addr1)
2354 swap(addr1, addr2); 2321 swap(addr1, addr2);
2355 skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2356 if (!skb->rxhash)
2357 skb->rxhash = 1;
2358 2322
2359got_hash: 2323 hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2324 if (!hash)
2325 hash = 1;
2326
2327done:
2328 return hash;
2329}
2330EXPORT_SYMBOL(__skb_get_rxhash);
2331
2332#ifdef CONFIG_RPS
2333
2334/* One global table that all flow-based protocols share. */
2335struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2336EXPORT_SYMBOL(rps_sock_flow_table);
2337
2338/*
2339 * get_rps_cpu is called from netif_receive_skb and returns the target
2340 * CPU from the RPS map of the receiving queue for a given skb.
2341 * rcu_read_lock must be held on entry.
2342 */
2343static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2344 struct rps_dev_flow **rflowp)
2345{
2346 struct netdev_rx_queue *rxqueue;
2347 struct rps_map *map = NULL;
2348 struct rps_dev_flow_table *flow_table;
2349 struct rps_sock_flow_table *sock_flow_table;
2350 int cpu = -1;
2351 u16 tcpu;
2352
2353 if (skb_rx_queue_recorded(skb)) {
2354 u16 index = skb_get_rx_queue(skb);
2355 if (unlikely(index >= dev->num_rx_queues)) {
2356 WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
2357 "on queue %u, but number of RX queues is %u\n",
2358 dev->name, index, dev->num_rx_queues);
2359 goto done;
2360 }
2361 rxqueue = dev->_rx + index;
2362 } else
2363 rxqueue = dev->_rx;
2364
2365 if (rxqueue->rps_map) {
2366 map = rcu_dereference(rxqueue->rps_map);
2367 if (map && map->len == 1) {
2368 tcpu = map->cpus[0];
2369 if (cpu_online(tcpu))
2370 cpu = tcpu;
2371 goto done;
2372 }
2373 } else if (!rxqueue->rps_flow_table) {
2374 goto done;
2375 }
2376
2377 skb_reset_network_header(skb);
2378 if (!skb_get_rxhash(skb))
2379 goto done;
2380
2360 flow_table = rcu_dereference(rxqueue->rps_flow_table); 2381 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2361 sock_flow_table = rcu_dereference(rps_sock_flow_table); 2382 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2362 if (flow_table && sock_flow_table) { 2383 if (flow_table && sock_flow_table) {
@@ -2396,7 +2417,6 @@ got_hash:
2396 } 2417 }
2397 } 2418 }
2398 2419
2399 map = rcu_dereference(rxqueue->rps_map);
2400 if (map) { 2420 if (map) {
2401 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; 2421 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2402 2422
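
__skb_get_rxhash, factored out above, builds a direction-independent flow hash: the smaller address always goes first and the two 16-bit ports are swapped into canonical order before hashing, so both directions of a connection produce the same value, and a zero result is bumped to 1 so that "no hash computed yet" stays distinguishable. A userspace sketch of the canonicalisation (the mixer below is a trivial stand-in for the kernel's seeded jhash_3words()):

#include <stdint.h>
#include <stdio.h>

/* Toy mixer; only the canonical ordering is the point of this sketch. */
static uint32_t mix(uint32_t a, uint32_t b, uint32_t c)
{
	return (a * 2654435761u) ^ (b * 2246822519u) ^ (c * 3266489917u);
}

static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
			  uint16_t sport, uint16_t dport)
{
	uint32_t hash, ports;

	if (daddr < saddr) {	/* smaller address first */
		uint32_t t = saddr; saddr = daddr; daddr = t;
	}
	if (dport < sport) {	/* smaller port first */
		uint16_t t = sport; sport = dport; dport = t;
	}
	ports = ((uint32_t)sport << 16) | dport;

	hash = mix(saddr, daddr, ports);
	return hash ? hash : 1;	/* 0 is reserved for "no hash" */
}

int main(void)
{
	/* both directions of one flow hash identically */
	printf("%u\n", flow_hash(0x0a000001, 0x0a000002, 1234, 80));
	printf("%u\n", flow_hash(0x0a000002, 0x0a000001, 80, 1234));
	return 0;
}
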
@@ -2828,8 +2848,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
2828 if (!netdev_tstamp_prequeue) 2848 if (!netdev_tstamp_prequeue)
2829 net_timestamp_check(skb); 2849 net_timestamp_check(skb);
2830 2850
2831 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb)) 2851 if (vlan_tx_tag_present(skb))
2832 return NET_RX_SUCCESS; 2852 vlan_hwaccel_do_receive(skb);
2833 2853
2834 /* if we've gotten here through NAPI, check netpoll */ 2854 /* if we've gotten here through NAPI, check netpoll */
2835 if (netpoll_receive_skb(skb)) 2855 if (netpoll_receive_skb(skb))
@@ -3050,7 +3070,7 @@ out:
3050 return netif_receive_skb(skb); 3070 return netif_receive_skb(skb);
3051} 3071}
3052 3072
3053static void napi_gro_flush(struct napi_struct *napi) 3073inline void napi_gro_flush(struct napi_struct *napi)
3054{ 3074{
3055 struct sk_buff *skb, *next; 3075 struct sk_buff *skb, *next;
3056 3076
@@ -3063,6 +3083,7 @@ static void napi_gro_flush(struct napi_struct *napi)
3063 napi->gro_count = 0; 3083 napi->gro_count = 0;
3064 napi->gro_list = NULL; 3084 napi->gro_list = NULL;
3065} 3085}
3086EXPORT_SYMBOL(napi_gro_flush);
3066 3087
3067enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3088enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3068{ 3089{
@@ -3077,7 +3098,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3077 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) 3098 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3078 goto normal; 3099 goto normal;
3079 3100
3080 if (skb_is_gso(skb) || skb_has_frags(skb)) 3101 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3081 goto normal; 3102 goto normal;
3082 3103
3083 rcu_read_lock(); 3104 rcu_read_lock();
@@ -3156,16 +3177,18 @@ normal:
3156} 3177}
3157EXPORT_SYMBOL(dev_gro_receive); 3178EXPORT_SYMBOL(dev_gro_receive);
3158 3179
3159static gro_result_t 3180static inline gro_result_t
3160__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) 3181__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3161{ 3182{
3162 struct sk_buff *p; 3183 struct sk_buff *p;
3163 3184
3164 for (p = napi->gro_list; p; p = p->next) { 3185 for (p = napi->gro_list; p; p = p->next) {
3165 NAPI_GRO_CB(p)->same_flow = 3186 unsigned long diffs;
3166 (p->dev == skb->dev) && 3187
3167 !compare_ether_header(skb_mac_header(p), 3188 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3189 diffs |= compare_ether_header(skb_mac_header(p),
3168 skb_gro_mac_header(skb)); 3190 skb_gro_mac_header(skb));
3191 NAPI_GRO_CB(p)->same_flow = !diffs;
3169 NAPI_GRO_CB(p)->flush = 0; 3192 NAPI_GRO_CB(p)->flush = 0;
3170 } 3193 }
3171 3194
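
__napi_gro_receive now folds every source of mismatch into a single diffs word (XOR of the device pointers, OR'd with the Ethernet-header comparison) and derives same_flow from !diffs, replacing a chain of short-circuit comparisons with straight-line code. A sketch of the accumulate-then-test idiom (memcmp stands in for compare_ether_header()):

#include <stdio.h>
#include <string.h>

struct demo_pkt {
	const void *dev;
	unsigned char mac[14];	/* Ethernet header bytes */
};

/* Accumulate every mismatch into one word and test it once. */
static int same_flow(const struct demo_pkt *a, const struct demo_pkt *b)
{
	unsigned long diffs;

	diffs  = (unsigned long)a->dev ^ (unsigned long)b->dev;
	diffs |= memcmp(a->mac, b->mac, sizeof(a->mac)) != 0;
	return !diffs;
}

int main(void)
{
	struct demo_pkt a = { (void *)0x1, "ethhdr-bytes." };
	struct demo_pkt b = a;

	printf("same flow: %d\n", same_flow(&a, &b));
	b.dev = (void *)0x2;
	printf("after dev change: %d\n", same_flow(&a, &b));
	return 0;
}
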
@@ -4941,6 +4964,34 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4941} 4964}
4942EXPORT_SYMBOL(netif_stacked_transfer_operstate); 4965EXPORT_SYMBOL(netif_stacked_transfer_operstate);
4943 4966
4967static int netif_alloc_rx_queues(struct net_device *dev)
4968{
4969#ifdef CONFIG_RPS
4970 unsigned int i, count = dev->num_rx_queues;
4971
4972 if (count) {
4973 struct netdev_rx_queue *rx;
4974
4975 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
4976 if (!rx) {
4977 pr_err("netdev: Unable to allocate %u rx queues.\n",
4978 count);
4979 return -ENOMEM;
4980 }
4981 dev->_rx = rx;
4982 atomic_set(&rx->count, count);
4983
4984 /*
4985 * Set a pointer to first element in the array which holds the
4986 * reference count.
4987 */
4988 for (i = 0; i < count; i++)
4989 rx[i].first = rx;
4990 }
4991#endif
4992 return 0;
4993}
4994
4944/** 4995/**
4945 * register_netdevice - register a network device 4996 * register_netdevice - register a network device
4946 * @dev: device to register 4997 * @dev: device to register
@@ -4978,24 +5029,10 @@ int register_netdevice(struct net_device *dev)
4978 5029
4979 dev->iflink = -1; 5030 dev->iflink = -1;
4980 5031
4981#ifdef CONFIG_RPS 5032 ret = netif_alloc_rx_queues(dev);
4982 if (!dev->num_rx_queues) { 5033 if (ret)
4983 /* 5034 goto out;
4984 * Allocate a single RX queue if driver never called
4985 * alloc_netdev_mq
4986 */
4987
4988 dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
4989 if (!dev->_rx) {
4990 ret = -ENOMEM;
4991 goto out;
4992 }
4993 5035
4994 dev->_rx->first = dev->_rx;
4995 atomic_set(&dev->_rx->count, 1);
4996 dev->num_rx_queues = 1;
4997 }
4998#endif
4999 /* Init, if this function is available */ 5036 /* Init, if this function is available */
5000 if (dev->netdev_ops->ndo_init) { 5037 if (dev->netdev_ops->ndo_init) {
5001 ret = dev->netdev_ops->ndo_init(dev); 5038 ret = dev->netdev_ops->ndo_init(dev);
@@ -5035,6 +5072,12 @@ int register_netdevice(struct net_device *dev)
5035 if (dev->features & NETIF_F_SG) 5072 if (dev->features & NETIF_F_SG)
5036 dev->features |= NETIF_F_GSO; 5073 dev->features |= NETIF_F_GSO;
5037 5074
5075 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5076 * vlan_dev_init() will do the dev->features check, so these features
5077 * are enabled only if supported by underlying device.
5078 */
5079 dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
5080
5038 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 5081 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5039 ret = notifier_to_errno(ret); 5082 ret = notifier_to_errno(ret);
5040 if (ret) 5083 if (ret)
@@ -5264,7 +5307,7 @@ void netdev_run_todo(void)
5264 5307
5265 /* paranoia */ 5308 /* paranoia */
5266 BUG_ON(atomic_read(&dev->refcnt)); 5309 BUG_ON(atomic_read(&dev->refcnt));
5267 WARN_ON(dev->ip_ptr); 5310 WARN_ON(rcu_dereference_raw(dev->ip_ptr));
5268 WARN_ON(dev->ip6_ptr); 5311 WARN_ON(dev->ip6_ptr);
5269 WARN_ON(dev->dn_ptr); 5312 WARN_ON(dev->dn_ptr);
5270 5313
@@ -5386,10 +5429,6 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5386 struct net_device *dev; 5429 struct net_device *dev;
5387 size_t alloc_size; 5430 size_t alloc_size;
5388 struct net_device *p; 5431 struct net_device *p;
5389#ifdef CONFIG_RPS
5390 struct netdev_rx_queue *rx;
5391 int i;
5392#endif
5393 5432
5394 BUG_ON(strlen(name) >= sizeof(dev->name)); 5433 BUG_ON(strlen(name) >= sizeof(dev->name));
5395 5434
@@ -5415,29 +5454,12 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5415 goto free_p; 5454 goto free_p;
5416 } 5455 }
5417 5456
5418#ifdef CONFIG_RPS
5419 rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5420 if (!rx) {
5421 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5422 "rx queues.\n");
5423 goto free_tx;
5424 }
5425
5426 atomic_set(&rx->count, queue_count);
5427
5428 /*
5429 * Set a pointer to first element in the array which holds the
5430 * reference count.
5431 */
5432 for (i = 0; i < queue_count; i++)
5433 rx[i].first = rx;
5434#endif
5435 5457
5436 dev = PTR_ALIGN(p, NETDEV_ALIGN); 5458 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5437 dev->padded = (char *)dev - (char *)p; 5459 dev->padded = (char *)dev - (char *)p;
5438 5460
5439 if (dev_addr_init(dev)) 5461 if (dev_addr_init(dev))
5440 goto free_rx; 5462 goto free_tx;
5441 5463
5442 dev_mc_init(dev); 5464 dev_mc_init(dev);
5443 dev_uc_init(dev); 5465 dev_uc_init(dev);
@@ -5449,7 +5471,6 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5449 dev->real_num_tx_queues = queue_count; 5471 dev->real_num_tx_queues = queue_count;
5450 5472
5451#ifdef CONFIG_RPS 5473#ifdef CONFIG_RPS
5452 dev->_rx = rx;
5453 dev->num_rx_queues = queue_count; 5474 dev->num_rx_queues = queue_count;
5454#endif 5475#endif
5455 5476
@@ -5467,11 +5488,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5467 strcpy(dev->name, name); 5488 strcpy(dev->name, name);
5468 return dev; 5489 return dev;
5469 5490
5470free_rx:
5471#ifdef CONFIG_RPS
5472 kfree(rx);
5473free_tx: 5491free_tx:
5474#endif
5475 kfree(tx); 5492 kfree(tx);
5476free_p: 5493free_p:
5477 kfree(p); 5494 kfree(p);
@@ -5658,6 +5675,10 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5658 5675
5659 /* Notify protocols, that we are about to destroy 5676 /* Notify protocols, that we are about to destroy
5660 this device. They should clean all the things. 5677 this device. They should clean all the things.
5678
5679 Note that dev->reg_state stays at NETREG_REGISTERED.
5680 This is wanted because this way 8021q and macvlan know
5681 the device is just moving and can keep their slaves up.
5661 */ 5682 */
5662 call_netdevice_notifiers(NETDEV_UNREGISTER, dev); 5683 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5663 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev); 5684 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
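
The RPS RX-queue array no longer comes from alloc_netdev_mq(); register_netdevice() allocates it through the new netif_alloc_rx_queues(), and every element stores a pointer to the first entry, which carries the reference count for the whole array (an atomic_t in the kernel; a plain int in this sketch). A userspace sketch of that back-pointer layout, with calloc standing in for kcalloc:

#include <stdio.h>
#include <stdlib.h>

struct demo_rx_queue {
	struct demo_rx_queue *first;	/* element 0 holds the shared count */
	int count;			/* meaningful only via ->first */
};

static struct demo_rx_queue *alloc_rx_queues(unsigned int n)
{
	struct demo_rx_queue *rx = calloc(n, sizeof(*rx));
	unsigned int i;

	if (!rx)
		return NULL;
	rx->count = n;			/* one count for the whole array */
	for (i = 0; i < n; i++)
		rx[i].first = rx;	/* every element can reach it */
	return rx;
}

int main(void)
{
	struct demo_rx_queue *rx = alloc_rx_queues(4);

	if (rx)
		printf("queue 3 sees count %d\n", rx[3].first->count);
	free(rx);
	return 0;
}
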
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 7a85367b3c2f..7d7e572cedc7 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -19,6 +19,7 @@
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/bitops.h> 20#include <linux/bitops.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/vmalloc.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
23 24
24/* 25/*
@@ -205,18 +206,24 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
205 struct ethtool_drvinfo info; 206 struct ethtool_drvinfo info;
206 const struct ethtool_ops *ops = dev->ethtool_ops; 207 const struct ethtool_ops *ops = dev->ethtool_ops;
207 208
208 if (!ops->get_drvinfo)
209 return -EOPNOTSUPP;
210
211 memset(&info, 0, sizeof(info)); 209 memset(&info, 0, sizeof(info));
212 info.cmd = ETHTOOL_GDRVINFO; 210 info.cmd = ETHTOOL_GDRVINFO;
213 ops->get_drvinfo(dev, &info); 211 if (ops && ops->get_drvinfo) {
212 ops->get_drvinfo(dev, &info);
213 } else if (dev->dev.parent && dev->dev.parent->driver) {
214 strlcpy(info.bus_info, dev_name(dev->dev.parent),
215 sizeof(info.bus_info));
216 strlcpy(info.driver, dev->dev.parent->driver->name,
217 sizeof(info.driver));
218 } else {
219 return -EOPNOTSUPP;
220 }
214 221
215 /* 222 /*
216 * this method of obtaining string set info is deprecated; 223 * this method of obtaining string set info is deprecated;
217 * Use ETHTOOL_GSSET_INFO instead. 224 * Use ETHTOOL_GSSET_INFO instead.
218 */ 225 */
219 if (ops->get_sset_count) { 226 if (ops && ops->get_sset_count) {
220 int rc; 227 int rc;
221 228
222 rc = ops->get_sset_count(dev, ETH_SS_TEST); 229 rc = ops->get_sset_count(dev, ETH_SS_TEST);
@@ -229,9 +236,9 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
229 if (rc >= 0) 236 if (rc >= 0)
230 info.n_priv_flags = rc; 237 info.n_priv_flags = rc;
231 } 238 }
232 if (ops->get_regs_len) 239 if (ops && ops->get_regs_len)
233 info.regdump_len = ops->get_regs_len(dev); 240 info.regdump_len = ops->get_regs_len(dev);
234 if (ops->get_eeprom_len) 241 if (ops && ops->get_eeprom_len)
235 info.eedump_len = ops->get_eeprom_len(dev); 242 info.eedump_len = ops->get_eeprom_len(dev);
236 243
237 if (copy_to_user(useraddr, &info, sizeof(info))) 244 if (copy_to_user(useraddr, &info, sizeof(info)))
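
ethtool_get_drvinfo() no longer fails outright when a driver provides no get_drvinfo hook: it falls back to the generic device model, reporting the parent device's name as bus_info and the parent driver's name as driver; every remaining ops-> dereference gains an "ops &&" guard because ethtool_ops itself may now be NULL on this path. A sketch of the fallback (simplified structs and example names; snprintf stands in for strlcpy):

#include <stdio.h>

struct demo_drvinfo { char driver[32]; char bus_info[32]; };
struct demo_driver  { const char *name; };
struct demo_device  { const char *name; const struct demo_driver *driver; };

/* Identify the device from the driver core when no ethtool hook exists. */
static int fill_drvinfo(const struct demo_device *parent,
			struct demo_drvinfo *info)
{
	if (!parent || !parent->driver)
		return -1;	/* -EOPNOTSUPP in the kernel */
	snprintf(info->bus_info, sizeof(info->bus_info), "%s", parent->name);
	snprintf(info->driver, sizeof(info->driver), "%s",
		 parent->driver->name);
	return 0;
}

int main(void)
{
	struct demo_driver drv = { "e1000e" };
	struct demo_device dev = { "0000:00:19.0", &drv };
	struct demo_drvinfo info = { "", "" };

	if (!fill_drvinfo(&dev, &info))
		printf("driver %s on %s\n", info.driver, info.bus_info);
	return 0;
}
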
@@ -479,6 +486,38 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
479 list->count++; 486 list->count++;
480} 487}
481 488
489/*
490 * ethtool does not (or did not) set masks for flow parameters that are
491 * not specified, so if both value and mask are 0 then this must be
492 * treated as equivalent to a mask with all bits set. Implement that
493 * here rather than in drivers.
494 */
495static void rx_ntuple_fix_masks(struct ethtool_rx_ntuple_flow_spec *fs)
496{
497 struct ethtool_tcpip4_spec *entry = &fs->h_u.tcp_ip4_spec;
498 struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec;
499
500 if (fs->flow_type != TCP_V4_FLOW &&
501 fs->flow_type != UDP_V4_FLOW &&
502 fs->flow_type != SCTP_V4_FLOW)
503 return;
504
505 if (!(entry->ip4src | mask->ip4src))
506 mask->ip4src = htonl(0xffffffff);
507 if (!(entry->ip4dst | mask->ip4dst))
508 mask->ip4dst = htonl(0xffffffff);
509 if (!(entry->psrc | mask->psrc))
510 mask->psrc = htons(0xffff);
511 if (!(entry->pdst | mask->pdst))
512 mask->pdst = htons(0xffff);
513 if (!(entry->tos | mask->tos))
514 mask->tos = 0xff;
515 if (!(fs->vlan_tag | fs->vlan_tag_mask))
516 fs->vlan_tag_mask = 0xffff;
517 if (!(fs->data | fs->data_mask))
518 fs->data_mask = 0xffffffffffffffffULL;
519}
520
482static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, 521static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
483 void __user *useraddr) 522 void __user *useraddr)
484{ 523{
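
rx_ntuple_fix_masks() encodes the convention stated in its comment: a field whose value and mask are both zero was never specified, so its mask is widened to all ones, turning it into an explicit wildcard that drivers no longer have to special-case. Worked example: a TCP/IPv4 filter that only sets a destination port keeps that port's mask, while the untouched ip4src (value 0, mask 0) is rewritten with mask 0xffffffff. A sketch of the per-field rule:

#include <stdint.h>
#include <stdio.h>

/* Convention: 0 value with 0 mask means "not specified"; rewrite it as
 * an all-ones mask so the field is explicitly fully masked out. */
static void fix_mask32(uint32_t value, uint32_t *mask)
{
	if (!(value | *mask))
		*mask = 0xffffffff;
}

int main(void)
{
	uint32_t ip4src = 0,          src_mask = 0;	/* unspecified */
	uint32_t ip4dst = 0x0a000001, dst_mask = 0;	/* exact match */

	fix_mask32(ip4src, &src_mask);
	fix_mask32(ip4dst, &dst_mask);
	printf("src mask 0x%08x (wildcard), dst mask 0x%08x (exact)\n",
	       src_mask, dst_mask);
	return 0;
}
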
@@ -493,6 +532,8 @@ static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
493 if (copy_from_user(&cmd, useraddr, sizeof(cmd))) 532 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
494 return -EFAULT; 533 return -EFAULT;
495 534
535 rx_ntuple_fix_masks(&cmd.fs);
536
496 /* 537 /*
497 * Cache filter in dev struct for GET operation only if 538 * Cache filter in dev struct for GET operation only if
498 * the underlying driver doesn't have its own GET operation, and 539 * the underlying driver doesn't have its own GET operation, and
@@ -667,19 +708,19 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
667 break; 708 break;
668 case IP_USER_FLOW: 709 case IP_USER_FLOW:
669 sprintf(p, "\tSrc IP addr: 0x%x\n", 710 sprintf(p, "\tSrc IP addr: 0x%x\n",
670 fsc->fs.h_u.raw_ip4_spec.ip4src); 711 fsc->fs.h_u.usr_ip4_spec.ip4src);
671 p += ETH_GSTRING_LEN; 712 p += ETH_GSTRING_LEN;
672 num_strings++; 713 num_strings++;
673 sprintf(p, "\tSrc IP mask: 0x%x\n", 714 sprintf(p, "\tSrc IP mask: 0x%x\n",
674 fsc->fs.m_u.raw_ip4_spec.ip4src); 715 fsc->fs.m_u.usr_ip4_spec.ip4src);
675 p += ETH_GSTRING_LEN; 716 p += ETH_GSTRING_LEN;
676 num_strings++; 717 num_strings++;
677 sprintf(p, "\tDest IP addr: 0x%x\n", 718 sprintf(p, "\tDest IP addr: 0x%x\n",
678 fsc->fs.h_u.raw_ip4_spec.ip4dst); 719 fsc->fs.h_u.usr_ip4_spec.ip4dst);
679 p += ETH_GSTRING_LEN; 720 p += ETH_GSTRING_LEN;
680 num_strings++; 721 num_strings++;
681 sprintf(p, "\tDest IP mask: 0x%x\n", 722 sprintf(p, "\tDest IP mask: 0x%x\n",
682 fsc->fs.m_u.raw_ip4_spec.ip4dst); 723 fsc->fs.m_u.usr_ip4_spec.ip4dst);
683 p += ETH_GSTRING_LEN; 724 p += ETH_GSTRING_LEN;
684 num_strings++; 725 num_strings++;
685 break; 726 break;
@@ -775,7 +816,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
775 if (regs.len > reglen) 816 if (regs.len > reglen)
776 regs.len = reglen; 817 regs.len = reglen;
777 818
778 regbuf = kmalloc(reglen, GFP_USER); 819 regbuf = vmalloc(reglen);
779 if (!regbuf) 820 if (!regbuf)
780 return -ENOMEM; 821 return -ENOMEM;
781 822
@@ -790,7 +831,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
790 ret = 0; 831 ret = 0;
791 832
792 out: 833 out:
793 kfree(regbuf); 834 vfree(regbuf);
794 return ret; 835 return ret;
795} 836}
796 837
@@ -1175,8 +1216,11 @@ static int ethtool_set_gro(struct net_device *dev, char __user *useraddr)
1175 return -EFAULT; 1216 return -EFAULT;
1176 1217
1177 if (edata.data) { 1218 if (edata.data) {
1178 if (!dev->ethtool_ops->get_rx_csum || 1219 u32 rxcsum = dev->ethtool_ops->get_rx_csum ?
1179 !dev->ethtool_ops->get_rx_csum(dev)) 1220 dev->ethtool_ops->get_rx_csum(dev) :
1221 ethtool_op_get_rx_csum(dev);
1222
1223 if (!rxcsum)
1180 return -EINVAL; 1224 return -EINVAL;
1181 dev->features |= NETIF_F_GRO; 1225 dev->features |= NETIF_F_GRO;
1182 } else 1226 } else
@@ -1402,14 +1446,22 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1402 if (!dev || !netif_device_present(dev)) 1446 if (!dev || !netif_device_present(dev))
1403 return -ENODEV; 1447 return -ENODEV;
1404 1448
1405 if (!dev->ethtool_ops)
1406 return -EOPNOTSUPP;
1407
1408 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd))) 1449 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
1409 return -EFAULT; 1450 return -EFAULT;
1410 1451
1452 if (!dev->ethtool_ops) {
1453 /* ETHTOOL_GDRVINFO does not require any driver support.
1454 * It is also unprivileged and does not change anything,
1455 * so we can take a shortcut to it. */
1456 if (ethcmd == ETHTOOL_GDRVINFO)
1457 return ethtool_get_drvinfo(dev, useraddr);
1458 else
1459 return -EOPNOTSUPP;
1460 }
1461
1411 /* Allow some commands to be done by anyone */ 1462 /* Allow some commands to be done by anyone */
1412 switch (ethcmd) { 1463 switch (ethcmd) {
1464 case ETHTOOL_GSET:
1413 case ETHTOOL_GDRVINFO: 1465 case ETHTOOL_GDRVINFO:
1414 case ETHTOOL_GMSGLVL: 1466 case ETHTOOL_GMSGLVL:
1415 case ETHTOOL_GCOALESCE: 1467 case ETHTOOL_GCOALESCE:
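
Taken together, the ethtool.c hunks above mean ETHTOOL_GDRVINFO now succeeds even when a driver registers no ethtool_ops at all. A minimal user-space sketch, assuming a device named "dummy0" (the name is illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "dummy0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&info;

        /* Previously a driver without ethtool_ops got -EOPNOTSUPP here;
         * with this change the core can answer with generic info. */
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
            printf("driver: %s\n", info.driver);
        close(fd);
        return 0;
    }
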
diff --git a/net/core/flow.c b/net/core/flow.c
index f67dcbfe54ef..127c8a7ffd61 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -53,8 +53,7 @@ struct flow_flush_info {
53 53
54struct flow_cache { 54struct flow_cache {
55 u32 hash_shift; 55 u32 hash_shift;
56 unsigned long order; 56 struct flow_cache_percpu __percpu *percpu;
57 struct flow_cache_percpu *percpu;
58 struct notifier_block hotcpu_notifier; 57 struct notifier_block hotcpu_notifier;
59 int low_watermark; 58 int low_watermark;
60 int high_watermark; 59 int high_watermark;
@@ -64,7 +63,7 @@ struct flow_cache {
64atomic_t flow_cache_genid = ATOMIC_INIT(0); 63atomic_t flow_cache_genid = ATOMIC_INIT(0);
65EXPORT_SYMBOL(flow_cache_genid); 64EXPORT_SYMBOL(flow_cache_genid);
66static struct flow_cache flow_cache_global; 65static struct flow_cache flow_cache_global;
67static struct kmem_cache *flow_cachep; 66static struct kmem_cache *flow_cachep __read_mostly;
68 67
69static DEFINE_SPINLOCK(flow_cache_gc_lock); 68static DEFINE_SPINLOCK(flow_cache_gc_lock);
70static LIST_HEAD(flow_cache_gc_list); 69static LIST_HEAD(flow_cache_gc_list);
@@ -177,15 +176,11 @@ static u32 flow_hash_code(struct flow_cache *fc,
177{ 176{
178 u32 *k = (u32 *) key; 177 u32 *k = (u32 *) key;
179 178
180 return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) 179 return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
181 & (flow_cache_hash_size(fc) - 1)); 180 & (flow_cache_hash_size(fc) - 1);
182} 181}
183 182
184#if (BITS_PER_LONG == 64) 183typedef unsigned long flow_compare_t;
185typedef u64 flow_compare_t;
186#else
187typedef u32 flow_compare_t;
188#endif
189 184
190/* I hear what you're saying, use memcmp. But memcmp cannot make 185/* I hear what you're saying, use memcmp. But memcmp cannot make
191 * important assumptions that we can here, such as alignment and 186 * important assumptions that we can here, such as alignment and
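
The collapsed typedef works because struct flowi is laid out so that keys can be compared one machine word at a time. A hedged sketch of the pattern the typedef supports (the helper name is made up; the real comparison lives in flow_key_compare() in this file):

    static int flow_key_eq(const struct flowi *k1, const struct flowi *k2)
    {
        const flow_compare_t *p1 = (const flow_compare_t *)k1;
        const flow_compare_t *p2 = (const flow_compare_t *)k2;
        size_t n = sizeof(struct flowi) / sizeof(flow_compare_t);

        /* Unlike memcmp(), this may assume word alignment and a size
         * that is a whole number of words. */
        while (n--)
            if (*p1++ != *p2++)
                return 0;
        return 1;
    }
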
@@ -357,62 +352,73 @@ void flow_cache_flush(void)
357 put_online_cpus(); 352 put_online_cpus();
358} 353}
359 354
360static void __init flow_cache_cpu_prepare(struct flow_cache *fc, 355static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
361 struct flow_cache_percpu *fcp)
362{ 356{
363 fcp->hash_table = (struct hlist_head *) 357 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
364 __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order); 358 size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);
365 if (!fcp->hash_table)
366 panic("NET: failed to allocate flow cache order %lu\n", fc->order);
367 359
368 fcp->hash_rnd_recalc = 1; 360 if (!fcp->hash_table) {
369 fcp->hash_count = 0; 361 fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
370 tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0); 362 if (!fcp->hash_table) {
363 pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
364 return -ENOMEM;
365 }
366 fcp->hash_rnd_recalc = 1;
367 fcp->hash_count = 0;
368 tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
369 }
370 return 0;
371} 371}
372 372
373static int flow_cache_cpu(struct notifier_block *nfb, 373static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
374 unsigned long action, 374 unsigned long action,
375 void *hcpu) 375 void *hcpu)
376{ 376{
377 struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier); 377 struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
378 int cpu = (unsigned long) hcpu; 378 int res, cpu = (unsigned long) hcpu;
379 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); 379 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
380 380
381 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) 381 switch (action) {
382 case CPU_UP_PREPARE:
383 case CPU_UP_PREPARE_FROZEN:
384 res = flow_cache_cpu_prepare(fc, cpu);
385 if (res)
386 return notifier_from_errno(res);
387 break;
388 case CPU_DEAD:
389 case CPU_DEAD_FROZEN:
382 __flow_cache_shrink(fc, fcp, 0); 390 __flow_cache_shrink(fc, fcp, 0);
391 break;
392 }
383 return NOTIFY_OK; 393 return NOTIFY_OK;
384} 394}
385 395
386static int flow_cache_init(struct flow_cache *fc) 396static int __init flow_cache_init(struct flow_cache *fc)
387{ 397{
388 unsigned long order;
389 int i; 398 int i;
390 399
391 fc->hash_shift = 10; 400 fc->hash_shift = 10;
392 fc->low_watermark = 2 * flow_cache_hash_size(fc); 401 fc->low_watermark = 2 * flow_cache_hash_size(fc);
393 fc->high_watermark = 4 * flow_cache_hash_size(fc); 402 fc->high_watermark = 4 * flow_cache_hash_size(fc);
394 403
395 for (order = 0;
396 (PAGE_SIZE << order) <
397 (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
398 order++)
399 /* NOTHING */;
400 fc->order = order;
401 fc->percpu = alloc_percpu(struct flow_cache_percpu); 404 fc->percpu = alloc_percpu(struct flow_cache_percpu);
405 if (!fc->percpu)
406 return -ENOMEM;
402 407
403 setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd, 408 for_each_online_cpu(i) {
404 (unsigned long) fc); 409 if (flow_cache_cpu_prepare(fc, i))
405 fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; 410 return -ENOMEM;
406 add_timer(&fc->rnd_timer); 411 }
407
408 for_each_possible_cpu(i)
409 flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));
410
411 fc->hotcpu_notifier = (struct notifier_block){ 412 fc->hotcpu_notifier = (struct notifier_block){
412 .notifier_call = flow_cache_cpu, 413 .notifier_call = flow_cache_cpu,
413 }; 414 };
414 register_hotcpu_notifier(&fc->hotcpu_notifier); 415 register_hotcpu_notifier(&fc->hotcpu_notifier);
415 416
417 setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
418 (unsigned long) fc);
419 fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
420 add_timer(&fc->rnd_timer);
421
416 return 0; 422 return 0;
417} 423}
418 424
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 6743146e4d6b..7c2373321b74 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -274,9 +274,9 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
274 while ((e = gen_find_node(bstats, rate_est))) { 274 while ((e = gen_find_node(bstats, rate_est))) {
275 rb_erase(&e->node, &est_root); 275 rb_erase(&e->node, &est_root);
276 276
277 write_lock_bh(&est_lock); 277 write_lock(&est_lock);
278 e->bstats = NULL; 278 e->bstats = NULL;
279 write_unlock_bh(&est_lock); 279 write_unlock(&est_lock);
280 280
281 list_del_rcu(&e->list); 281 list_del_rcu(&e->list);
282 call_rcu(&e->e_rcu, __gen_kill_estimator); 282 call_rcu(&e->e_rcu, __gen_kill_estimator);
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 1cd98df412df..f4657c2127b4 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -41,7 +41,9 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
41 41
42 if (m->msg_namelen) { 42 if (m->msg_namelen) {
43 if (mode == VERIFY_READ) { 43 if (mode == VERIFY_READ) {
44 err = move_addr_to_kernel(m->msg_name, m->msg_namelen, 44 void __user *namep;
45 namep = (void __user __force *) m->msg_name;
46 err = move_addr_to_kernel(namep, m->msg_namelen,
45 address); 47 address);
46 if (err < 0) 48 if (err < 0)
47 return err; 49 return err;
@@ -52,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
52 } 54 }
53 55
54 size = m->msg_iovlen * sizeof(struct iovec); 56 size = m->msg_iovlen * sizeof(struct iovec);
55 if (copy_from_user(iov, m->msg_iov, size)) 57 if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
56 return -EFAULT; 58 return -EFAULT;
57 59
58 m->msg_iov = iov; 60 m->msg_iov = iov;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a4e0a7482c2b..96b1a749abb4 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -122,7 +122,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
122 122
123unsigned long neigh_rand_reach_time(unsigned long base) 123unsigned long neigh_rand_reach_time(unsigned long base)
124{ 124{
125 return (base ? (net_random() % base) + (base >> 1) : 0); 125 return base ? (net_random() % base) + (base >> 1) : 0;
126} 126}
127EXPORT_SYMBOL(neigh_rand_reach_time); 127EXPORT_SYMBOL(neigh_rand_reach_time);
128 128
@@ -766,9 +766,9 @@ next_elt:
766static __inline__ int neigh_max_probes(struct neighbour *n) 766static __inline__ int neigh_max_probes(struct neighbour *n)
767{ 767{
768 struct neigh_parms *p = n->parms; 768 struct neigh_parms *p = n->parms;
769 return (n->nud_state & NUD_PROBE ? 769 return (n->nud_state & NUD_PROBE) ?
770 p->ucast_probes : 770 p->ucast_probes :
771 p->ucast_probes + p->app_probes + p->mcast_probes); 771 p->ucast_probes + p->app_probes + p->mcast_probes;
772} 772}
773 773
774static void neigh_invalidate(struct neighbour *neigh) 774static void neigh_invalidate(struct neighbour *neigh)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index af4dfbadf2a0..76485a3f910b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -515,7 +515,7 @@ static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
515 return attribute->store(queue, attribute, buf, count); 515 return attribute->store(queue, attribute, buf, count);
516} 516}
517 517
518static struct sysfs_ops rx_queue_sysfs_ops = { 518static const struct sysfs_ops rx_queue_sysfs_ops = {
519 .show = rx_queue_attr_show, 519 .show = rx_queue_attr_show,
520 .store = rx_queue_attr_store, 520 .store = rx_queue_attr_store,
521}; 521};
@@ -789,12 +789,13 @@ static const void *net_netlink_ns(struct sock *sk)
789 return sock_net(sk); 789 return sock_net(sk);
790} 790}
791 791
792static struct kobj_ns_type_operations net_ns_type_operations = { 792struct kobj_ns_type_operations net_ns_type_operations = {
793 .type = KOBJ_NS_TYPE_NET, 793 .type = KOBJ_NS_TYPE_NET,
794 .current_ns = net_current_ns, 794 .current_ns = net_current_ns,
795 .netlink_ns = net_netlink_ns, 795 .netlink_ns = net_netlink_ns,
796 .initial_ns = net_initial_ns, 796 .initial_ns = net_initial_ns,
797}; 797};
798EXPORT_SYMBOL_GPL(net_ns_type_operations);
798 799
799static void net_kobj_ns_exit(struct net *net) 800static void net_kobj_ns_exit(struct net *net)
800{ 801{
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 10a1ea72010d..2c0df0f95b3d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -729,16 +729,14 @@ static int hex32_arg(const char __user *user_buffer, unsigned long maxlen,
729 *num = 0; 729 *num = 0;
730 730
731 for (; i < maxlen; i++) { 731 for (; i < maxlen; i++) {
732 int value;
732 char c; 733 char c;
733 *num <<= 4; 734 *num <<= 4;
734 if (get_user(c, &user_buffer[i])) 735 if (get_user(c, &user_buffer[i]))
735 return -EFAULT; 736 return -EFAULT;
736 if ((c >= '0') && (c <= '9')) 737 value = hex_to_bin(c);
737 *num |= c - '0'; 738 if (value >= 0)
738 else if ((c >= 'a') && (c <= 'f')) 739 *num |= value;
739 *num |= c - 'a' + 10;
740 else if ((c >= 'A') && (c <= 'F'))
741 *num |= c - 'A' + 10;
742 else 740 else
743 break; 741 break;
744 } 742 }
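
hex_to_bin() (lib/hexdump.c) returns the 4-bit value of a hex digit and a negative value for anything else, which is exactly what the "value >= 0" test above relies on. A stand-alone sketch of equivalent behaviour:

    /* Illustrative equivalent of hex_to_bin(): 0..15 for a hex digit,
     * -1 otherwise. */
    static int hex_to_bin_sketch(char ch)
    {
        if (ch >= '0' && ch <= '9')
            return ch - '0';
        ch |= 0x20;     /* fold ASCII upper case to lower case */
        if (ch >= 'a' && ch <= 'f')
            return ch - 'a' + 10;
        return -1;
    }

Feeding the digits '1', 'f', 'A' through successive loop iterations accumulates *num as 0x1 -> 0x1f -> 0x1fa, and the first non-hex character ends the loop.
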
@@ -3907,8 +3905,6 @@ static void __exit pg_cleanup(void)
3907{ 3905{
3908 struct pktgen_thread *t; 3906 struct pktgen_thread *t;
3909 struct list_head *q, *n; 3907 struct list_head *q, *n;
3910 wait_queue_head_t queue;
3911 init_waitqueue_head(&queue);
3912 3908
3913 /* Stop all interfaces & threads */ 3909 /* Stop all interfaces & threads */
3914 3910
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f78d821bd935..b2a718dfd720 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -612,36 +612,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
612 612
613static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b) 613static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
614{ 614{
615 struct rtnl_link_stats64 a; 615 memcpy(v, b, sizeof(*b));
616
617 a.rx_packets = b->rx_packets;
618 a.tx_packets = b->tx_packets;
619 a.rx_bytes = b->rx_bytes;
620 a.tx_bytes = b->tx_bytes;
621 a.rx_errors = b->rx_errors;
622 a.tx_errors = b->tx_errors;
623 a.rx_dropped = b->rx_dropped;
624 a.tx_dropped = b->tx_dropped;
625
626 a.multicast = b->multicast;
627 a.collisions = b->collisions;
628
629 a.rx_length_errors = b->rx_length_errors;
630 a.rx_over_errors = b->rx_over_errors;
631 a.rx_crc_errors = b->rx_crc_errors;
632 a.rx_frame_errors = b->rx_frame_errors;
633 a.rx_fifo_errors = b->rx_fifo_errors;
634 a.rx_missed_errors = b->rx_missed_errors;
635
636 a.tx_aborted_errors = b->tx_aborted_errors;
637 a.tx_carrier_errors = b->tx_carrier_errors;
638 a.tx_fifo_errors = b->tx_fifo_errors;
639 a.tx_heartbeat_errors = b->tx_heartbeat_errors;
640 a.tx_window_errors = b->tx_window_errors;
641
642 a.rx_compressed = b->rx_compressed;
643 a.tx_compressed = b->tx_compressed;
644 memcpy(v, &a, sizeof(a));
645} 616}
646 617
647/* All VF info */ 618/* All VF info */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c83b421341c0..752c1972b3a7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -202,8 +202,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
202 skb->data = data; 202 skb->data = data;
203 skb_reset_tail_pointer(skb); 203 skb_reset_tail_pointer(skb);
204 skb->end = skb->tail + size; 204 skb->end = skb->tail + size;
205 kmemcheck_annotate_bitfield(skb, flags1);
206 kmemcheck_annotate_bitfield(skb, flags2);
207#ifdef NET_SKBUFF_DATA_USES_OFFSET 205#ifdef NET_SKBUFF_DATA_USES_OFFSET
208 skb->mac_header = ~0U; 206 skb->mac_header = ~0U;
209#endif 207#endif
@@ -340,7 +338,7 @@ static void skb_release_data(struct sk_buff *skb)
340 put_page(skb_shinfo(skb)->frags[i].page); 338 put_page(skb_shinfo(skb)->frags[i].page);
341 } 339 }
342 340
343 if (skb_has_frags(skb)) 341 if (skb_has_frag_list(skb))
344 skb_drop_fraglist(skb); 342 skb_drop_fraglist(skb);
345 343
346 kfree(skb->head); 344 kfree(skb->head);
@@ -685,16 +683,10 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
685 683
686struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 684struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
687{ 685{
688 int headerlen = skb->data - skb->head; 686 int headerlen = skb_headroom(skb);
689 /* 687 unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
690 * Allocate the copy buffer 688 struct sk_buff *n = alloc_skb(size, gfp_mask);
691 */ 689
692 struct sk_buff *n;
693#ifdef NET_SKBUFF_DATA_USES_OFFSET
694 n = alloc_skb(skb->end + skb->data_len, gfp_mask);
695#else
696 n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
697#endif
698 if (!n) 690 if (!n)
699 return NULL; 691 return NULL;
700 692
@@ -726,20 +718,14 @@ EXPORT_SYMBOL(skb_copy);
726 718
727struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) 719struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
728{ 720{
729 /* 721 unsigned int size = skb_end_pointer(skb) - skb->head;
730 * Allocate the copy buffer 722 struct sk_buff *n = alloc_skb(size, gfp_mask);
731 */ 723
732 struct sk_buff *n;
733#ifdef NET_SKBUFF_DATA_USES_OFFSET
734 n = alloc_skb(skb->end, gfp_mask);
735#else
736 n = alloc_skb(skb->end - skb->head, gfp_mask);
737#endif
738 if (!n) 724 if (!n)
739 goto out; 725 goto out;
740 726
741 /* Set the data pointer */ 727 /* Set the data pointer */
742 skb_reserve(n, skb->data - skb->head); 728 skb_reserve(n, skb_headroom(skb));
743 /* Set the tail pointer and length */ 729 /* Set the tail pointer and length */
744 skb_put(n, skb_headlen(skb)); 730 skb_put(n, skb_headlen(skb));
745 /* Copy the bytes */ 731 /* Copy the bytes */
@@ -759,7 +745,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
759 skb_shinfo(n)->nr_frags = i; 745 skb_shinfo(n)->nr_frags = i;
760 } 746 }
761 747
762 if (skb_has_frags(skb)) { 748 if (skb_has_frag_list(skb)) {
763 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 749 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
764 skb_clone_fraglist(n); 750 skb_clone_fraglist(n);
765 } 751 }
@@ -791,12 +777,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
791{ 777{
792 int i; 778 int i;
793 u8 *data; 779 u8 *data;
794#ifdef NET_SKBUFF_DATA_USES_OFFSET 780 int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
795 int size = nhead + skb->end + ntail;
796#else
797 int size = nhead + (skb->end - skb->head) + ntail;
798#endif
799 long off; 781 long off;
782 bool fastpath;
800 783
801 BUG_ON(nhead < 0); 784 BUG_ON(nhead < 0);
802 785
@@ -810,23 +793,36 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
810 goto nodata; 793 goto nodata;
811 794
812 /* Copy only real data... and, alas, header. This should be 795 /* Copy only real data... and, alas, header. This should be
813 * optimized for the cases when header is void. */ 796 * optimized for the cases when header is void.
814#ifdef NET_SKBUFF_DATA_USES_OFFSET 797 */
815 memcpy(data + nhead, skb->head, skb->tail); 798 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
816#else 799
817 memcpy(data + nhead, skb->head, skb->tail - skb->head); 800 memcpy((struct skb_shared_info *)(data + size),
818#endif 801 skb_shinfo(skb),
819 memcpy(data + size, skb_end_pointer(skb),
820 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 802 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
821 803
822 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 804 /* Check if we can avoid taking references on fragments if we own
823 get_page(skb_shinfo(skb)->frags[i].page); 805 * the last reference on skb->head. (see skb_release_data())
806 */
807 if (!skb->cloned)
808 fastpath = true;
809 else {
810 int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
824 811
825 if (skb_has_frags(skb)) 812 fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
826 skb_clone_fraglist(skb); 813 }
827 814
828 skb_release_data(skb); 815 if (fastpath) {
816 kfree(skb->head);
817 } else {
818 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
819 get_page(skb_shinfo(skb)->frags[i].page);
829 820
821 if (skb_has_frag_list(skb))
822 skb_clone_fraglist(skb);
823
824 skb_release_data(skb);
825 }
830 off = (data + nhead) - skb->head; 826 off = (data + nhead) - skb->head;
831 827
832 skb->head = data; 828 skb->head = data;
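
The fastpath introduced above hinges on how dataref is split (see the comment above struct skb_shared_info in skbuff.h): the low half counts users of the entire skb->data, the high half (above SKB_DATAREF_SHIFT) counts payload-only users added via skb_header_release(). A hedged restatement of the test as a predicate (the helper name is illustrative):

    static bool skb_is_sole_data_owner(struct sk_buff *skb)
    {
        /* A nohdr skb holds one whole-data reference plus one
         * payload-only reference on its own head. */
        int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;

        return !skb->cloned ||
               atomic_read(&skb_shinfo(skb)->dataref) == delta;
    }

When this holds, the old head can simply be kfree()d after the copy; otherwise each fragment needs an extra page reference before skb_release_data() drops the shared ones.
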
@@ -1099,7 +1095,7 @@ drop_pages:
1099 for (; i < nfrags; i++) 1095 for (; i < nfrags; i++)
1100 put_page(skb_shinfo(skb)->frags[i].page); 1096 put_page(skb_shinfo(skb)->frags[i].page);
1101 1097
1102 if (skb_has_frags(skb)) 1098 if (skb_has_frag_list(skb))
1103 skb_drop_fraglist(skb); 1099 skb_drop_fraglist(skb);
1104 goto done; 1100 goto done;
1105 } 1101 }
@@ -1194,7 +1190,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1194 /* Optimization: no fragments, no reasons to preestimate 1190 /* Optimization: no fragments, no reasons to preestimate
1195 * size of pulled pages. Superb. 1191 * size of pulled pages. Superb.
1196 */ 1192 */
1197 if (!skb_has_frags(skb)) 1193 if (!skb_has_frag_list(skb))
1198 goto pull_pages; 1194 goto pull_pages;
1199 1195
1200 /* Estimate size of pulled pages. */ 1196 /* Estimate size of pulled pages. */
@@ -2323,7 +2319,7 @@ next_skb:
2323 st->frag_data = NULL; 2319 st->frag_data = NULL;
2324 } 2320 }
2325 2321
2326 if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) { 2322 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2327 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2323 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2328 st->frag_idx = 0; 2324 st->frag_idx = 0;
2329 goto next_skb; 2325 goto next_skb;
@@ -2893,7 +2889,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2893 return -ENOMEM; 2889 return -ENOMEM;
2894 2890
2895 /* Easy case. Most of packets will go this way. */ 2891 /* Easy case. Most of packets will go this way. */
2896 if (!skb_has_frags(skb)) { 2892 if (!skb_has_frag_list(skb)) {
2897 /* A little of trouble, not enough of space for trailer. 2893 /* A little of trouble, not enough of space for trailer.
2898 * This should not happen, when stack is tuned to generate 2894 * This should not happen, when stack is tuned to generate
2899 * good frames. OK, on miss we reallocate and reserve even more 2895 * good frames. OK, on miss we reallocate and reserve even more
@@ -2928,7 +2924,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2928 2924
2929 if (skb1->next == NULL && tailbits) { 2925 if (skb1->next == NULL && tailbits) {
2930 if (skb_shinfo(skb1)->nr_frags || 2926 if (skb_shinfo(skb1)->nr_frags ||
2931 skb_has_frags(skb1) || 2927 skb_has_frag_list(skb1) ||
2932 skb_tailroom(skb1) < tailbits) 2928 skb_tailroom(skb1) < tailbits)
2933 ntail = tailbits + 128; 2929 ntail = tailbits + 128;
2934 } 2930 }
@@ -2937,7 +2933,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2937 skb_cloned(skb1) || 2933 skb_cloned(skb1) ||
2938 ntail || 2934 ntail ||
2939 skb_shinfo(skb1)->nr_frags || 2935 skb_shinfo(skb1)->nr_frags ||
2940 skb_has_frags(skb1)) { 2936 skb_has_frag_list(skb1)) {
2941 struct sk_buff *skb2; 2937 struct sk_buff *skb2;
2942 2938
2943 /* Fuck, we are miserable poor guys... */ 2939 /* Fuck, we are miserable poor guys... */
@@ -3020,7 +3016,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
3020 } else { 3016 } else {
3021 /* 3017 /*
3022 * no hardware time stamps available, 3018 * no hardware time stamps available,
3023 * so keep the skb_shared_tx and only 3019 * so keep the shared tx_flags and only
3024 * store software time stamp 3020 * store software time stamp
3025 */ 3021 */
3026 skb->tstamp = ktime_get_real(); 3022 skb->tstamp = ktime_get_real();
diff --git a/net/core/sock.c b/net/core/sock.c
index ef30e9d286e7..42365deeba27 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1557,6 +1557,8 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1557EXPORT_SYMBOL(sock_alloc_send_skb); 1557EXPORT_SYMBOL(sock_alloc_send_skb);
1558 1558
1559static void __lock_sock(struct sock *sk) 1559static void __lock_sock(struct sock *sk)
1560 __releases(&sk->sk_lock.slock)
1561 __acquires(&sk->sk_lock.slock)
1560{ 1562{
1561 DEFINE_WAIT(wait); 1563 DEFINE_WAIT(wait);
1562 1564
@@ -1573,6 +1575,8 @@ static void __lock_sock(struct sock *sk)
1573} 1575}
1574 1576
1575static void __release_sock(struct sock *sk) 1577static void __release_sock(struct sock *sk)
1578 __releases(&sk->sk_lock.slock)
1579 __acquires(&sk->sk_lock.slock)
1576{ 1580{
1577 struct sk_buff *skb = sk->sk_backlog.head; 1581 struct sk_buff *skb = sk->sk_backlog.head;
1578 1582
diff --git a/net/core/utils.c b/net/core/utils.c
index f41854470539..5fea0ab21902 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -75,7 +75,7 @@ __be32 in_aton(const char *str)
75 str++; 75 str++;
76 } 76 }
77 } 77 }
78 return(htonl(l)); 78 return htonl(l);
79} 79}
80EXPORT_SYMBOL(in_aton); 80EXPORT_SYMBOL(in_aton);
81 81
@@ -92,18 +92,19 @@ EXPORT_SYMBOL(in_aton);
92 92
93static inline int xdigit2bin(char c, int delim) 93static inline int xdigit2bin(char c, int delim)
94{ 94{
95 int val;
96
95 if (c == delim || c == '\0') 97 if (c == delim || c == '\0')
96 return IN6PTON_DELIM; 98 return IN6PTON_DELIM;
97 if (c == ':') 99 if (c == ':')
98 return IN6PTON_COLON_MASK; 100 return IN6PTON_COLON_MASK;
99 if (c == '.') 101 if (c == '.')
100 return IN6PTON_DOT; 102 return IN6PTON_DOT;
101 if (c >= '0' && c <= '9') 103
102 return (IN6PTON_XDIGIT | IN6PTON_DIGIT| (c - '0')); 104 val = hex_to_bin(c);
103 if (c >= 'a' && c <= 'f') 105 if (val >= 0)
104 return (IN6PTON_XDIGIT | (c - 'a' + 10)); 106 return val | IN6PTON_XDIGIT | (val < 10 ? IN6PTON_DIGIT : 0);
105 if (c >= 'A' && c <= 'F') 107
106 return (IN6PTON_XDIGIT | (c - 'A' + 10));
107 if (delim == -1) 108 if (delim == -1)
108 return IN6PTON_DELIM; 109 return IN6PTON_DELIM;
109 return IN6PTON_UNKNOWN; 110 return IN6PTON_UNKNOWN;
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 6df6f8ac9636..6d16a9070ff0 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -62,18 +62,14 @@ struct ccid_operations {
62 void (*ccid_hc_tx_exit)(struct sock *sk); 62 void (*ccid_hc_tx_exit)(struct sock *sk);
63 void (*ccid_hc_rx_packet_recv)(struct sock *sk, 63 void (*ccid_hc_rx_packet_recv)(struct sock *sk,
64 struct sk_buff *skb); 64 struct sk_buff *skb);
65 int (*ccid_hc_rx_parse_options)(struct sock *sk, 65 int (*ccid_hc_rx_parse_options)(struct sock *sk, u8 pkt,
66 unsigned char option, 66 u8 opt, u8 *val, u8 len);
67 unsigned char len, u16 idx,
68 unsigned char* value);
69 int (*ccid_hc_rx_insert_options)(struct sock *sk, 67 int (*ccid_hc_rx_insert_options)(struct sock *sk,
70 struct sk_buff *skb); 68 struct sk_buff *skb);
71 void (*ccid_hc_tx_packet_recv)(struct sock *sk, 69 void (*ccid_hc_tx_packet_recv)(struct sock *sk,
72 struct sk_buff *skb); 70 struct sk_buff *skb);
73 int (*ccid_hc_tx_parse_options)(struct sock *sk, 71 int (*ccid_hc_tx_parse_options)(struct sock *sk, u8 pkt,
74 unsigned char option, 72 u8 opt, u8 *val, u8 len);
75 unsigned char len, u16 idx,
76 unsigned char* value);
77 int (*ccid_hc_tx_send_packet)(struct sock *sk, 73 int (*ccid_hc_tx_send_packet)(struct sock *sk,
78 struct sk_buff *skb); 74 struct sk_buff *skb);
79 void (*ccid_hc_tx_packet_sent)(struct sock *sk, 75 void (*ccid_hc_tx_packet_sent)(struct sock *sk,
@@ -168,27 +164,31 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
168 ccid->ccid_ops->ccid_hc_tx_packet_recv(sk, skb); 164 ccid->ccid_ops->ccid_hc_tx_packet_recv(sk, skb);
169} 165}
170 166
167/**
168 * ccid_hc_tx_parse_options - Parse CCID-specific options sent by the receiver
169 * @pkt: type of packet that @opt appears on (RFC 4340, 5.1)
170 * @opt: the CCID-specific option type (RFC 4340, 5.8 and 10.3)
171 * @val: value of @opt
172 * @len: length of @val in bytes
173 */
171static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, 174static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
172 unsigned char option, 175 u8 pkt, u8 opt, u8 *val, u8 len)
173 unsigned char len, u16 idx,
174 unsigned char* value)
175{ 176{
176 int rc = 0; 177 if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
177 if (ccid->ccid_ops->ccid_hc_tx_parse_options != NULL) 178 return 0;
178 rc = ccid->ccid_ops->ccid_hc_tx_parse_options(sk, option, len, idx, 179 return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
179 value);
180 return rc;
181} 180}
182 181
182/**
183 * ccid_hc_rx_parse_options - Parse CCID-specific options sent by the sender
184 * Arguments are analogous to ccid_hc_tx_parse_options()
185 */
183static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, 186static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
184 unsigned char option, 187 u8 pkt, u8 opt, u8 *val, u8 len)
185 unsigned char len, u16 idx,
186 unsigned char* value)
187{ 188{
188 int rc = 0; 189 if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
189 if (ccid->ccid_ops->ccid_hc_rx_parse_options != NULL) 190 return 0;
190 rc = ccid->ccid_ops->ccid_hc_rx_parse_options(sk, option, len, idx, value); 191 return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
191 return rc;
192} 192}
193 193
194static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk, 194static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk,
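
For the reworked hook, a hedged sketch of what a CCID module would now register (the option number and validation below are made up; a non-zero return is taken here to flag the option as invalid, matching how the wrappers' result is used by the options parser):

    static int example_rx_parse_options(struct sock *sk, u8 pkt,
                                        u8 opt, u8 *val, u8 len)
    {
        if (opt != 128)         /* hypothetical CCID-specific option */
            return 0;           /* not ours - ignore */
        if (len != 4)
            return -1;          /* malformed value field */
        /* ... interpret the 4 value bytes, knowing from @pkt which
         * packet type (RFC 4340, 5.1) carried the option ... */
        return 0;
    }

    static struct ccid_operations example_ccid_ops = {
        /* ... */
        .ccid_hc_rx_parse_options = example_rx_parse_options,
    };
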
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 8408398cd44e..0581143cb800 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -47,37 +47,6 @@ config IP_DCCP_CCID3_DEBUG
47 47
48 If in doubt, say N. 48 If in doubt, say N.
49 49
50config IP_DCCP_CCID3_RTO
51 int "Use higher bound for nofeedback timer"
52 default 100
53 depends on IP_DCCP_CCID3 && EXPERIMENTAL
54 ---help---
55 Use higher lower bound for nofeedback timer expiration.
56
57 The TFRC nofeedback timer normally expires after the maximum of 4
58 RTTs and twice the current send interval (RFC 3448, 4.3). On LANs
59 with a small RTT this can mean a high processing load and reduced
60 performance, since then the nofeedback timer is triggered very
61 frequently.
62
63 This option enables to set a higher lower bound for the nofeedback
64 value. Values in units of milliseconds can be set here.
65
66 A value of 0 disables this feature by enforcing the value specified
67 in RFC 3448. The following values have been suggested as bounds for
68 experimental use:
69 * 16-20ms to match the typical multimedia inter-frame interval
70 * 100ms as a reasonable compromise [default]
71 * 1000ms corresponds to the lower TCP RTO bound (RFC 2988, 2.4)
72
73 The default of 100ms is a compromise between a large value for
74 efficient DCCP implementations, and a small value to avoid disrupting
75 the network in times of congestion.
76
77 The purpose of the nofeedback timer is to slow DCCP down when there
78 is serious network congestion: experimenting with larger values should
79 therefore not be performed on WANs.
80
81config IP_DCCP_TFRC_LIB 50config IP_DCCP_TFRC_LIB
82 def_bool y if IP_DCCP_CCID3 51 def_bool y if IP_DCCP_CCID3
83 52
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 9b3ae9922be1..dc18172b1e59 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -25,59 +25,14 @@
25 */ 25 */
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include "../feat.h" 27#include "../feat.h"
28#include "../ccid.h"
29#include "../dccp.h"
30#include "ccid2.h" 28#include "ccid2.h"
31 29
32 30
33#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 31#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
34static int ccid2_debug; 32static int ccid2_debug;
35#define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a) 33#define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a)
36
37static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hc)
38{
39 int len = 0;
40 int pipe = 0;
41 struct ccid2_seq *seqp = hc->tx_seqh;
42
43 /* there is data in the chain */
44 if (seqp != hc->tx_seqt) {
45 seqp = seqp->ccid2s_prev;
46 len++;
47 if (!seqp->ccid2s_acked)
48 pipe++;
49
50 while (seqp != hc->tx_seqt) {
51 struct ccid2_seq *prev = seqp->ccid2s_prev;
52
53 len++;
54 if (!prev->ccid2s_acked)
55 pipe++;
56
57 /* packets are sent sequentially */
58 BUG_ON(dccp_delta_seqno(seqp->ccid2s_seq,
59 prev->ccid2s_seq ) >= 0);
60 BUG_ON(time_before(seqp->ccid2s_sent,
61 prev->ccid2s_sent));
62
63 seqp = prev;
64 }
65 }
66
67 BUG_ON(pipe != hc->tx_pipe);
68 ccid2_pr_debug("len of chain=%d\n", len);
69
70 do {
71 seqp = seqp->ccid2s_prev;
72 len++;
73 } while (seqp != hc->tx_seqh);
74
75 ccid2_pr_debug("total len=%d\n", len);
76 BUG_ON(len != hc->tx_seqbufc * CCID2_SEQBUF_LEN);
77}
78#else 34#else
79#define ccid2_pr_debug(format, a...) 35#define ccid2_pr_debug(format, a...)
80#define ccid2_hc_tx_check_sanity(hc)
81#endif 36#endif
82 37
83static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc) 38static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
@@ -156,19 +111,10 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
156 dp->dccps_l_ack_ratio = val; 111 dp->dccps_l_ack_ratio = val;
157} 112}
158 113
159static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hc, long val)
160{
161 ccid2_pr_debug("change SRTT to %ld\n", val);
162 hc->tx_srtt = val;
163}
164
165static void ccid2_start_rto_timer(struct sock *sk);
166
167static void ccid2_hc_tx_rto_expire(unsigned long data) 114static void ccid2_hc_tx_rto_expire(unsigned long data)
168{ 115{
169 struct sock *sk = (struct sock *)data; 116 struct sock *sk = (struct sock *)data;
170 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 117 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
171 long s;
172 118
173 bh_lock_sock(sk); 119 bh_lock_sock(sk);
174 if (sock_owned_by_user(sk)) { 120 if (sock_owned_by_user(sk)) {
@@ -178,23 +124,19 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
178 124
179 ccid2_pr_debug("RTO_EXPIRE\n"); 125 ccid2_pr_debug("RTO_EXPIRE\n");
180 126
181 ccid2_hc_tx_check_sanity(hc);
182
183 /* back-off timer */ 127 /* back-off timer */
184 hc->tx_rto <<= 1; 128 hc->tx_rto <<= 1;
129 if (hc->tx_rto > DCCP_RTO_MAX)
130 hc->tx_rto = DCCP_RTO_MAX;
185 131
186 s = hc->tx_rto / HZ; 132 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
187 if (s > 60)
188 hc->tx_rto = 60 * HZ;
189
190 ccid2_start_rto_timer(sk);
191 133
192 /* adjust pipe, cwnd etc */ 134 /* adjust pipe, cwnd etc */
193 hc->tx_ssthresh = hc->tx_cwnd / 2; 135 hc->tx_ssthresh = hc->tx_cwnd / 2;
194 if (hc->tx_ssthresh < 2) 136 if (hc->tx_ssthresh < 2)
195 hc->tx_ssthresh = 2; 137 hc->tx_ssthresh = 2;
196 hc->tx_cwnd = 1; 138 hc->tx_cwnd = 1;
197 hc->tx_pipe = 0; 139 hc->tx_pipe = 0;
198 140
199 /* clear state about stuff we sent */ 141 /* clear state about stuff we sent */
200 hc->tx_seqt = hc->tx_seqh; 142 hc->tx_seqt = hc->tx_seqh;
@@ -204,22 +146,11 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
204 hc->tx_rpseq = 0; 146 hc->tx_rpseq = 0;
205 hc->tx_rpdupack = -1; 147 hc->tx_rpdupack = -1;
206 ccid2_change_l_ack_ratio(sk, 1); 148 ccid2_change_l_ack_ratio(sk, 1);
207 ccid2_hc_tx_check_sanity(hc);
208out: 149out:
209 bh_unlock_sock(sk); 150 bh_unlock_sock(sk);
210 sock_put(sk); 151 sock_put(sk);
211} 152}
212 153
213static void ccid2_start_rto_timer(struct sock *sk)
214{
215 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
216
217 ccid2_pr_debug("setting RTO timeout=%ld\n", hc->tx_rto);
218
219 BUG_ON(timer_pending(&hc->tx_rtotimer));
220 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
221}
222
223static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) 154static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
224{ 155{
225 struct dccp_sock *dp = dccp_sk(sk); 156 struct dccp_sock *dp = dccp_sk(sk);
@@ -230,7 +161,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
230 161
231 hc->tx_seqh->ccid2s_seq = dp->dccps_gss; 162 hc->tx_seqh->ccid2s_seq = dp->dccps_gss;
232 hc->tx_seqh->ccid2s_acked = 0; 163 hc->tx_seqh->ccid2s_acked = 0;
233 hc->tx_seqh->ccid2s_sent = jiffies; 164 hc->tx_seqh->ccid2s_sent = ccid2_time_stamp;
234 165
235 next = hc->tx_seqh->ccid2s_next; 166 next = hc->tx_seqh->ccid2s_next;
236 /* check if we need to alloc more space */ 167 /* check if we need to alloc more space */
@@ -296,23 +227,20 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
296 } 227 }
297#endif 228#endif
298 229
299 /* setup RTO timer */ 230 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
300 if (!timer_pending(&hc->tx_rtotimer))
301 ccid2_start_rto_timer(sk);
302 231
303#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 232#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
304 do { 233 do {
305 struct ccid2_seq *seqp = hc->tx_seqt; 234 struct ccid2_seq *seqp = hc->tx_seqt;
306 235
307 while (seqp != hc->tx_seqh) { 236 while (seqp != hc->tx_seqh) {
308 ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n", 237 ccid2_pr_debug("out seq=%llu acked=%d time=%u\n",
309 (unsigned long long)seqp->ccid2s_seq, 238 (unsigned long long)seqp->ccid2s_seq,
310 seqp->ccid2s_acked, seqp->ccid2s_sent); 239 seqp->ccid2s_acked, seqp->ccid2s_sent);
311 seqp = seqp->ccid2s_next; 240 seqp = seqp->ccid2s_next;
312 } 241 }
313 } while (0); 242 } while (0);
314 ccid2_pr_debug("=========\n"); 243 ccid2_pr_debug("=========\n");
315 ccid2_hc_tx_check_sanity(hc);
316#endif 244#endif
317} 245}
318 246
@@ -378,17 +306,87 @@ out_invalid_option:
378 return -1; 306 return -1;
379} 307}
380 308
381static void ccid2_hc_tx_kill_rto_timer(struct sock *sk) 309/**
310 * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
311 * This code is almost identical to TCP's tcp_rtt_estimator(), since
312 * - it has a higher sampling frequency (recommended by RFC 1323),
313 * - the RTO does not collapse into RTT due to RTTVAR going towards zero,
314 * - it is simple (cf. more complex proposals such as Eifel timer or research
315 * which suggests that the gain should be set according to window size),
316 * - in tests it was found to work well with CCID2 [gerrit].
317 */
318static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
382{ 319{
383 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 320 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
321 long m = mrtt ? : 1;
384 322
385 sk_stop_timer(sk, &hc->tx_rtotimer); 323 if (hc->tx_srtt == 0) {
386 ccid2_pr_debug("deleted RTO timer\n"); 324 /* First measurement m */
325 hc->tx_srtt = m << 3;
326 hc->tx_mdev = m << 1;
327
328 hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk));
329 hc->tx_rttvar = hc->tx_mdev_max;
330
331 hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;
332 } else {
333 /* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
334 m -= (hc->tx_srtt >> 3);
335 hc->tx_srtt += m;
336
337 /* Similarly, update scaled mdev with regard to |m| */
338 if (m < 0) {
339 m = -m;
340 m -= (hc->tx_mdev >> 2);
341 /*
342 * This neutralises RTO increase when RTT < SRTT - mdev
343 * (see P. Sarolahti, A. Kuznetsov,"Congestion Control
344 * in Linux TCP", USENIX 2002, pp. 49-62).
345 */
346 if (m > 0)
347 m >>= 3;
348 } else {
349 m -= (hc->tx_mdev >> 2);
350 }
351 hc->tx_mdev += m;
352
353 if (hc->tx_mdev > hc->tx_mdev_max) {
354 hc->tx_mdev_max = hc->tx_mdev;
355 if (hc->tx_mdev_max > hc->tx_rttvar)
356 hc->tx_rttvar = hc->tx_mdev_max;
357 }
358
359 /*
360 * Decay RTTVAR at most once per flight, exploiting that
361 * 1) pipe <= cwnd <= Sequence_Window = W (RFC 4340, 7.5.2)
362 * 2) AWL = GSS-W+1 <= GAR <= GSS (RFC 4340, 7.5.1)
363 * GAR is a useful bound for FlightSize = pipe.
364 * AWL is probably too low here, as it over-estimates pipe.
365 */
366 if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) {
367 if (hc->tx_mdev_max < hc->tx_rttvar)
368 hc->tx_rttvar -= (hc->tx_rttvar -
369 hc->tx_mdev_max) >> 2;
370 hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;
371 hc->tx_mdev_max = tcp_rto_min(sk);
372 }
373 }
374
375 /*
376 * Set RTO from SRTT and RTTVAR
377 * As in TCP, 4 * RTTVAR >= TCP_RTO_MIN, giving a minimum RTO of 200 ms.
378 * This agrees with RFC 4341, 5:
379 * "Because DCCP does not retransmit data, DCCP does not require
380 * TCP's recommended minimum timeout of one second".
381 */
382 hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar;
383
384 if (hc->tx_rto > DCCP_RTO_MAX)
385 hc->tx_rto = DCCP_RTO_MAX;
387} 386}
388 387
389static inline void ccid2_new_ack(struct sock *sk, 388static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
390 struct ccid2_seq *seqp, 389 unsigned int *maxincr)
391 unsigned int *maxincr)
392{ 390{
393 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 391 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
394 392
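
To make the scaled arithmetic above concrete, a worked example in jiffies (HZ = 1000 assumed, so tcp_rto_min() is taken as 200; the sample values are illustrative):

    /* First sample, m = 40:
     *   tx_srtt   = m << 3                      = 320  (SRTT 40, scaled 2^3)
     *   tx_mdev   = m << 1                      = 80   (mdev 20, scaled 2^2)
     *   tx_rttvar = max(tx_mdev, tcp_rto_min()) = 200  (the RTO floor wins)
     *   tx_rto    = (tx_srtt >> 3) + tx_rttvar  = 240
     *
     * Next sample, m = 48:
     *   m -= tx_srtt >> 3;    m = 8        error against current SRTT
     *   tx_srtt += m;         tx_srtt = 328     i.e. SRTT += error/8
     *   m -= tx_mdev >> 2;    m = -12
     *   tx_mdev += m;         tx_mdev = 68
     *   tx_rto = (328 >> 3) + 200 = 241
     */
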
@@ -402,93 +400,27 @@ static inline void ccid2_new_ack(struct sock *sk,
402 hc->tx_cwnd += 1; 400 hc->tx_cwnd += 1;
403 hc->tx_packets_acked = 0; 401 hc->tx_packets_acked = 0;
404 } 402 }
405 403 /*
406 /* update RTO */ 404 * FIXME: RTT is sampled several times per acknowledgment (for each
407 if (hc->tx_srtt == -1 || 405 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
408 time_after(jiffies, hc->tx_lastrtt + hc->tx_srtt)) { 406 * This causes the RTT to be over-estimated, since the older entries
409 unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent; 407 * in the Ack Vector have earlier sending times.
410 int s; 408 * The cleanest solution is to not use the ccid2s_sent field at all
411 409 * and instead use DCCP timestamps: requires changes in other places.
412 /* first measurement */ 410 */
413 if (hc->tx_srtt == -1) { 411 ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent);
414 ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
415 r, jiffies,
416 (unsigned long long)seqp->ccid2s_seq);
417 ccid2_change_srtt(hc, r);
418 hc->tx_rttvar = r >> 1;
419 } else {
420 /* RTTVAR */
421 long tmp = hc->tx_srtt - r;
422 long srtt;
423
424 if (tmp < 0)
425 tmp *= -1;
426
427 tmp >>= 2;
428 hc->tx_rttvar *= 3;
429 hc->tx_rttvar >>= 2;
430 hc->tx_rttvar += tmp;
431
432 /* SRTT */
433 srtt = hc->tx_srtt;
434 srtt *= 7;
435 srtt >>= 3;
436 tmp = r >> 3;
437 srtt += tmp;
438 ccid2_change_srtt(hc, srtt);
439 }
440 s = hc->tx_rttvar << 2;
441 /* clock granularity is 1 when based on jiffies */
442 if (!s)
443 s = 1;
444 hc->tx_rto = hc->tx_srtt + s;
445
446 /* must be at least a second */
447 s = hc->tx_rto / HZ;
448 /* DCCP doesn't require this [but I like it cuz my code sux] */
449#if 1
450 if (s < 1)
451 hc->tx_rto = HZ;
452#endif
453 /* max 60 seconds */
454 if (s > 60)
455 hc->tx_rto = HZ * 60;
456
457 hc->tx_lastrtt = jiffies;
458
459 ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
460 hc->tx_srtt, hc->tx_rttvar,
461 hc->tx_rto, HZ, r);
462 }
463
464 /* we got a new ack, so re-start RTO timer */
465 ccid2_hc_tx_kill_rto_timer(sk);
466 ccid2_start_rto_timer(sk);
467}
468
469static void ccid2_hc_tx_dec_pipe(struct sock *sk)
470{
471 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
472
473 if (hc->tx_pipe == 0)
474 DCCP_BUG("pipe == 0");
475 else
476 hc->tx_pipe--;
477
478 if (hc->tx_pipe == 0)
479 ccid2_hc_tx_kill_rto_timer(sk);
480} 412}
481 413
482static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp) 414static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
483{ 415{
484 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 416 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
485 417
486 if (time_before(seqp->ccid2s_sent, hc->tx_last_cong)) { 418 if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) {
487 ccid2_pr_debug("Multiple losses in an RTT---treating as one\n"); 419 ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
488 return; 420 return;
489 } 421 }
490 422
491 hc->tx_last_cong = jiffies; 423 hc->tx_last_cong = ccid2_time_stamp;
492 424
493 hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U; 425 hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
494 hc->tx_ssthresh = max(hc->tx_cwnd, 2U); 426 hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
@@ -510,7 +442,6 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
510 int done = 0; 442 int done = 0;
511 unsigned int maxincr = 0; 443 unsigned int maxincr = 0;
512 444
513 ccid2_hc_tx_check_sanity(hc);
514 /* check reverse path congestion */ 445 /* check reverse path congestion */
515 seqno = DCCP_SKB_CB(skb)->dccpd_seq; 446 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
516 447
@@ -620,7 +551,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
620 seqp->ccid2s_acked = 1; 551 seqp->ccid2s_acked = 1;
621 ccid2_pr_debug("Got ack for %llu\n", 552 ccid2_pr_debug("Got ack for %llu\n",
622 (unsigned long long)seqp->ccid2s_seq); 553 (unsigned long long)seqp->ccid2s_seq);
623 ccid2_hc_tx_dec_pipe(sk); 554 hc->tx_pipe--;
624 } 555 }
625 if (seqp == hc->tx_seqt) { 556 if (seqp == hc->tx_seqt) {
626 done = 1; 557 done = 1;
@@ -677,7 +608,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
677 * one ack vector. 608 * one ack vector.
678 */ 609 */
679 ccid2_congestion_event(sk, seqp); 610 ccid2_congestion_event(sk, seqp);
680 ccid2_hc_tx_dec_pipe(sk); 611 hc->tx_pipe--;
681 } 612 }
682 if (seqp == hc->tx_seqt) 613 if (seqp == hc->tx_seqt)
683 break; 614 break;
@@ -695,7 +626,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
695 hc->tx_seqt = hc->tx_seqt->ccid2s_next; 626 hc->tx_seqt = hc->tx_seqt->ccid2s_next;
696 } 627 }
697 628
698 ccid2_hc_tx_check_sanity(hc); 629 /* restart RTO timer if not all outstanding data has been acked */
630 if (hc->tx_pipe == 0)
631 sk_stop_timer(sk, &hc->tx_rtotimer);
632 else
633 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
699} 634}
700 635
701static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) 636static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -707,12 +642,8 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
707 /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */ 642 /* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
708 hc->tx_ssthresh = ~0U; 643 hc->tx_ssthresh = ~0U;
709 644
710 /* 645 /* Use larger initial windows (RFC 4341, section 5). */
711 * RFC 4341, 5: "The cwnd parameter is initialized to at most four 646 hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
712 * packets for new connections, following the rules from [RFC3390]".
713 * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
714 */
715 hc->tx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);
716 647
717 /* Make sure that Ack Ratio is enabled and within bounds. */ 648 /* Make sure that Ack Ratio is enabled and within bounds. */
718 max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2); 649 max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
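
rfc3390_bytes_to_packets() converts the RFC 3390 byte-based initial window, min(4*MSS, max(2*MSS, 4380 bytes)), into the packet count CCID-2 uses for cwnd. A sketch of the conversion under that formula (the helper below is illustrative):

    static inline u32 rfc3390_initial_cwnd(const u32 smss)
    {
        return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
    }

    /* e.g. a typical MSS of 1460 gives 3 packets - the same result the
     * removed clamp(4380U / 1460, 2U, 4U) produced, but derived from
     * the RFC table rather than an inline magic constant. */
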
@@ -723,15 +654,11 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
723 if (ccid2_hc_tx_alloc_seq(hc)) 654 if (ccid2_hc_tx_alloc_seq(hc))
724 return -ENOMEM; 655 return -ENOMEM;
725 656
726 hc->tx_rto = 3 * HZ; 657 hc->tx_rto = DCCP_TIMEOUT_INIT;
727 ccid2_change_srtt(hc, -1);
728 hc->tx_rttvar = -1;
729 hc->tx_rpdupack = -1; 658 hc->tx_rpdupack = -1;
730 hc->tx_last_cong = jiffies; 659 hc->tx_last_cong = ccid2_time_stamp;
731 setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, 660 setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
732 (unsigned long)sk); 661 (unsigned long)sk);
733
734 ccid2_hc_tx_check_sanity(hc);
735 return 0; 662 return 0;
736} 663}
737 664
@@ -740,7 +667,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
740 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 667 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
741 int i; 668 int i;
742 669
743 ccid2_hc_tx_kill_rto_timer(sk); 670 sk_stop_timer(sk, &hc->tx_rtotimer);
744 671
745 for (i = 0; i < hc->tx_seqbufc; i++) 672 for (i = 0; i < hc->tx_seqbufc; i++)
746 kfree(hc->tx_seqbuf[i]); 673 kfree(hc->tx_seqbuf[i]);
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 1ec6a30103bb..9731c2dc1487 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -18,18 +18,23 @@
18#ifndef _DCCP_CCID2_H_ 18#ifndef _DCCP_CCID2_H_
19#define _DCCP_CCID2_H_ 19#define _DCCP_CCID2_H_
20 20
21#include <linux/dccp.h>
22#include <linux/timer.h> 21#include <linux/timer.h>
23#include <linux/types.h> 22#include <linux/types.h>
24#include "../ccid.h" 23#include "../ccid.h"
24#include "../dccp.h"
25
26/*
27 * CCID-2 timestamping faces the same issues as TCP timestamping.
28 * Hence we reuse/share as much of the code as possible.
29 */
30#define ccid2_time_stamp tcp_time_stamp
31
25/* NUMDUPACK parameter from RFC 4341, p. 6 */ 32/* NUMDUPACK parameter from RFC 4341, p. 6 */
26#define NUMDUPACK 3 33#define NUMDUPACK 3
27 34
28struct sock;
29
30struct ccid2_seq { 35struct ccid2_seq {
31 u64 ccid2s_seq; 36 u64 ccid2s_seq;
32 unsigned long ccid2s_sent; 37 u32 ccid2s_sent;
33 int ccid2s_acked; 38 int ccid2s_acked;
34 struct ccid2_seq *ccid2s_prev; 39 struct ccid2_seq *ccid2s_prev;
35 struct ccid2_seq *ccid2s_next; 40 struct ccid2_seq *ccid2s_next;
@@ -42,7 +47,12 @@ struct ccid2_seq {
42 * struct ccid2_hc_tx_sock - CCID2 TX half connection 47 * struct ccid2_hc_tx_sock - CCID2 TX half connection
43 * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5 48 * @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
44 * @tx_packets_acked: Ack counter for deriving cwnd growth (RFC 3465) 49 * @tx_packets_acked: Ack counter for deriving cwnd growth (RFC 3465)
45 * @tx_lastrtt: time RTT was last measured 50 * @tx_srtt: smoothed RTT estimate, scaled by 2^3
51 * @tx_mdev: smoothed RTT variation, scaled by 2^2
52 * @tx_mdev_max: maximum of @mdev during one flight
53 * @tx_rttvar: moving average/maximum of @mdev_max
54 * @tx_rto: RTO value deriving from SRTT and RTTVAR (RFC 2988)
55 * @tx_rtt_seq: to decay RTTVAR at most once per flight
46 * @tx_rpseq: last consecutive seqno 56 * @tx_rpseq: last consecutive seqno
47 * @tx_rpdupack: dupacks since rpseq 57 * @tx_rpdupack: dupacks since rpseq
48 */ 58 */
@@ -55,14 +65,19 @@ struct ccid2_hc_tx_sock {
55 int tx_seqbufc; 65 int tx_seqbufc;
56 struct ccid2_seq *tx_seqh; 66 struct ccid2_seq *tx_seqh;
57 struct ccid2_seq *tx_seqt; 67 struct ccid2_seq *tx_seqt;
58 long tx_rto; 68
59 long tx_srtt; 69 /* RTT measurement: variables/principles are the same as in TCP */
60 long tx_rttvar; 70 u32 tx_srtt,
61 unsigned long tx_lastrtt; 71 tx_mdev,
72 tx_mdev_max,
73 tx_rttvar,
74 tx_rto;
75 u64 tx_rtt_seq:48;
62 struct timer_list tx_rtotimer; 76 struct timer_list tx_rtotimer;
77
63 u64 tx_rpseq; 78 u64 tx_rpseq;
64 int tx_rpdupack; 79 int tx_rpdupack;
65 unsigned long tx_last_cong; 80 u32 tx_last_cong;
66 u64 tx_high_ack; 81 u64 tx_high_ack;
67}; 82};
68 83
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 95f752986497..c3f3a25bbd7a 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -54,7 +54,6 @@ static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
54 [TFRC_SSTATE_NO_SENT] = "NO_SENT", 54 [TFRC_SSTATE_NO_SENT] = "NO_SENT",
55 [TFRC_SSTATE_NO_FBACK] = "NO_FBACK", 55 [TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
56 [TFRC_SSTATE_FBACK] = "FBACK", 56 [TFRC_SSTATE_FBACK] = "FBACK",
57 [TFRC_SSTATE_TERM] = "TERM",
58 }; 57 };
59 58
60 return ccid3_state_names[state]; 59 return ccid3_state_names[state];
@@ -91,19 +90,16 @@ static inline u64 rfc3390_initial_rate(struct sock *sk)
91 return scaled_div(w_init << 6, hc->tx_rtt); 90 return scaled_div(w_init << 6, hc->tx_rtt);
92} 91}
93 92
94/* 93/**
95 * Recalculate t_ipi and delta (should be called whenever X changes) 94 * ccid3_update_send_interval - Calculate new t_ipi = s / X_inst
95 * This respects the granularity of X_inst (64 * bytes/second).
96 */ 96 */
97static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc) 97static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
98{ 98{
99 /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */
100 hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x); 99 hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x);
101 100
102 /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */ 101 ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
103 hc->tx_delta = min_t(u32, hc->tx_t_ipi / 2, TFRC_OPSYS_HALF_TIME_GRAN); 102 hc->tx_s, (unsigned)(hc->tx_x >> 6));
104
105 ccid3_pr_debug("t_ipi=%u, delta=%u, s=%u, X=%u\n", hc->tx_t_ipi,
106 hc->tx_delta, hc->tx_s, (unsigned)(hc->tx_x >> 6));
107} 103}
108 104
109static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now) 105static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
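
With X kept scaled by 2^6 and scaled_div32() computing (a * 1000000) / b (see ccids/lib/tfrc.h), the new t_ipi comes out directly in microseconds. A worked example with illustrative numbers:

    /* s = 1460 bytes, X = 125000 bytes/s (1 Mbit/s), stored as
     * tx_x = 125000 << 6:
     *
     *   t_ipi = scaled_div32(((u64)1460) << 6, 125000 << 6)
     *         = (1460 * 1000000) / 125000
     *         = 11680 microseconds
     *
     * i.e. one full-sized packet roughly every 11.7 ms. */
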
@@ -211,16 +207,19 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
211 ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk, 207 ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk,
212 ccid3_tx_state_name(hc->tx_state)); 208 ccid3_tx_state_name(hc->tx_state));
213 209
210 /* Ignore and do not restart after leaving the established state */
211 if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
212 goto out;
213
214 /* Reset feedback state to "no feedback received" */
214 if (hc->tx_state == TFRC_SSTATE_FBACK) 215 if (hc->tx_state == TFRC_SSTATE_FBACK)
215 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); 216 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
216 else if (hc->tx_state != TFRC_SSTATE_NO_FBACK)
217 goto out;
218 217
219 /* 218 /*
220 * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4 219 * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4
220 * RTO is 0 if and only if no feedback has been received yet.
221 */ 221 */
222 if (hc->tx_t_rto == 0 || /* no feedback received yet */ 222 if (hc->tx_t_rto == 0 || hc->tx_p == 0) {
223 hc->tx_p == 0) {
224 223
225 /* halve send rate directly */ 224 /* halve send rate directly */
226 hc->tx_x = max(hc->tx_x / 2, 225 hc->tx_x = max(hc->tx_x / 2,
@@ -256,7 +255,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
256 * Set new timeout for the nofeedback timer. 255 * Set new timeout for the nofeedback timer.
257 * See comments in packet_recv() regarding the value of t_RTO. 256 * See comments in packet_recv() regarding the value of t_RTO.
258 */ 257 */
259 if (unlikely(hc->tx_t_rto == 0)) /* no feedback yet */ 258 if (unlikely(hc->tx_t_rto == 0)) /* no feedback received yet */
260 t_nfb = TFRC_INITIAL_TIMEOUT; 259 t_nfb = TFRC_INITIAL_TIMEOUT;
261 else 260 else
262 t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi); 261 t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
@@ -290,8 +289,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
290 if (unlikely(skb->len == 0)) 289 if (unlikely(skb->len == 0))
291 return -EBADMSG; 290 return -EBADMSG;
292 291
293 switch (hc->tx_state) { 292 if (hc->tx_state == TFRC_SSTATE_NO_SENT) {
294 case TFRC_SSTATE_NO_SENT:
295 sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies + 293 sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies +
296 usecs_to_jiffies(TFRC_INITIAL_TIMEOUT))); 294 usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
297 hc->tx_last_win_count = 0; 295 hc->tx_last_win_count = 0;
@@ -326,27 +324,22 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
326 ccid3_update_send_interval(hc); 324 ccid3_update_send_interval(hc);
327 325
328 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); 326 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
329 break; 327
330 case TFRC_SSTATE_NO_FBACK: 328 } else {
331 case TFRC_SSTATE_FBACK:
332 delay = ktime_us_delta(hc->tx_t_nom, now); 329 delay = ktime_us_delta(hc->tx_t_nom, now);
333 ccid3_pr_debug("delay=%ld\n", (long)delay); 330 ccid3_pr_debug("delay=%ld\n", (long)delay);
334 /* 331 /*
335 * Scheduling of packet transmissions [RFC 3448, 4.6] 332 * Scheduling of packet transmissions (RFC 5348, 8.3)
336 * 333 *
337 * if (t_now > t_nom - delta) 334 * if (t_now > t_nom - delta)
338 * // send the packet now 335 * // send the packet now
339 * else 336 * else
340 * // send the packet in (t_nom - t_now) milliseconds. 337 * // send the packet in (t_nom - t_now) milliseconds.
341 */ 338 */
342 if (delay - (s64)hc->tx_delta >= 1000) 339 if (delay >= TFRC_T_DELTA)
343 return (u32)delay / 1000L; 340 return (u32)delay / USEC_PER_MSEC;
344 341
345 ccid3_hc_tx_update_win_count(hc, now); 342 ccid3_hc_tx_update_win_count(hc, now);
346 break;
347 case TFRC_SSTATE_TERM:
348 DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
349 return -EINVAL;
350 } 343 }
351 344
352 /* prepare to send now (add options etc.) */ 345 /* prepare to send now (add options etc.) */
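
The hunk above implements the send scheduler of RFC 5348, 8.3: transmit at once when the nominal send time t_nom is less than t_delta away, otherwise report how many whole milliseconds remain. A minimal userspace sketch of that decision, assuming microsecond timestamps and the HZ >= 500 value of t_delta (the names below are illustrative, not the in-tree ones):

    #include <stdio.h>
    #include <stdint.h>

    #define T_DELTA_US 1000 /* t_delta for HZ >= 500, in microseconds */

    /* Return 0 to send immediately, else the wait time in whole ms. */
    static uint32_t tfrc_schedule(int64_t t_nom_us, int64_t t_now_us)
    {
        int64_t delay = t_nom_us - t_now_us;

        if (delay < T_DELTA_US) /* within t_delta of t_nom: send now */
            return 0;
        return (uint32_t)(delay / 1000);
    }

    int main(void)
    {
        printf("%u\n", tfrc_schedule(5000, 1000)); /* 4 ms early -> 4 */
        printf("%u\n", tfrc_schedule(1500, 1000)); /* inside t_delta -> 0 */
        return 0;
    }
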
@@ -372,48 +365,34 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
372static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) 365static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
373{ 366{
374 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); 367 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
375 struct ccid3_options_received *opt_recv; 368 struct tfrc_tx_hist_entry *acked;
376 ktime_t now; 369 ktime_t now;
377 unsigned long t_nfb; 370 unsigned long t_nfb;
378 u32 pinv, r_sample; 371 u32 r_sample;
379 372
380 /* we are only interested in ACKs */ 373 /* we are only interested in ACKs */
381 if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK || 374 if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
382 DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK)) 375 DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
383 return; 376 return;
384 /* ... and only in the established state */
385 if (hc->tx_state != TFRC_SSTATE_FBACK &&
386 hc->tx_state != TFRC_SSTATE_NO_FBACK)
387 return;
388
389 opt_recv = &hc->tx_options_received;
390 now = ktime_get_real();
391
392 /* Estimate RTT from history if ACK number is valid */
393 r_sample = tfrc_tx_hist_rtt(hc->tx_hist,
394 DCCP_SKB_CB(skb)->dccpd_ack_seq, now);
395 if (r_sample == 0) {
396 DCCP_WARN("%s(%p): %s with bogus ACK-%llu\n", dccp_role(sk), sk,
397 dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type),
398 (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq);
399 return;
400 }
401
402 /* Update receive rate in units of 64 * bytes/second */
403 hc->tx_x_recv = opt_recv->ccid3or_receive_rate;
404 hc->tx_x_recv <<= 6;
405
406 /* Update loss event rate (which is scaled by 1e6) */
407 pinv = opt_recv->ccid3or_loss_event_rate;
408 if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */
409 hc->tx_p = 0;
410 else /* can not exceed 100% */
411 hc->tx_p = scaled_div(1, pinv);
412 /* 377 /*
413 * Validate new RTT sample and update moving average 378 * Locate the acknowledged packet in the TX history.
379 *
 380 * An "entry not found" result can occur, for instance, when
381 * - the host has not sent out anything (e.g. a passive server),
382 * - the Ack is outdated (packet with higher Ack number was received),
383 * - it is a bogus Ack (for a packet not sent on this connection).
414 */ 384 */
415 r_sample = dccp_sample_rtt(sk, r_sample); 385 acked = tfrc_tx_hist_find_entry(hc->tx_hist, dccp_hdr_ack_seq(skb));
386 if (acked == NULL)
387 return;
388 /* For the sake of RTT sampling, ignore/remove all older entries */
389 tfrc_tx_hist_purge(&acked->next);
390
391 /* Update the moving average for the RTT estimate (RFC 3448, 4.3) */
392 now = ktime_get_real();
393 r_sample = dccp_sample_rtt(sk, ktime_us_delta(now, acked->stamp));
416 hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9); 394 hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9);
395
417 /* 396 /*
418 * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3 397 * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3
419 */ 398 */
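
The RTT update a few lines above is a standard exponentially weighted moving average: with weight 9, nine tenths of the previous estimate are kept and one tenth of the new sample is blended in. A sketch of the filter under that reading (the in-tree helper is tfrc_ewma() in ccids/lib/tfrc.h; the exact arithmetic below is this sketch's assumption):

    #include <stdint.h>

    /* EWMA in tenths: avg' = (weight*avg + (10 - weight)*newval) / 10.
     * The first sample initialises the average directly. Values are
     * assumed small enough (microsecond RTTs) that the products fit
     * in 32 bits. */
    static uint32_t ewma_tenths(uint32_t avg, uint32_t newval, uint8_t weight)
    {
        return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;
    }
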
@@ -461,13 +440,12 @@ done_computing_x:
461 sk->sk_write_space(sk); 440 sk->sk_write_space(sk);
462 441
463 /* 442 /*
464 * Update timeout interval for the nofeedback timer. 443 * Update timeout interval for the nofeedback timer. In order to control
 465 * We use a configuration option to increase the lower bound. 444 * rate halving on networks with very low RTTs (<= 1 ms), use the
 466 * This can help avoid triggering the nofeedback timer too 445 * per-route tunable RTAX_RTO_MIN value as the lower bound.
467 * often ('spinning') on LANs with small RTTs.
468 */ 446 */
469 hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt, (CONFIG_IP_DCCP_CCID3_RTO * 447 hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt,
470 (USEC_PER_SEC / 1000))); 448 USEC_PER_SEC/HZ * tcp_rto_min(sk));
471 /* 449 /*
472 * Schedule no feedback timer to expire in 450 * Schedule no feedback timer to expire in
473 * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi) 451 * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi)
@@ -482,66 +460,41 @@ done_computing_x:
482 jiffies + usecs_to_jiffies(t_nfb)); 460 jiffies + usecs_to_jiffies(t_nfb));
483} 461}
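
Two lower bounds interact in the hunk above: t_RTO is at least four RTTs but never below the per-route minimum RTO, and the nofeedback timer itself fires no earlier than two inter-packet intervals. A worked example with assumed numbers:

    #include <stdio.h>
    #include <stdint.h>

    #define max_u32(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        uint32_t rtt = 200, rto_min = 2000;  /* microseconds, assumed */
        uint32_t t_ipi = 5000;               /* inter-packet interval */
        uint32_t t_rto = max_u32(4 * rtt, rto_min);  /* 800 -> 2000 us */
        uint32_t t_nfb = max_u32(t_rto, 2 * t_ipi);  /* -> 10000 us */

        printf("t_RTO=%u us, t_nfb=%u us\n", t_rto, t_nfb);
        return 0;
    }
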
484 462
485static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option, 463static int ccid3_hc_tx_parse_options(struct sock *sk, u8 packet_type,
486 unsigned char len, u16 idx, 464 u8 option, u8 *optval, u8 optlen)
487 unsigned char *value)
488{ 465{
489 int rc = 0;
490 const struct dccp_sock *dp = dccp_sk(sk);
491 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); 466 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
492 struct ccid3_options_received *opt_recv;
493 __be32 opt_val; 467 __be32 opt_val;
494 468
495 opt_recv = &hc->tx_options_received;
496
497 if (opt_recv->ccid3or_seqno != dp->dccps_gsr) {
498 opt_recv->ccid3or_seqno = dp->dccps_gsr;
499 opt_recv->ccid3or_loss_event_rate = ~0;
500 opt_recv->ccid3or_loss_intervals_idx = 0;
501 opt_recv->ccid3or_loss_intervals_len = 0;
502 opt_recv->ccid3or_receive_rate = 0;
503 }
504
505 switch (option) { 469 switch (option) {
470 case TFRC_OPT_RECEIVE_RATE:
506 case TFRC_OPT_LOSS_EVENT_RATE: 471 case TFRC_OPT_LOSS_EVENT_RATE:
507 if (unlikely(len != 4)) { 472 /* Must be ignored on Data packets, cf. RFC 4342 8.3 and 8.5 */
508 DCCP_WARN("%s(%p), invalid len %d " 473 if (packet_type == DCCP_PKT_DATA)
509 "for TFRC_OPT_LOSS_EVENT_RATE\n", 474 break;
510 dccp_role(sk), sk, len); 475 if (unlikely(optlen != 4)) {
511 rc = -EINVAL; 476 DCCP_WARN("%s(%p), invalid len %d for %u\n",
512 } else { 477 dccp_role(sk), sk, optlen, option);
513 opt_val = get_unaligned((__be32 *)value); 478 return -EINVAL;
514 opt_recv->ccid3or_loss_event_rate = ntohl(opt_val);
515 ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
516 dccp_role(sk), sk,
517 opt_recv->ccid3or_loss_event_rate);
518 } 479 }
519 break; 480 opt_val = ntohl(get_unaligned((__be32 *)optval));
520 case TFRC_OPT_LOSS_INTERVALS: 481
521 opt_recv->ccid3or_loss_intervals_idx = idx; 482 if (option == TFRC_OPT_RECEIVE_RATE) {
522 opt_recv->ccid3or_loss_intervals_len = len; 483 /* Receive Rate is kept in units of 64 bytes/second */
523 ccid3_pr_debug("%s(%p), LOSS_INTERVALS=(%u, %u)\n", 484 hc->tx_x_recv = opt_val;
524 dccp_role(sk), sk, 485 hc->tx_x_recv <<= 6;
525 opt_recv->ccid3or_loss_intervals_idx, 486
526 opt_recv->ccid3or_loss_intervals_len);
527 break;
528 case TFRC_OPT_RECEIVE_RATE:
529 if (unlikely(len != 4)) {
530 DCCP_WARN("%s(%p), invalid len %d "
531 "for TFRC_OPT_RECEIVE_RATE\n",
532 dccp_role(sk), sk, len);
533 rc = -EINVAL;
534 } else {
535 opt_val = get_unaligned((__be32 *)value);
536 opt_recv->ccid3or_receive_rate = ntohl(opt_val);
537 ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n", 487 ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
538 dccp_role(sk), sk, 488 dccp_role(sk), sk, opt_val);
539 opt_recv->ccid3or_receive_rate); 489 } else {
490 /* Update the fixpoint Loss Event Rate fraction */
491 hc->tx_p = tfrc_invert_loss_event_rate(opt_val);
492
493 ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
494 dccp_role(sk), sk, opt_val);
540 } 495 }
541 break;
542 } 496 }
543 497 return 0;
544 return rc;
545} 498}
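
The rewritten option parser above handles both receiver options uniformly: it skips them on Data packets, demands a 4-byte value, converts from network byte order, and then either shifts the receive rate into units of 64 bytes/second or inverts the loss-event rate. The value handling can be sketched in isolation (helper names here are hypothetical):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Decode a 4-byte option value that may sit unaligned in the packet. */
    static uint32_t opt_val32(const uint8_t *optval)
    {
        uint32_t v;

        memcpy(&v, optval, sizeof(v)); /* safe unaligned read */
        return ntohl(v);
    }

    /* Receive Rate option: kept internally in units of 64 bytes/second. */
    static uint64_t scale_receive_rate(uint32_t bytes_per_sec)
    {
        return (uint64_t)bytes_per_sec << 6;
    }
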
546 499
547static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) 500static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -559,42 +512,36 @@ static void ccid3_hc_tx_exit(struct sock *sk)
559{ 512{
560 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); 513 struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
561 514
562 ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
563 sk_stop_timer(sk, &hc->tx_no_feedback_timer); 515 sk_stop_timer(sk, &hc->tx_no_feedback_timer);
564
565 tfrc_tx_hist_purge(&hc->tx_hist); 516 tfrc_tx_hist_purge(&hc->tx_hist);
566} 517}
567 518
568static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) 519static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
569{ 520{
570 struct ccid3_hc_tx_sock *hc; 521 info->tcpi_rto = ccid3_hc_tx_sk(sk)->tx_t_rto;
571 522 info->tcpi_rtt = ccid3_hc_tx_sk(sk)->tx_rtt;
572 /* Listen socks doesn't have a private CCID block */
573 if (sk->sk_state == DCCP_LISTEN)
574 return;
575
576 hc = ccid3_hc_tx_sk(sk);
577 info->tcpi_rto = hc->tx_t_rto;
578 info->tcpi_rtt = hc->tx_rtt;
579} 523}
580 524
581static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, 525static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
582 u32 __user *optval, int __user *optlen) 526 u32 __user *optval, int __user *optlen)
583{ 527{
584 const struct ccid3_hc_tx_sock *hc; 528 const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
529 struct tfrc_tx_info tfrc;
585 const void *val; 530 const void *val;
586 531
587 /* Listen socks doesn't have a private CCID block */
588 if (sk->sk_state == DCCP_LISTEN)
589 return -EINVAL;
590
591 hc = ccid3_hc_tx_sk(sk);
592 switch (optname) { 532 switch (optname) {
593 case DCCP_SOCKOPT_CCID_TX_INFO: 533 case DCCP_SOCKOPT_CCID_TX_INFO:
594 if (len < sizeof(hc->tx_tfrc)) 534 if (len < sizeof(tfrc))
595 return -EINVAL; 535 return -EINVAL;
596 len = sizeof(hc->tx_tfrc); 536 tfrc.tfrctx_x = hc->tx_x;
597 val = &hc->tx_tfrc; 537 tfrc.tfrctx_x_recv = hc->tx_x_recv;
538 tfrc.tfrctx_x_calc = hc->tx_x_calc;
539 tfrc.tfrctx_rtt = hc->tx_rtt;
540 tfrc.tfrctx_p = hc->tx_p;
541 tfrc.tfrctx_rto = hc->tx_t_rto;
542 tfrc.tfrctx_ipi = hc->tx_t_ipi;
543 len = sizeof(tfrc);
544 val = &tfrc;
598 break; 545 break;
599 default: 546 default:
600 return -ENOPROTOOPT; 547 return -ENOPROTOOPT;
@@ -624,7 +571,6 @@ static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
624 static const char *const ccid3_rx_state_names[] = { 571 static const char *const ccid3_rx_state_names[] = {
625 [TFRC_RSTATE_NO_DATA] = "NO_DATA", 572 [TFRC_RSTATE_NO_DATA] = "NO_DATA",
626 [TFRC_RSTATE_DATA] = "DATA", 573 [TFRC_RSTATE_DATA] = "DATA",
627 [TFRC_RSTATE_TERM] = "TERM",
628 }; 574 };
629 575
630 return ccid3_rx_state_names[state]; 576 return ccid3_rx_state_names[state];
@@ -650,14 +596,9 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
650{ 596{
651 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); 597 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
652 struct dccp_sock *dp = dccp_sk(sk); 598 struct dccp_sock *dp = dccp_sk(sk);
653 ktime_t now; 599 ktime_t now = ktime_get_real();
654 s64 delta = 0; 600 s64 delta = 0;
655 601
656 if (unlikely(hc->rx_state == TFRC_RSTATE_TERM))
657 return;
658
659 now = ktime_get_real();
660
661 switch (fbtype) { 602 switch (fbtype) {
662 case CCID3_FBACK_INITIAL: 603 case CCID3_FBACK_INITIAL:
663 hc->rx_x_recv = 0; 604 hc->rx_x_recv = 0;
@@ -701,14 +642,12 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
701 642
702static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) 643static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
703{ 644{
704 const struct ccid3_hc_rx_sock *hc; 645 const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
705 __be32 x_recv, pinv; 646 __be32 x_recv, pinv;
706 647
707 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) 648 if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
708 return 0; 649 return 0;
709 650
710 hc = ccid3_hc_rx_sk(sk);
711
712 if (dccp_packet_without_ack(skb)) 651 if (dccp_packet_without_ack(skb))
713 return 0; 652 return 0;
714 653
@@ -749,10 +688,11 @@ static u32 ccid3_first_li(struct sock *sk)
749 x_recv = scaled_div32(hc->rx_bytes_recv, delta); 688 x_recv = scaled_div32(hc->rx_bytes_recv, delta);
750 if (x_recv == 0) { /* would also trigger divide-by-zero */ 689 if (x_recv == 0) { /* would also trigger divide-by-zero */
751 DCCP_WARN("X_recv==0\n"); 690 DCCP_WARN("X_recv==0\n");
752 if ((x_recv = hc->rx_x_recv) == 0) { 691 if (hc->rx_x_recv == 0) {
753 DCCP_BUG("stored value of X_recv is zero"); 692 DCCP_BUG("stored value of X_recv is zero");
754 return ~0U; 693 return ~0U;
755 } 694 }
695 x_recv = hc->rx_x_recv;
756 } 696 }
757 697
758 fval = scaled_div(hc->rx_s, hc->rx_rtt); 698 fval = scaled_div(hc->rx_s, hc->rx_rtt);
@@ -862,46 +802,31 @@ static void ccid3_hc_rx_exit(struct sock *sk)
862{ 802{
863 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); 803 struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
864 804
865 ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);
866
867 tfrc_rx_hist_purge(&hc->rx_hist); 805 tfrc_rx_hist_purge(&hc->rx_hist);
868 tfrc_lh_cleanup(&hc->rx_li_hist); 806 tfrc_lh_cleanup(&hc->rx_li_hist);
869} 807}
870 808
871static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) 809static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
872{ 810{
873 const struct ccid3_hc_rx_sock *hc; 811 info->tcpi_ca_state = ccid3_hc_rx_sk(sk)->rx_state;
874
875 /* Listen socks doesn't have a private CCID block */
876 if (sk->sk_state == DCCP_LISTEN)
877 return;
878
879 hc = ccid3_hc_rx_sk(sk);
880 info->tcpi_ca_state = hc->rx_state;
881 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 812 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
882 info->tcpi_rcv_rtt = hc->rx_rtt; 813 info->tcpi_rcv_rtt = ccid3_hc_rx_sk(sk)->rx_rtt;
883} 814}
884 815
885static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, 816static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
886 u32 __user *optval, int __user *optlen) 817 u32 __user *optval, int __user *optlen)
887{ 818{
888 const struct ccid3_hc_rx_sock *hc; 819 const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
889 struct tfrc_rx_info rx_info; 820 struct tfrc_rx_info rx_info;
890 const void *val; 821 const void *val;
891 822
892 /* Listen socks doesn't have a private CCID block */
893 if (sk->sk_state == DCCP_LISTEN)
894 return -EINVAL;
895
896 hc = ccid3_hc_rx_sk(sk);
897 switch (optname) { 823 switch (optname) {
898 case DCCP_SOCKOPT_CCID_RX_INFO: 824 case DCCP_SOCKOPT_CCID_RX_INFO:
899 if (len < sizeof(rx_info)) 825 if (len < sizeof(rx_info))
900 return -EINVAL; 826 return -EINVAL;
901 rx_info.tfrcrx_x_recv = hc->rx_x_recv; 827 rx_info.tfrcrx_x_recv = hc->rx_x_recv;
902 rx_info.tfrcrx_rtt = hc->rx_rtt; 828 rx_info.tfrcrx_rtt = hc->rx_rtt;
903 rx_info.tfrcrx_p = hc->rx_pinv == 0 ? ~0U : 829 rx_info.tfrcrx_p = tfrc_invert_loss_event_rate(hc->rx_pinv);
904 scaled_div(1, hc->rx_pinv);
905 len = sizeof(rx_info); 830 len = sizeof(rx_info);
906 val = &rx_info; 831 val = &rx_info;
907 break; 832 break;
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 032635776653..1a9933c29672 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -42,35 +42,36 @@
42#include "lib/tfrc.h" 42#include "lib/tfrc.h"
43#include "../ccid.h" 43#include "../ccid.h"
44 44
45/* Two seconds as per RFC 3448 4.2 */ 45/* Two seconds as per RFC 5348, 4.2 */
46#define TFRC_INITIAL_TIMEOUT (2 * USEC_PER_SEC) 46#define TFRC_INITIAL_TIMEOUT (2 * USEC_PER_SEC)
47 47
48/* In usecs - half the scheduling granularity as per RFC3448 4.6 */
49#define TFRC_OPSYS_HALF_TIME_GRAN (USEC_PER_SEC / (2 * HZ))
50
51/* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */ 48/* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */
52#define TFRC_T_MBI 64 49#define TFRC_T_MBI 64
53 50
51/*
52 * The t_delta parameter (RFC 5348, 8.3): delays of less than %USEC_PER_MSEC are
53 * rounded down to 0, since sk_reset_timer() here uses millisecond granularity.
54 * Hence we can use a constant t_delta = %USEC_PER_MSEC when HZ >= 500. A coarse
55 * resolution of HZ < 500 means that the error is below one timer tick (t_gran)
56 * when using the constant t_delta = t_gran / 2 = %USEC_PER_SEC / (2 * HZ).
57 */
58#if (HZ >= 500)
59# define TFRC_T_DELTA USEC_PER_MSEC
60#else
61# define TFRC_T_DELTA (USEC_PER_SEC / (2 * HZ))
62#endif
63
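
A quick check of the bound above: for HZ below 500 the half-tick t_gran/2 exceeds one millisecond, so the HZ-dependent branch is the one that matters; from HZ >= 500 onwards the constant 1 ms dominates. A small program evaluating the same expression for a few sample HZ values:

    #include <stdio.h>

    #define USEC_PER_SEC  1000000
    #define USEC_PER_MSEC 1000

    static unsigned t_delta(unsigned hz)
    {
        /* mirrors the header: constant 1 ms once HZ >= 500 */
        return hz >= 500 ? USEC_PER_MSEC : USEC_PER_SEC / (2 * hz);
    }

    int main(void)
    {
        printf("HZ=100  -> %u us\n", t_delta(100));  /* 5000 */
        printf("HZ=250  -> %u us\n", t_delta(250));  /* 2000 */
        printf("HZ=1000 -> %u us\n", t_delta(1000)); /* 1000 */
        return 0;
    }
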
54enum ccid3_options { 64enum ccid3_options {
55 TFRC_OPT_LOSS_EVENT_RATE = 192, 65 TFRC_OPT_LOSS_EVENT_RATE = 192,
56 TFRC_OPT_LOSS_INTERVALS = 193, 66 TFRC_OPT_LOSS_INTERVALS = 193,
57 TFRC_OPT_RECEIVE_RATE = 194, 67 TFRC_OPT_RECEIVE_RATE = 194,
58}; 68};
59 69
60struct ccid3_options_received {
61 u64 ccid3or_seqno:48,
62 ccid3or_loss_intervals_idx:16;
63 u16 ccid3or_loss_intervals_len;
64 u32 ccid3or_loss_event_rate;
65 u32 ccid3or_receive_rate;
66};
67
68/* TFRC sender states */ 70/* TFRC sender states */
69enum ccid3_hc_tx_states { 71enum ccid3_hc_tx_states {
70 TFRC_SSTATE_NO_SENT = 1, 72 TFRC_SSTATE_NO_SENT = 1,
71 TFRC_SSTATE_NO_FBACK, 73 TFRC_SSTATE_NO_FBACK,
72 TFRC_SSTATE_FBACK, 74 TFRC_SSTATE_FBACK,
73 TFRC_SSTATE_TERM,
74}; 75};
75 76
76/** 77/**
@@ -90,19 +91,16 @@ enum ccid3_hc_tx_states {
90 * @tx_no_feedback_timer: Handle to no feedback timer 91 * @tx_no_feedback_timer: Handle to no feedback timer
91 * @tx_t_ld: Time last doubled during slow start 92 * @tx_t_ld: Time last doubled during slow start
92 * @tx_t_nom: Nominal send time of next packet 93 * @tx_t_nom: Nominal send time of next packet
93 * @tx_delta: Send timer delta (RFC 3448, 4.6) in usecs
94 * @tx_hist: Packet history 94 * @tx_hist: Packet history
95 * @tx_options_received: Parsed set of retrieved options
96 */ 95 */
97struct ccid3_hc_tx_sock { 96struct ccid3_hc_tx_sock {
98 struct tfrc_tx_info tx_tfrc; 97 u64 tx_x;
99#define tx_x tx_tfrc.tfrctx_x 98 u64 tx_x_recv;
100#define tx_x_recv tx_tfrc.tfrctx_x_recv 99 u32 tx_x_calc;
101#define tx_x_calc tx_tfrc.tfrctx_x_calc 100 u32 tx_rtt;
102#define tx_rtt tx_tfrc.tfrctx_rtt 101 u32 tx_p;
103#define tx_p tx_tfrc.tfrctx_p 102 u32 tx_t_rto;
104#define tx_t_rto tx_tfrc.tfrctx_rto 103 u32 tx_t_ipi;
105#define tx_t_ipi tx_tfrc.tfrctx_ipi
106 u16 tx_s; 104 u16 tx_s;
107 enum ccid3_hc_tx_states tx_state:8; 105 enum ccid3_hc_tx_states tx_state:8;
108 u8 tx_last_win_count; 106 u8 tx_last_win_count;
@@ -110,9 +108,7 @@ struct ccid3_hc_tx_sock {
110 struct timer_list tx_no_feedback_timer; 108 struct timer_list tx_no_feedback_timer;
111 ktime_t tx_t_ld; 109 ktime_t tx_t_ld;
112 ktime_t tx_t_nom; 110 ktime_t tx_t_nom;
113 u32 tx_delta;
114 struct tfrc_tx_hist_entry *tx_hist; 111 struct tfrc_tx_hist_entry *tx_hist;
115 struct ccid3_options_received tx_options_received;
116}; 112};
117 113
118static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk) 114static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
@@ -126,21 +122,16 @@ static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
126enum ccid3_hc_rx_states { 122enum ccid3_hc_rx_states {
127 TFRC_RSTATE_NO_DATA = 1, 123 TFRC_RSTATE_NO_DATA = 1,
128 TFRC_RSTATE_DATA, 124 TFRC_RSTATE_DATA,
129 TFRC_RSTATE_TERM = 127,
130}; 125};
131 126
132/** 127/**
133 * struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket 128 * struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket
134 * @rx_x_recv: Receiver estimate of send rate (RFC 3448 4.3)
135 * @rx_rtt: Receiver estimate of rtt (non-standard)
136 * @rx_p: Current loss event rate (RFC 3448 5.4)
137 * @rx_last_counter: Tracks window counter (RFC 4342, 8.1) 129 * @rx_last_counter: Tracks window counter (RFC 4342, 8.1)
138 * @rx_state: Receiver state, one of %ccid3_hc_rx_states 130 * @rx_state: Receiver state, one of %ccid3_hc_rx_states
139 * @rx_bytes_recv: Total sum of DCCP payload bytes 131 * @rx_bytes_recv: Total sum of DCCP payload bytes
140 * @rx_x_recv: Receiver estimate of send rate (RFC 3448, sec. 4.3) 132 * @rx_x_recv: Receiver estimate of send rate (RFC 3448, sec. 4.3)
141 * @rx_rtt: Receiver estimate of RTT 133 * @rx_rtt: Receiver estimate of RTT
142 * @rx_tstamp_last_feedback: Time at which last feedback was sent 134 * @rx_tstamp_last_feedback: Time at which last feedback was sent
143 * @rx_tstamp_last_ack: Time at which last feedback was sent
144 * @rx_hist: Packet history (loss detection + RTT sampling) 135 * @rx_hist: Packet history (loss detection + RTT sampling)
145 * @rx_li_hist: Loss Interval database 136 * @rx_li_hist: Loss Interval database
146 * @rx_s: Received packet size in bytes 137 * @rx_s: Received packet size in bytes
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 8fc3cbf79071..497723c4d4bb 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -116,7 +116,7 @@ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
116 cur->li_length = len; 116 cur->li_length = len;
117 tfrc_lh_calc_i_mean(lh); 117 tfrc_lh_calc_i_mean(lh);
118 118
119 return (lh->i_mean < old_i_mean); 119 return lh->i_mean < old_i_mean;
120} 120}
121 121
122/* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */ 122/* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index 3a4f414e94a0..de8fe294bf0b 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -38,18 +38,6 @@
38#include "packet_history.h" 38#include "packet_history.h"
39#include "../../dccp.h" 39#include "../../dccp.h"
40 40
41/**
42 * tfrc_tx_hist_entry - Simple singly-linked TX history list
43 * @next: next oldest entry (LIFO order)
44 * @seqno: sequence number of this entry
45 * @stamp: send time of packet with sequence number @seqno
46 */
47struct tfrc_tx_hist_entry {
48 struct tfrc_tx_hist_entry *next;
49 u64 seqno;
50 ktime_t stamp;
51};
52
53/* 41/*
54 * Transmitter History Routines 42 * Transmitter History Routines
55 */ 43 */
@@ -71,15 +59,6 @@ void tfrc_tx_packet_history_exit(void)
71 } 59 }
72} 60}
73 61
74static struct tfrc_tx_hist_entry *
75 tfrc_tx_hist_find_entry(struct tfrc_tx_hist_entry *head, u64 seqno)
76{
77 while (head != NULL && head->seqno != seqno)
78 head = head->next;
79
80 return head;
81}
82
83int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno) 62int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno)
84{ 63{
85 struct tfrc_tx_hist_entry *entry = kmem_cache_alloc(tfrc_tx_hist_slab, gfp_any()); 64 struct tfrc_tx_hist_entry *entry = kmem_cache_alloc(tfrc_tx_hist_slab, gfp_any());
@@ -107,24 +86,6 @@ void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp)
107 *headp = NULL; 86 *headp = NULL;
108} 87}
109 88
110u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno,
111 const ktime_t now)
112{
113 u32 rtt = 0;
114 struct tfrc_tx_hist_entry *packet = tfrc_tx_hist_find_entry(head, seqno);
115
116 if (packet != NULL) {
117 rtt = ktime_us_delta(now, packet->stamp);
118 /*
119 * Garbage-collect older (irrelevant) entries:
120 */
121 tfrc_tx_hist_purge(&packet->next);
122 }
123
124 return rtt;
125}
126
127
128/* 89/*
129 * Receiver History Routines 90 * Receiver History Routines
130 */ 91 */
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 7df6c5299999..7ee4a9d9d335 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -40,12 +40,28 @@
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include "tfrc.h" 41#include "tfrc.h"
42 42
43struct tfrc_tx_hist_entry; 43/**
44 * tfrc_tx_hist_entry - Simple singly-linked TX history list
45 * @next: next oldest entry (LIFO order)
46 * @seqno: sequence number of this entry
47 * @stamp: send time of packet with sequence number @seqno
48 */
49struct tfrc_tx_hist_entry {
50 struct tfrc_tx_hist_entry *next;
51 u64 seqno;
52 ktime_t stamp;
53};
54
55static inline struct tfrc_tx_hist_entry *
56 tfrc_tx_hist_find_entry(struct tfrc_tx_hist_entry *head, u64 seqno)
57{
58 while (head != NULL && head->seqno != seqno)
59 head = head->next;
60 return head;
61}
44 62
45extern int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno); 63extern int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
46extern void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp); 64extern void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp);
47extern u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head,
48 const u64 seqno, const ktime_t now);
49 65
50/* Subtraction a-b modulo-16, respects circular wrap-around */ 66/* Subtraction a-b modulo-16, respects circular wrap-around */
51#define SUB16(a, b) (((a) + 16 - (b)) & 0xF) 67#define SUB16(a, b) (((a) + 16 - (b)) & 0xF)
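
Inlining tfrc_tx_hist_find_entry() in the header keeps the linear search over the LIFO history visible to its callers. Together with the purge of all older entries that ccid3_hc_tx_packet_recv() performs after a hit, the data structure behaves as sketched below (plain malloc/free instead of the slab cache, simplified fields):

    #include <stdint.h>
    #include <stdlib.h>

    struct hist {
        struct hist *next; /* next oldest entry (LIFO order) */
        uint64_t seqno;
    };

    static struct hist *hist_find(struct hist *head, uint64_t seqno)
    {
        while (head != NULL && head->seqno != seqno)
            head = head->next;
        return head;
    }

    /* Free everything from *headp onwards, as tfrc_tx_hist_purge() does. */
    static void hist_purge(struct hist **headp)
    {
        struct hist *head = *headp, *next;

        for (; head != NULL; head = next) {
            next = head->next;
            free(head);
        }
        *headp = NULL;
    }
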
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index 01bb48e96c2e..f8ee3f549770 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -57,6 +57,7 @@ static inline u32 tfrc_ewma(const u32 avg, const u32 newval, const u8 weight)
57 57
58extern u32 tfrc_calc_x(u16 s, u32 R, u32 p); 58extern u32 tfrc_calc_x(u16 s, u32 R, u32 p);
59extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue); 59extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
60extern u32 tfrc_invert_loss_event_rate(u32 loss_event_rate);
60 61
61extern int tfrc_tx_packet_history_init(void); 62extern int tfrc_tx_packet_history_init(void);
62extern void tfrc_tx_packet_history_exit(void); 63extern void tfrc_tx_packet_history_exit(void);
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index 22ca1cf0eb55..a052a4377e26 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -687,3 +687,17 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
687 index = tfrc_binsearch(fvalue, 0); 687 index = tfrc_binsearch(fvalue, 0);
688 return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE; 688 return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE;
689} 689}
690
691/**
692 * tfrc_invert_loss_event_rate - Compute p so that 10^6 corresponds to 100%
693 * When @loss_event_rate is large, there is a chance that p is truncated to 0.
694 * To avoid re-entering slow-start in that case, we set p = TFRC_SMALLEST_P > 0.
695 */
696u32 tfrc_invert_loss_event_rate(u32 loss_event_rate)
697{
698 if (loss_event_rate == UINT_MAX) /* see RFC 4342, 8.5 */
699 return 0;
700 if (unlikely(loss_event_rate == 0)) /* map 1/0 into 100% */
701 return 1000000;
702 return max_t(u32, scaled_div(1, loss_event_rate), TFRC_SMALLEST_P);
703}
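
The new helper maps the on-wire inverse loss rate p_inv to the fixpoint fraction p, where 10^6 means 100%: ~0 ("no loss") gives p = 0, an illegal 0 is clamped to 100%, and everything else becomes 10^6/p_inv with TFRC_SMALLEST_P as a floor, so a huge p_inv cannot truncate to 0 and silently re-enter slow start. A standalone sketch; the floor value 40 below is an assumption, the real constant comes from the TFRC lookup table:

    #include <stdint.h>
    #include <limits.h>

    #define TFRC_SMALLEST_P 40 /* assumed floor; real value is derived
                                * from the TFRC lookup table */

    static uint32_t invert_loss_event_rate(uint32_t p_inv)
    {
        if (p_inv == UINT_MAX)  /* no loss reported: RFC 4342, 8.5 */
            return 0;
        if (p_inv == 0)         /* map 1/0 into 100% */
            return 1000000;
        uint32_t p = (uint32_t)(1000000ULL / p_inv);
        return p > TFRC_SMALLEST_P ? p : TFRC_SMALLEST_P;
    }
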
diff --git a/net/dccp/options.c b/net/dccp/options.c
index bfda087bd90d..92718511eac5 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -96,18 +96,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
96 } 96 }
97 97
98 /* 98 /*
99 * CCID-Specific Options (from RFC 4340, sec. 10.3):
100 *
101 * Option numbers 128 through 191 are for options sent from the
102 * HC-Sender to the HC-Receiver; option numbers 192 through 255
103 * are for options sent from the HC-Receiver to the HC-Sender.
104 *
105 * CCID-specific options are ignored during connection setup, as 99 * CCID-specific options are ignored during connection setup, as
106 * negotiation may still be in progress (see RFC 4340, 10.3). 100 * negotiation may still be in progress (see RFC 4340, 10.3).
107 * The same applies to Ack Vectors, as these depend on the CCID. 101 * The same applies to Ack Vectors, as these depend on the CCID.
108 *
109 */ 102 */
110 if (dreq != NULL && (opt >= 128 || 103 if (dreq != NULL && (opt >= DCCPO_MIN_RX_CCID_SPECIFIC ||
111 opt == DCCPO_ACK_VECTOR_0 || opt == DCCPO_ACK_VECTOR_1)) 104 opt == DCCPO_ACK_VECTOR_0 || opt == DCCPO_ACK_VECTOR_1))
112 goto ignore_option; 105 goto ignore_option;
113 106
@@ -226,23 +219,15 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
226 dccp_pr_debug("%s rx opt: ELAPSED_TIME=%d\n", 219 dccp_pr_debug("%s rx opt: ELAPSED_TIME=%d\n",
227 dccp_role(sk), elapsed_time); 220 dccp_role(sk), elapsed_time);
228 break; 221 break;
229 case 128 ... 191: { 222 case DCCPO_MIN_RX_CCID_SPECIFIC ... DCCPO_MAX_RX_CCID_SPECIFIC:
230 const u16 idx = value - options;
231
232 if (ccid_hc_rx_parse_options(dp->dccps_hc_rx_ccid, sk, 223 if (ccid_hc_rx_parse_options(dp->dccps_hc_rx_ccid, sk,
233 opt, len, idx, 224 pkt_type, opt, value, len))
234 value) != 0)
235 goto out_invalid_option; 225 goto out_invalid_option;
236 }
237 break; 226 break;
238 case 192 ... 255: { 227 case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC:
239 const u16 idx = value - options;
240
241 if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk, 228 if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk,
242 opt, len, idx, 229 pkt_type, opt, value, len))
243 value) != 0)
244 goto out_invalid_option; 230 goto out_invalid_option;
245 }
246 break; 231 break;
247 default: 232 default:
248 DCCP_CRIT("DCCP(%p): option %d(len=%d) not " 233 DCCP_CRIT("DCCP(%p): option %d(len=%d) not "
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index baeb1eaf011b..2ef115277bea 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -693,22 +693,22 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
693 aux = scp->accessdata.acc_userl; 693 aux = scp->accessdata.acc_userl;
694 *skb_put(skb, 1) = aux; 694 *skb_put(skb, 1) = aux;
695 if (aux > 0) 695 if (aux > 0)
696 memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux); 696 memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux);
697 697
698 aux = scp->accessdata.acc_passl; 698 aux = scp->accessdata.acc_passl;
699 *skb_put(skb, 1) = aux; 699 *skb_put(skb, 1) = aux;
700 if (aux > 0) 700 if (aux > 0)
701 memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux); 701 memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux);
702 702
703 aux = scp->accessdata.acc_accl; 703 aux = scp->accessdata.acc_accl;
704 *skb_put(skb, 1) = aux; 704 *skb_put(skb, 1) = aux;
705 if (aux > 0) 705 if (aux > 0)
706 memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux); 706 memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux);
707 707
708 aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl); 708 aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
709 *skb_put(skb, 1) = aux; 709 *skb_put(skb, 1) = aux;
710 if (aux > 0) 710 if (aux > 0)
711 memcpy(skb_put(skb,aux), scp->conndata_out.opt_data, aux); 711 memcpy(skb_put(skb, aux), scp->conndata_out.opt_data, aux);
712 712
713 scp->persist = dn_nsp_persist(sk); 713 scp->persist = dn_nsp_persist(sk);
714 scp->persist_fxn = dn_nsp_retrans_conninit; 714 scp->persist_fxn = dn_nsp_retrans_conninit;
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index dc54bd0d083b..f8c1ae4b41f0 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -392,7 +392,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
392 dev_queue_xmit(skb); 392 dev_queue_xmit(skb);
393 dev_put(dev); 393 dev_put(dev);
394 mutex_unlock(&econet_mutex); 394 mutex_unlock(&econet_mutex);
395 return(len); 395 return len;
396 396
397 out_free: 397 out_free:
398 kfree_skb(skb); 398 kfree_skb(skb);
@@ -637,7 +637,7 @@ static int econet_create(struct net *net, struct socket *sock, int protocol,
637 eo->num = protocol; 637 eo->num = protocol;
638 638
639 econet_insert_socket(&econet_sklist, sk); 639 econet_insert_socket(&econet_sklist, sk);
640 return(0); 640 return 0;
641out: 641out:
642 return err; 642 return err;
643} 643}
@@ -1009,7 +1009,6 @@ static int __init aun_udp_initialise(void)
1009 struct sockaddr_in sin; 1009 struct sockaddr_in sin;
1010 1010
1011 skb_queue_head_init(&aun_queue); 1011 skb_queue_head_init(&aun_queue);
1012 spin_lock_init(&aun_queue_lock);
1013 setup_timer(&ab_cleanup_timer, ab_cleanup, 0); 1012 setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
1014 ab_cleanup_timer.expires = jiffies + (HZ*2); 1013 ab_cleanup_timer.expires = jiffies + (HZ*2);
1015 add_timer(&ab_cleanup_timer); 1014 add_timer(&ab_cleanup_timer);
@@ -1167,7 +1166,6 @@ static int __init econet_proto_init(void)
1167 goto out; 1166 goto out;
1168 sock_register(&econet_family_ops); 1167 sock_register(&econet_family_ops);
1169#ifdef CONFIG_ECONET_AUNUDP 1168#ifdef CONFIG_ECONET_AUNUDP
1170 spin_lock_init(&aun_queue_lock);
1171 aun_udp_initialise(); 1169 aun_udp_initialise();
1172#endif 1170#endif
1173#ifdef CONFIG_ECONET_NATIVE 1171#ifdef CONFIG_ECONET_NATIVE
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 215c83986a9d..f00ef2f1d814 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -367,7 +367,7 @@ struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
367EXPORT_SYMBOL(alloc_etherdev_mq); 367EXPORT_SYMBOL(alloc_etherdev_mq);
368 368
369static size_t _format_mac_addr(char *buf, int buflen, 369static size_t _format_mac_addr(char *buf, int buflen,
370 const unsigned char *addr, int len) 370 const unsigned char *addr, int len)
371{ 371{
372 int i; 372 int i;
373 char *cp = buf; 373 char *cp = buf;
@@ -376,7 +376,7 @@ static size_t _format_mac_addr(char *buf, int buflen,
376 cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]); 376 cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]);
377 if (i == len - 1) 377 if (i == len - 1)
378 break; 378 break;
379 cp += strlcpy(cp, ":", buflen - (cp - buf)); 379 cp += scnprintf(cp, buflen - (cp - buf), ":");
380 } 380 }
381 return cp - buf; 381 return cp - buf;
382} 382}
@@ -386,7 +386,7 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
386 size_t l; 386 size_t l;
387 387
388 l = _format_mac_addr(buf, PAGE_SIZE, addr, len); 388 l = _format_mac_addr(buf, PAGE_SIZE, addr, len);
389 l += strlcpy(buf + l, "\n", PAGE_SIZE - l); 389 l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
390 return ((ssize_t) l); 390 return (ssize_t)l;
391} 391}
392EXPORT_SYMBOL(sysfs_format_mac); 392EXPORT_SYMBOL(sysfs_format_mac);
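
The strlcpy() calls replaced above were unsafe cursors: strlcpy() returns the length of the source string even when it truncates, so adding its return value to the write position can step past the buffer. scnprintf() returns the bytes actually written, which composes safely. A userspace sketch of the fixed pattern, with a small stand-in for scnprintf() (snprintf() alone reports the would-be length on truncation, so it needs clamping):

    #include <stdio.h>
    #include <stddef.h>

    /* scnprintf()-like: returns bytes written, never the would-be length */
    static size_t scn(char *buf, size_t len, const char *fmt, unsigned v)
    {
        int n;

        if (len == 0)
            return 0;
        n = snprintf(buf, len, fmt, v);
        if (n < 0)
            return 0;
        return (size_t)n < len ? (size_t)n : len - 1;
    }

    static size_t format_mac(char *buf, size_t buflen,
                             const unsigned char *addr, int len)
    {
        char *cp = buf;
        int i;

        for (i = 0; i < len; i++)
            cp += scn(cp, buflen - (cp - buf),
                      i == len - 1 ? "%02x" : "%02x:", addr[i]);
        return cp - buf;
    }
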
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 571f8950ed06..5462e2d147a6 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -215,8 +215,15 @@ config NET_IPIP
215 be inserted in and removed from the running kernel whenever you 215 be inserted in and removed from the running kernel whenever you
216 want). Most people won't need this and can say N. 216 want). Most people won't need this and can say N.
217 217
218config NET_IPGRE_DEMUX
219 tristate "IP: GRE demultiplexer"
220 help
 221	  This is a helper module that demultiplexes GRE packets based on the GRE version field.
 222	  It is required by the ip_gre and pptp modules.
223
218config NET_IPGRE 224config NET_IPGRE
219 tristate "IP: GRE tunnels over IP" 225 tristate "IP: GRE tunnels over IP"
226 depends on NET_IPGRE_DEMUX
220 help 227 help
221 Tunneling means encapsulating data of one protocol type within 228 Tunneling means encapsulating data of one protocol type within
222 another protocol and sending it over a channel that understands the 229 another protocol and sending it over a channel that understands the
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 80ff87ce43aa..4978d22f9a75 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
20obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o 20obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
21obj-$(CONFIG_IP_MROUTE) += ipmr.o 21obj-$(CONFIG_IP_MROUTE) += ipmr.o
22obj-$(CONFIG_NET_IPIP) += ipip.o 22obj-$(CONFIG_NET_IPIP) += ipip.o
23obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
23obj-$(CONFIG_NET_IPGRE) += ip_gre.o 24obj-$(CONFIG_NET_IPGRE) += ip_gre.o
24obj-$(CONFIG_SYN_COOKIES) += syncookies.o 25obj-$(CONFIG_SYN_COOKIES) += syncookies.o
25obj-$(CONFIG_INET_AH) += ah4.o 26obj-$(CONFIG_INET_AH) += ah4.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6a1100c25a9f..f581f77d1097 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -227,18 +227,16 @@ EXPORT_SYMBOL(inet_ehash_secret);
227 227
228/* 228/*
229 * inet_ehash_secret must be set exactly once 229 * inet_ehash_secret must be set exactly once
230 * Instead of using a dedicated spinlock, we (ab)use inetsw_lock
231 */ 230 */
232void build_ehash_secret(void) 231void build_ehash_secret(void)
233{ 232{
234 u32 rnd; 233 u32 rnd;
234
235 do { 235 do {
236 get_random_bytes(&rnd, sizeof(rnd)); 236 get_random_bytes(&rnd, sizeof(rnd));
237 } while (rnd == 0); 237 } while (rnd == 0);
238 spin_lock_bh(&inetsw_lock); 238
239 if (!inet_ehash_secret) 239 cmpxchg(&inet_ehash_secret, 0, rnd);
240 inet_ehash_secret = rnd;
241 spin_unlock_bh(&inetsw_lock);
242} 240}
243EXPORT_SYMBOL(build_ehash_secret); 241EXPORT_SYMBOL(build_ehash_secret);
244 242
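
Dropping the inetsw_lock in favour of cmpxchg() turns build_ehash_secret() into a lock-free set-once: whichever caller first observes zero installs its random value, and every later call leaves the secret untouched. The same idiom in portable C11, as a userspace sketch:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t ehash_secret;

    /* Install @rnd only if the secret is still unset; later calls lose
     * the race and change nothing, matching cmpxchg(ptr, 0, rnd). */
    static void set_secret_once(uint32_t rnd)
    {
        uint32_t expected = 0;

        atomic_compare_exchange_strong(&ehash_secret, &expected, rnd);
    }
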
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 96c1955b3e2f..4083c186fd30 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -55,7 +55,7 @@
55 * Stuart Cheshire : Metricom and grat arp fixes 55 * Stuart Cheshire : Metricom and grat arp fixes
56 * *** FOR 2.1 clean this up *** 56 * *** FOR 2.1 clean this up ***
57 * Lawrence V. Stefani: (08/12/96) Added FDDI support. 57 * Lawrence V. Stefani: (08/12/96) Added FDDI support.
58 * Alan Cox : Took the AP1000 nasty FDDI hack and 58 * Alan Cox : Took the AP1000 nasty FDDI hack and
59 * folded into the mainstream FDDI code. 59 * folded into the mainstream FDDI code.
60 * Ack spit, Linus how did you allow that 60 * Ack spit, Linus how did you allow that
61 * one in... 61 * one in...
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(clip_tbl_hook);
120#endif 120#endif
121 121
122#include <asm/system.h> 122#include <asm/system.h>
123#include <asm/uaccess.h> 123#include <linux/uaccess.h>
124 124
125#include <linux/netfilter_arp.h> 125#include <linux/netfilter_arp.h>
126 126
@@ -173,32 +173,32 @@ const struct neigh_ops arp_broken_ops = {
173EXPORT_SYMBOL(arp_broken_ops); 173EXPORT_SYMBOL(arp_broken_ops);
174 174
175struct neigh_table arp_tbl = { 175struct neigh_table arp_tbl = {
176 .family = AF_INET, 176 .family = AF_INET,
177 .entry_size = sizeof(struct neighbour) + 4, 177 .entry_size = sizeof(struct neighbour) + 4,
178 .key_len = 4, 178 .key_len = 4,
179 .hash = arp_hash, 179 .hash = arp_hash,
180 .constructor = arp_constructor, 180 .constructor = arp_constructor,
181 .proxy_redo = parp_redo, 181 .proxy_redo = parp_redo,
182 .id = "arp_cache", 182 .id = "arp_cache",
183 .parms = { 183 .parms = {
184 .tbl = &arp_tbl, 184 .tbl = &arp_tbl,
185 .base_reachable_time = 30 * HZ, 185 .base_reachable_time = 30 * HZ,
186 .retrans_time = 1 * HZ, 186 .retrans_time = 1 * HZ,
187 .gc_staletime = 60 * HZ, 187 .gc_staletime = 60 * HZ,
188 .reachable_time = 30 * HZ, 188 .reachable_time = 30 * HZ,
189 .delay_probe_time = 5 * HZ, 189 .delay_probe_time = 5 * HZ,
190 .queue_len = 3, 190 .queue_len = 3,
191 .ucast_probes = 3, 191 .ucast_probes = 3,
192 .mcast_probes = 3, 192 .mcast_probes = 3,
193 .anycast_delay = 1 * HZ, 193 .anycast_delay = 1 * HZ,
194 .proxy_delay = (8 * HZ) / 10, 194 .proxy_delay = (8 * HZ) / 10,
195 .proxy_qlen = 64, 195 .proxy_qlen = 64,
196 .locktime = 1 * HZ, 196 .locktime = 1 * HZ,
197 }, 197 },
198 .gc_interval = 30 * HZ, 198 .gc_interval = 30 * HZ,
199 .gc_thresh1 = 128, 199 .gc_thresh1 = 128,
200 .gc_thresh2 = 512, 200 .gc_thresh2 = 512,
201 .gc_thresh3 = 1024, 201 .gc_thresh3 = 1024,
202}; 202};
203EXPORT_SYMBOL(arp_tbl); 203EXPORT_SYMBOL(arp_tbl);
204 204
@@ -233,7 +233,7 @@ static u32 arp_hash(const void *pkey, const struct net_device *dev)
233 233
234static int arp_constructor(struct neighbour *neigh) 234static int arp_constructor(struct neighbour *neigh)
235{ 235{
236 __be32 addr = *(__be32*)neigh->primary_key; 236 __be32 addr = *(__be32 *)neigh->primary_key;
237 struct net_device *dev = neigh->dev; 237 struct net_device *dev = neigh->dev;
238 struct in_device *in_dev; 238 struct in_device *in_dev;
239 struct neigh_parms *parms; 239 struct neigh_parms *parms;
@@ -296,16 +296,19 @@ static int arp_constructor(struct neighbour *neigh)
296 neigh->ops = &arp_broken_ops; 296 neigh->ops = &arp_broken_ops;
297 neigh->output = neigh->ops->output; 297 neigh->output = neigh->ops->output;
298 return 0; 298 return 0;
299#else
300 break;
299#endif 301#endif
300 ;} 302 }
301#endif 303#endif
302 if (neigh->type == RTN_MULTICAST) { 304 if (neigh->type == RTN_MULTICAST) {
303 neigh->nud_state = NUD_NOARP; 305 neigh->nud_state = NUD_NOARP;
304 arp_mc_map(addr, neigh->ha, dev, 1); 306 arp_mc_map(addr, neigh->ha, dev, 1);
305 } else if (dev->flags&(IFF_NOARP|IFF_LOOPBACK)) { 307 } else if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) {
306 neigh->nud_state = NUD_NOARP; 308 neigh->nud_state = NUD_NOARP;
307 memcpy(neigh->ha, dev->dev_addr, dev->addr_len); 309 memcpy(neigh->ha, dev->dev_addr, dev->addr_len);
308 } else if (neigh->type == RTN_BROADCAST || dev->flags&IFF_POINTOPOINT) { 310 } else if (neigh->type == RTN_BROADCAST ||
311 (dev->flags & IFF_POINTOPOINT)) {
309 neigh->nud_state = NUD_NOARP; 312 neigh->nud_state = NUD_NOARP;
310 memcpy(neigh->ha, dev->broadcast, dev->addr_len); 313 memcpy(neigh->ha, dev->broadcast, dev->addr_len);
311 } 314 }
@@ -315,7 +318,7 @@ static int arp_constructor(struct neighbour *neigh)
315 else 318 else
316 neigh->ops = &arp_generic_ops; 319 neigh->ops = &arp_generic_ops;
317 320
318 if (neigh->nud_state&NUD_VALID) 321 if (neigh->nud_state & NUD_VALID)
319 neigh->output = neigh->ops->connected_output; 322 neigh->output = neigh->ops->connected_output;
320 else 323 else
321 neigh->output = neigh->ops->output; 324 neigh->output = neigh->ops->output;
@@ -334,7 +337,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
334 __be32 saddr = 0; 337 __be32 saddr = 0;
335 u8 *dst_ha = NULL; 338 u8 *dst_ha = NULL;
336 struct net_device *dev = neigh->dev; 339 struct net_device *dev = neigh->dev;
337 __be32 target = *(__be32*)neigh->primary_key; 340 __be32 target = *(__be32 *)neigh->primary_key;
338 int probes = atomic_read(&neigh->probes); 341 int probes = atomic_read(&neigh->probes);
339 struct in_device *in_dev; 342 struct in_device *in_dev;
340 343
@@ -347,7 +350,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
347 switch (IN_DEV_ARP_ANNOUNCE(in_dev)) { 350 switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
348 default: 351 default:
349 case 0: /* By default announce any local IP */ 352 case 0: /* By default announce any local IP */
350 if (skb && inet_addr_type(dev_net(dev), ip_hdr(skb)->saddr) == RTN_LOCAL) 353 if (skb && inet_addr_type(dev_net(dev),
354 ip_hdr(skb)->saddr) == RTN_LOCAL)
351 saddr = ip_hdr(skb)->saddr; 355 saddr = ip_hdr(skb)->saddr;
352 break; 356 break;
353 case 1: /* Restrict announcements of saddr in same subnet */ 357 case 1: /* Restrict announcements of saddr in same subnet */
@@ -369,16 +373,21 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
369 if (!saddr) 373 if (!saddr)
370 saddr = inet_select_addr(dev, target, RT_SCOPE_LINK); 374 saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);
371 375
372 if ((probes -= neigh->parms->ucast_probes) < 0) { 376 probes -= neigh->parms->ucast_probes;
373 if (!(neigh->nud_state&NUD_VALID)) 377 if (probes < 0) {
374 printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n"); 378 if (!(neigh->nud_state & NUD_VALID))
379 printk(KERN_DEBUG
380 "trying to ucast probe in NUD_INVALID\n");
375 dst_ha = neigh->ha; 381 dst_ha = neigh->ha;
376 read_lock_bh(&neigh->lock); 382 read_lock_bh(&neigh->lock);
377 } else if ((probes -= neigh->parms->app_probes) < 0) { 383 } else {
384 probes -= neigh->parms->app_probes;
385 if (probes < 0) {
378#ifdef CONFIG_ARPD 386#ifdef CONFIG_ARPD
379 neigh_app_ns(neigh); 387 neigh_app_ns(neigh);
380#endif 388#endif
381 return; 389 return;
390 }
382 } 391 }
383 392
384 arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, 393 arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
@@ -451,7 +460,8 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
451 * is allowed to use this function, it is scheduled to be removed. --ANK 460 * is allowed to use this function, it is scheduled to be removed. --ANK
452 */ 461 */
453 462
454static int arp_set_predefined(int addr_hint, unsigned char * haddr, __be32 paddr, struct net_device * dev) 463static int arp_set_predefined(int addr_hint, unsigned char *haddr,
464 __be32 paddr, struct net_device *dev)
455{ 465{
456 switch (addr_hint) { 466 switch (addr_hint) {
457 case RTN_LOCAL: 467 case RTN_LOCAL:
@@ -483,7 +493,8 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
483 493
484 paddr = skb_rtable(skb)->rt_gateway; 494 paddr = skb_rtable(skb)->rt_gateway;
485 495
486 if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev)) 496 if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr,
497 paddr, dev))
487 return 0; 498 return 0;
488 499
489 n = __neigh_lookup(&arp_tbl, &paddr, dev, 1); 500 n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);
@@ -515,13 +526,14 @@ int arp_bind_neighbour(struct dst_entry *dst)
515 return -EINVAL; 526 return -EINVAL;
516 if (n == NULL) { 527 if (n == NULL) {
517 __be32 nexthop = ((struct rtable *)dst)->rt_gateway; 528 __be32 nexthop = ((struct rtable *)dst)->rt_gateway;
518 if (dev->flags&(IFF_LOOPBACK|IFF_POINTOPOINT)) 529 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
519 nexthop = 0; 530 nexthop = 0;
520 n = __neigh_lookup_errno( 531 n = __neigh_lookup_errno(
521#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) 532#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
522 dev->type == ARPHRD_ATM ? clip_tbl_hook : 533 dev->type == ARPHRD_ATM ?
534 clip_tbl_hook :
523#endif 535#endif
524 &arp_tbl, &nexthop, dev); 536 &arp_tbl, &nexthop, dev);
525 if (IS_ERR(n)) 537 if (IS_ERR(n))
526 return PTR_ERR(n); 538 return PTR_ERR(n);
527 dst->neighbour = n; 539 dst->neighbour = n;
@@ -543,8 +555,8 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
543 555
544 if (!IN_DEV_PROXY_ARP(in_dev)) 556 if (!IN_DEV_PROXY_ARP(in_dev))
545 return 0; 557 return 0;
546 558 imi = IN_DEV_MEDIUM_ID(in_dev);
547 if ((imi = IN_DEV_MEDIUM_ID(in_dev)) == 0) 559 if (imi == 0)
548 return 1; 560 return 1;
549 if (imi == -1) 561 if (imi == -1)
550 return 0; 562 return 0;
@@ -555,7 +567,7 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
555 if (out_dev) 567 if (out_dev)
556 omi = IN_DEV_MEDIUM_ID(out_dev); 568 omi = IN_DEV_MEDIUM_ID(out_dev);
557 569
558 return (omi != imi && omi != -1); 570 return omi != imi && omi != -1;
559} 571}
560 572
561/* 573/*
@@ -685,7 +697,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
685 arp->ar_pln = 4; 697 arp->ar_pln = 4;
686 arp->ar_op = htons(type); 698 arp->ar_op = htons(type);
687 699
688 arp_ptr=(unsigned char *)(arp+1); 700 arp_ptr = (unsigned char *)(arp + 1);
689 701
690 memcpy(arp_ptr, src_hw, dev->addr_len); 702 memcpy(arp_ptr, src_hw, dev->addr_len);
691 arp_ptr += dev->addr_len; 703 arp_ptr += dev->addr_len;
@@ -735,9 +747,8 @@ void arp_send(int type, int ptype, __be32 dest_ip,
735 747
736 skb = arp_create(type, ptype, dest_ip, dev, src_ip, 748 skb = arp_create(type, ptype, dest_ip, dev, src_ip,
737 dest_hw, src_hw, target_hw); 749 dest_hw, src_hw, target_hw);
738 if (skb == NULL) { 750 if (skb == NULL)
739 return; 751 return;
740 }
741 752
742 arp_xmit(skb); 753 arp_xmit(skb);
743} 754}
@@ -815,7 +826,7 @@ static int arp_process(struct sk_buff *skb)
815/* 826/*
816 * Extract fields 827 * Extract fields
817 */ 828 */
818 arp_ptr= (unsigned char *)(arp+1); 829 arp_ptr = (unsigned char *)(arp + 1);
819 sha = arp_ptr; 830 sha = arp_ptr;
820 arp_ptr += dev->addr_len; 831 arp_ptr += dev->addr_len;
821 memcpy(&sip, arp_ptr, 4); 832 memcpy(&sip, arp_ptr, 4);
@@ -869,16 +880,17 @@ static int arp_process(struct sk_buff *skb)
869 addr_type = rt->rt_type; 880 addr_type = rt->rt_type;
870 881
871 if (addr_type == RTN_LOCAL) { 882 if (addr_type == RTN_LOCAL) {
872 int dont_send = 0; 883 int dont_send;
873 884
874 if (!dont_send) 885 dont_send = arp_ignore(in_dev, sip, tip);
875 dont_send |= arp_ignore(in_dev,sip,tip);
876 if (!dont_send && IN_DEV_ARPFILTER(in_dev)) 886 if (!dont_send && IN_DEV_ARPFILTER(in_dev))
877 dont_send |= arp_filter(sip,tip,dev); 887 dont_send |= arp_filter(sip, tip, dev);
878 if (!dont_send) { 888 if (!dont_send) {
879 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 889 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
880 if (n) { 890 if (n) {
881 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); 891 arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
892 dev, tip, sha, dev->dev_addr,
893 sha);
882 neigh_release(n); 894 neigh_release(n);
883 } 895 }
884 } 896 }
@@ -887,8 +899,7 @@ static int arp_process(struct sk_buff *skb)
887 if (addr_type == RTN_UNICAST && 899 if (addr_type == RTN_UNICAST &&
888 (arp_fwd_proxy(in_dev, dev, rt) || 900 (arp_fwd_proxy(in_dev, dev, rt) ||
889 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) || 901 arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
890 pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) 902 pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
891 {
892 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 903 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
893 if (n) 904 if (n)
894 neigh_release(n); 905 neigh_release(n);
@@ -896,9 +907,12 @@ static int arp_process(struct sk_buff *skb)
896 if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED || 907 if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
897 skb->pkt_type == PACKET_HOST || 908 skb->pkt_type == PACKET_HOST ||
898 in_dev->arp_parms->proxy_delay == 0) { 909 in_dev->arp_parms->proxy_delay == 0) {
899 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); 910 arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
911 dev, tip, sha, dev->dev_addr,
912 sha);
900 } else { 913 } else {
901 pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb); 914 pneigh_enqueue(&arp_tbl,
915 in_dev->arp_parms, skb);
902 return 0; 916 return 0;
903 } 917 }
904 goto out; 918 goto out;
@@ -939,7 +953,8 @@ static int arp_process(struct sk_buff *skb)
939 if (arp->ar_op != htons(ARPOP_REPLY) || 953 if (arp->ar_op != htons(ARPOP_REPLY) ||
940 skb->pkt_type != PACKET_HOST) 954 skb->pkt_type != PACKET_HOST)
941 state = NUD_STALE; 955 state = NUD_STALE;
942 neigh_update(n, sha, state, override ? NEIGH_UPDATE_F_OVERRIDE : 0); 956 neigh_update(n, sha, state,
957 override ? NEIGH_UPDATE_F_OVERRIDE : 0);
943 neigh_release(n); 958 neigh_release(n);
944 } 959 }
945 960
@@ -975,7 +990,8 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
975 arp->ar_pln != 4) 990 arp->ar_pln != 4)
976 goto freeskb; 991 goto freeskb;
977 992
978 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 993 skb = skb_share_check(skb, GFP_ATOMIC);
994 if (skb == NULL)
979 goto out_of_mem; 995 goto out_of_mem;
980 996
981 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb)); 997 memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
@@ -1019,7 +1035,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
1019 return -EINVAL; 1035 return -EINVAL;
1020 if (!dev && (r->arp_flags & ATF_COM)) { 1036 if (!dev && (r->arp_flags & ATF_COM)) {
1021 dev = dev_getbyhwaddr(net, r->arp_ha.sa_family, 1037 dev = dev_getbyhwaddr(net, r->arp_ha.sa_family,
1022 r->arp_ha.sa_data); 1038 r->arp_ha.sa_data);
1023 if (!dev) 1039 if (!dev)
1024 return -ENODEV; 1040 return -ENODEV;
1025 } 1041 }
@@ -1033,7 +1049,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
1033} 1049}
1034 1050
1035static int arp_req_set(struct net *net, struct arpreq *r, 1051static int arp_req_set(struct net *net, struct arpreq *r,
1036 struct net_device * dev) 1052 struct net_device *dev)
1037{ 1053{
1038 __be32 ip; 1054 __be32 ip;
1039 struct neighbour *neigh; 1055 struct neighbour *neigh;
@@ -1046,10 +1062,11 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1046 if (r->arp_flags & ATF_PERM) 1062 if (r->arp_flags & ATF_PERM)
1047 r->arp_flags |= ATF_COM; 1063 r->arp_flags |= ATF_COM;
1048 if (dev == NULL) { 1064 if (dev == NULL) {
1049 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip, 1065 struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
1050 .tos = RTO_ONLINK } } }; 1066 .tos = RTO_ONLINK } };
1051 struct rtable * rt; 1067 struct rtable *rt;
1052 if ((err = ip_route_output_key(net, &rt, &fl)) != 0) 1068 err = ip_route_output_key(net, &rt, &fl);
1069 if (err != 0)
1053 return err; 1070 return err;
1054 dev = rt->dst.dev; 1071 dev = rt->dst.dev;
1055 ip_rt_put(rt); 1072 ip_rt_put(rt);
@@ -1083,9 +1100,9 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1083 unsigned state = NUD_STALE; 1100 unsigned state = NUD_STALE;
1084 if (r->arp_flags & ATF_PERM) 1101 if (r->arp_flags & ATF_PERM)
1085 state = NUD_PERMANENT; 1102 state = NUD_PERMANENT;
1086 err = neigh_update(neigh, (r->arp_flags&ATF_COM) ? 1103 err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
1087 r->arp_ha.sa_data : NULL, state, 1104 r->arp_ha.sa_data : NULL, state,
1088 NEIGH_UPDATE_F_OVERRIDE| 1105 NEIGH_UPDATE_F_OVERRIDE |
1089 NEIGH_UPDATE_F_ADMIN); 1106 NEIGH_UPDATE_F_ADMIN);
1090 neigh_release(neigh); 1107 neigh_release(neigh);
1091 } 1108 }
@@ -1094,12 +1111,12 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1094 1111
1095static unsigned arp_state_to_flags(struct neighbour *neigh) 1112static unsigned arp_state_to_flags(struct neighbour *neigh)
1096{ 1113{
1097 unsigned flags = 0;
1098 if (neigh->nud_state&NUD_PERMANENT) 1114 if (neigh->nud_state&NUD_PERMANENT)
1099 flags = ATF_PERM|ATF_COM; 1115 return ATF_PERM | ATF_COM;
1100 else if (neigh->nud_state&NUD_VALID) 1116 else if (neigh->nud_state&NUD_VALID)
1101 flags = ATF_COM; 1117 return ATF_COM;
1102 return flags; 1118 else
1119 return 0;
1103} 1120}
1104 1121
1105/* 1122/*
@@ -1142,7 +1159,7 @@ static int arp_req_delete_public(struct net *net, struct arpreq *r,
1142} 1159}
1143 1160
1144static int arp_req_delete(struct net *net, struct arpreq *r, 1161static int arp_req_delete(struct net *net, struct arpreq *r,
1145 struct net_device * dev) 1162 struct net_device *dev)
1146{ 1163{
1147 int err; 1164 int err;
1148 __be32 ip; 1165 __be32 ip;
@@ -1153,10 +1170,11 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1153 1170
1154 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr; 1171 ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
1155 if (dev == NULL) { 1172 if (dev == NULL) {
1156 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip, 1173 struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
1157 .tos = RTO_ONLINK } } }; 1174 .tos = RTO_ONLINK } };
1158 struct rtable * rt; 1175 struct rtable *rt;
1159 if ((err = ip_route_output_key(net, &rt, &fl)) != 0) 1176 err = ip_route_output_key(net, &rt, &fl);
1177 if (err != 0)
1160 return err; 1178 return err;
1161 dev = rt->dst.dev; 1179 dev = rt->dst.dev;
1162 ip_rt_put(rt); 1180 ip_rt_put(rt);
@@ -1166,7 +1184,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1166 err = -ENXIO; 1184 err = -ENXIO;
1167 neigh = neigh_lookup(&arp_tbl, &ip, dev); 1185 neigh = neigh_lookup(&arp_tbl, &ip, dev);
1168 if (neigh) { 1186 if (neigh) {
1169 if (neigh->nud_state&~NUD_NOARP) 1187 if (neigh->nud_state & ~NUD_NOARP)
1170 err = neigh_update(neigh, NULL, NUD_FAILED, 1188 err = neigh_update(neigh, NULL, NUD_FAILED,
1171 NEIGH_UPDATE_F_OVERRIDE| 1189 NEIGH_UPDATE_F_OVERRIDE|
1172 NEIGH_UPDATE_F_ADMIN); 1190 NEIGH_UPDATE_F_ADMIN);
@@ -1186,24 +1204,24 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1186 struct net_device *dev = NULL; 1204 struct net_device *dev = NULL;
1187 1205
1188 switch (cmd) { 1206 switch (cmd) {
1189 case SIOCDARP: 1207 case SIOCDARP:
1190 case SIOCSARP: 1208 case SIOCSARP:
1191 if (!capable(CAP_NET_ADMIN)) 1209 if (!capable(CAP_NET_ADMIN))
1192 return -EPERM; 1210 return -EPERM;
1193 case SIOCGARP: 1211 case SIOCGARP:
1194 err = copy_from_user(&r, arg, sizeof(struct arpreq)); 1212 err = copy_from_user(&r, arg, sizeof(struct arpreq));
1195 if (err) 1213 if (err)
1196 return -EFAULT; 1214 return -EFAULT;
1197 break; 1215 break;
1198 default: 1216 default:
1199 return -EINVAL; 1217 return -EINVAL;
1200 } 1218 }
1201 1219
1202 if (r.arp_pa.sa_family != AF_INET) 1220 if (r.arp_pa.sa_family != AF_INET)
1203 return -EPFNOSUPPORT; 1221 return -EPFNOSUPPORT;
1204 1222
1205 if (!(r.arp_flags & ATF_PUBL) && 1223 if (!(r.arp_flags & ATF_PUBL) &&
1206 (r.arp_flags & (ATF_NETMASK|ATF_DONTPUB))) 1224 (r.arp_flags & (ATF_NETMASK | ATF_DONTPUB)))
1207 return -EINVAL; 1225 return -EINVAL;
1208 if (!(r.arp_flags & ATF_NETMASK)) 1226 if (!(r.arp_flags & ATF_NETMASK))
1209 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr = 1227 ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
@@ -1211,7 +1229,8 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1211 rtnl_lock(); 1229 rtnl_lock();
1212 if (r.arp_dev[0]) { 1230 if (r.arp_dev[0]) {
1213 err = -ENODEV; 1231 err = -ENODEV;
1214 if ((dev = __dev_get_by_name(net, r.arp_dev)) == NULL) 1232 dev = __dev_get_by_name(net, r.arp_dev);
1233 if (dev == NULL)
1215 goto out; 1234 goto out;
1216 1235
1217 /* Mmmm... It is wrong... ARPHRD_NETROM==0 */ 1236 /* Mmmm... It is wrong... ARPHRD_NETROM==0 */
@@ -1243,7 +1262,8 @@ out:
1243 return err; 1262 return err;
1244} 1263}
1245 1264
1246static int arp_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1265static int arp_netdev_event(struct notifier_block *this, unsigned long event,
1266 void *ptr)
1247{ 1267{
1248 struct net_device *dev = ptr; 1268 struct net_device *dev = ptr;
1249 1269
@@ -1311,12 +1331,13 @@ static char *ax2asc2(ax25_address *a, char *buf)
1311 for (n = 0, s = buf; n < 6; n++) { 1331 for (n = 0, s = buf; n < 6; n++) {
1312 c = (a->ax25_call[n] >> 1) & 0x7F; 1332 c = (a->ax25_call[n] >> 1) & 0x7F;
1313 1333
1314 if (c != ' ') *s++ = c; 1334 if (c != ' ')
1335 *s++ = c;
1315 } 1336 }
1316 1337
1317 *s++ = '-'; 1338 *s++ = '-';
1318 1339 n = (a->ax25_call[6] >> 1) & 0x0F;
1319 if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) { 1340 if (n > 9) {
1320 *s++ = '1'; 1341 *s++ = '1';
1321 n -= 10; 1342 n -= 10;
1322 } 1343 }
@@ -1325,10 +1346,9 @@ static char *ax2asc2(ax25_address *a, char *buf)
1325 *s++ = '\0'; 1346 *s++ = '\0';
1326 1347
1327 if (*buf == '\0' || *buf == '-') 1348 if (*buf == '\0' || *buf == '-')
1328 return "*"; 1349 return "*";
1329 1350
1330 return buf; 1351 return buf;
1331
1332} 1352}
1333#endif /* CONFIG_AX25 */ 1353#endif /* CONFIG_AX25 */
1334 1354
@@ -1408,10 +1428,10 @@ static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
1408/* ------------------------------------------------------------------------ */ 1428/* ------------------------------------------------------------------------ */
1409 1429
1410static const struct seq_operations arp_seq_ops = { 1430static const struct seq_operations arp_seq_ops = {
1411 .start = arp_seq_start, 1431 .start = arp_seq_start,
1412 .next = neigh_seq_next, 1432 .next = neigh_seq_next,
1413 .stop = neigh_seq_stop, 1433 .stop = neigh_seq_stop,
1414 .show = arp_seq_show, 1434 .show = arp_seq_show,
1415}; 1435};
1416 1436
1417static int arp_seq_open(struct inode *inode, struct file *file) 1437static int arp_seq_open(struct inode *inode, struct file *file)
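For reference, the request that lands in arp_ioctl() above originates from a plain AF_INET socket ioctl. A minimal userspace sketch of the SIOCGARP read path (interface name and address here are illustrative, not taken from this patch):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if_arp.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
        struct arpreq r;
        struct sockaddr_in *sin = (struct sockaddr_in *)&r.arp_pa;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&r, 0, sizeof(r));
        sin->sin_family = AF_INET;      /* anything else earns -EPFNOSUPPORT */
        inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
        strncpy(r.arp_dev, "eth0", sizeof(r.arp_dev) - 1);

        if (ioctl(fd, SIOCGARP, &r) == 0)   /* a get needs no CAP_NET_ADMIN */
                printf("arp_flags 0x%x\n", r.arp_flags);
        close(fd);
        return 0;
}

SIOCSARP and SIOCDARP take the same struct arpreq but, per the switch above, require CAP_NET_ADMIN; note the deliberate fall-through from those cases into the shared copy_from_user().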
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 721a8a37b45c..174be6caa5c8 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -73,6 +73,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
73 inet->inet_id = jiffies; 73 inet->inet_id = jiffies;
74 74
75 sk_dst_set(sk, &rt->dst); 75 sk_dst_set(sk, &rt->dst);
76 return(0); 76 return 0;
77} 77}
78EXPORT_SYMBOL(ip4_datagram_connect); 78EXPORT_SYMBOL(ip4_datagram_connect);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index da14c49284f4..c2ff48fa18c7 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -209,7 +209,7 @@ static void inetdev_destroy(struct in_device *in_dev)
209 inet_free_ifa(ifa); 209 inet_free_ifa(ifa);
210 } 210 }
211 211
212 dev->ip_ptr = NULL; 212 rcu_assign_pointer(dev->ip_ptr, NULL);
213 213
214 devinet_sysctl_unregister(in_dev); 214 devinet_sysctl_unregister(in_dev);
215 neigh_parms_release(&arp_tbl, in_dev->arp_parms); 215 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
@@ -1059,7 +1059,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1059 switch (event) { 1059 switch (event) {
1060 case NETDEV_REGISTER: 1060 case NETDEV_REGISTER:
1061 printk(KERN_DEBUG "inetdev_event: bug\n"); 1061 printk(KERN_DEBUG "inetdev_event: bug\n");
1062 dev->ip_ptr = NULL; 1062 rcu_assign_pointer(dev->ip_ptr, NULL);
1063 break; 1063 break;
1064 case NETDEV_UP: 1064 case NETDEV_UP:
1065 if (!inetdev_valid_mtu(dev->mtu)) 1065 if (!inetdev_valid_mtu(dev->mtu))
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 4a8e370862bc..a96e5ec211a0 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -186,9 +186,7 @@ static inline struct tnode *node_parent_rcu(struct node *node)
186{ 186{
187 struct tnode *ret = node_parent(node); 187 struct tnode *ret = node_parent(node);
188 188
189 return rcu_dereference_check(ret, 189 return rcu_dereference_rtnl(ret);
190 rcu_read_lock_held() ||
191 lockdep_rtnl_is_held());
192} 190}
193 191
194/* Same as rcu_assign_pointer 192/* Same as rcu_assign_pointer
@@ -211,9 +209,7 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
211{ 209{
212 struct node *ret = tnode_get_child(tn, i); 210 struct node *ret = tnode_get_child(tn, i);
213 211
214 return rcu_dereference_check(ret, 212 return rcu_dereference_rtnl(ret);
215 rcu_read_lock_held() ||
216 lockdep_rtnl_is_held());
217} 213}
218 214
219static inline int tnode_child_length(const struct tnode *tn) 215static inline int tnode_child_length(const struct tnode *tn)
@@ -459,8 +455,8 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
459 tn->empty_children = 1<<bits; 455 tn->empty_children = 1<<bits;
460 } 456 }
461 457
462 pr_debug("AT %p s=%u %lu\n", tn, (unsigned int) sizeof(struct tnode), 458 pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
463 (unsigned long) (sizeof(struct node) << bits)); 459 sizeof(struct node) << bits);
464 return tn; 460 return tn;
465} 461}
466 462
@@ -609,11 +605,10 @@ static struct node *resize(struct trie *t, struct tnode *tn)
609 605
610 /* Keep root node larger */ 606 /* Keep root node larger */
611 607
612 if (!node_parent((struct node*) tn)) { 608 if (!node_parent((struct node *)tn)) {
613 inflate_threshold_use = inflate_threshold_root; 609 inflate_threshold_use = inflate_threshold_root;
614 halve_threshold_use = halve_threshold_root; 610 halve_threshold_use = halve_threshold_root;
615 } 611 } else {
616 else {
617 inflate_threshold_use = inflate_threshold; 612 inflate_threshold_use = inflate_threshold;
618 halve_threshold_use = halve_threshold; 613 halve_threshold_use = halve_threshold;
619 } 614 }
@@ -639,7 +634,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
639 check_tnode(tn); 634 check_tnode(tn);
640 635
641 /* Return if at least one inflate is run */ 636 /* Return if at least one inflate is run */
642 if( max_work != MAX_WORK) 637 if (max_work != MAX_WORK)
643 return (struct node *) tn; 638 return (struct node *) tn;
644 639
645 /* 640 /*
@@ -966,9 +961,7 @@ fib_find_node(struct trie *t, u32 key)
966 struct node *n; 961 struct node *n;
967 962
968 pos = 0; 963 pos = 0;
969 n = rcu_dereference_check(t->trie, 964 n = rcu_dereference_rtnl(t->trie);
970 rcu_read_lock_held() ||
971 lockdep_rtnl_is_held());
972 965
973 while (n != NULL && NODE_TYPE(n) == T_TNODE) { 966 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
974 tn = (struct tnode *) n; 967 tn = (struct tnode *) n;
@@ -1748,16 +1741,14 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
1748 1741
1749 /* Node empty, walk back up to parent */ 1742 /* Node empty, walk back up to parent */
1750 c = (struct node *) p; 1743 c = (struct node *) p;
1751 } while ( (p = node_parent_rcu(c)) != NULL); 1744 } while ((p = node_parent_rcu(c)) != NULL);
1752 1745
1753 return NULL; /* Root of trie */ 1746 return NULL; /* Root of trie */
1754} 1747}
1755 1748
1756static struct leaf *trie_firstleaf(struct trie *t) 1749static struct leaf *trie_firstleaf(struct trie *t)
1757{ 1750{
1758 struct tnode *n = (struct tnode *) rcu_dereference_check(t->trie, 1751 struct tnode *n = (struct tnode *)rcu_dereference_rtnl(t->trie);
1759 rcu_read_lock_held() ||
1760 lockdep_rtnl_is_held());
1761 1752
1762 if (!n) 1753 if (!n)
1763 return NULL; 1754 return NULL;
@@ -2043,14 +2034,14 @@ struct fib_trie_iter {
2043 struct seq_net_private p; 2034 struct seq_net_private p;
2044 struct fib_table *tb; 2035 struct fib_table *tb;
2045 struct tnode *tnode; 2036 struct tnode *tnode;
2046 unsigned index; 2037 unsigned int index;
2047 unsigned depth; 2038 unsigned int depth;
2048}; 2039};
2049 2040
2050static struct node *fib_trie_get_next(struct fib_trie_iter *iter) 2041static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
2051{ 2042{
2052 struct tnode *tn = iter->tnode; 2043 struct tnode *tn = iter->tnode;
2053 unsigned cindex = iter->index; 2044 unsigned int cindex = iter->index;
2054 struct tnode *p; 2045 struct tnode *p;
2055 2046
2056 /* A single entry routing table */ 2047 /* A single entry routing table */
@@ -2159,7 +2150,7 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2159 */ 2150 */
2160static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat) 2151static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2161{ 2152{
2162 unsigned i, max, pointers, bytes, avdepth; 2153 unsigned int i, max, pointers, bytes, avdepth;
2163 2154
2164 if (stat->leaves) 2155 if (stat->leaves)
2165 avdepth = stat->totdepth*100 / stat->leaves; 2156 avdepth = stat->totdepth*100 / stat->leaves;
@@ -2356,7 +2347,8 @@ static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2356 2347
2357static void seq_indent(struct seq_file *seq, int n) 2348static void seq_indent(struct seq_file *seq, int n)
2358{ 2349{
2359 while (n-- > 0) seq_puts(seq, " "); 2350 while (n-- > 0)
2351 seq_puts(seq, " ");
2360} 2352}
2361 2353
2362static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s) 2354static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
@@ -2388,7 +2380,7 @@ static const char *const rtn_type_names[__RTN_MAX] = {
2388 [RTN_XRESOLVE] = "XRESOLVE", 2380 [RTN_XRESOLVE] = "XRESOLVE",
2389}; 2381};
2390 2382
2391static inline const char *rtn_type(char *buf, size_t len, unsigned t) 2383static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
2392{ 2384{
2393 if (t < __RTN_MAX && rtn_type_names[t]) 2385 if (t < __RTN_MAX && rtn_type_names[t])
2394 return rtn_type_names[t]; 2386 return rtn_type_names[t];
@@ -2544,13 +2536,12 @@ static void fib_route_seq_stop(struct seq_file *seq, void *v)
2544 rcu_read_unlock(); 2536 rcu_read_unlock();
2545} 2537}
2546 2538
2547static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi) 2539static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2548{ 2540{
2549 static unsigned type2flags[RTN_MAX + 1] = { 2541 unsigned int flags = 0;
2550 [7] = RTF_REJECT, [8] = RTF_REJECT,
2551 };
2552 unsigned flags = type2flags[type];
2553 2542
2543 if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
2544 flags = RTF_REJECT;
2554 if (fi && fi->fib_nh->nh_gw) 2545 if (fi && fi->fib_nh->nh_gw)
2555 flags |= RTF_GATEWAY; 2546 flags |= RTF_GATEWAY;
2556 if (mask == htonl(0xFFFFFFFF)) 2547 if (mask == htonl(0xFFFFFFFF))
@@ -2562,7 +2553,7 @@ static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2562/* 2553/*
2563 * This outputs /proc/net/route. 2554 * This outputs /proc/net/route.
2564 * The format of the file is not supposed to be changed 2555 * The format of the file is not supposed to be changed
2565 * and needs to be same as fib_hash output to avoid breaking 2556 * and needs to be same as fib_hash output to avoid breaking
2566 * legacy utilities 2557 * legacy utilities
2567 */ 2558 */
2568static int fib_route_seq_show(struct seq_file *seq, void *v) 2559static int fib_route_seq_show(struct seq_file *seq, void *v)
@@ -2587,7 +2578,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
2587 2578
2588 list_for_each_entry_rcu(fa, &li->falh, fa_list) { 2579 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2589 const struct fib_info *fi = fa->fa_info; 2580 const struct fib_info *fi = fa->fa_info;
2590 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi); 2581 unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
2591 int len; 2582 int len;
2592 2583
2593 if (fa->fa_type == RTN_BROADCAST 2584 if (fa->fa_type == RTN_BROADCAST
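The open-coded rcu_dereference_check(..., rcu_read_lock_held() || lockdep_rtnl_is_held()) condition repeated through fib_trie.c collapses into rcu_dereference_rtnl(). At this point in the tree the helpers in include/linux/rtnetlink.h read, modulo whitespace:

#define rcu_dereference_rtnl(p)                                 \
        rcu_dereference_check(p, rcu_read_lock_held() ||        \
                                 lockdep_rtnl_is_held())

#define rtnl_dereference(p)                                     \
        rcu_dereference_protected(p, lockdep_rtnl_is_held())

The first is for paths that may run either inside rcu_read_lock() or under RTNL; the second (used by the tunnel conversions below) is strictly write-side and so skips the read-barrier/ACCESS_ONCE handling that real RCU readers need.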
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
new file mode 100644
index 000000000000..b546736da2e1
--- /dev/null
+++ b/net/ipv4/gre.c
@@ -0,0 +1,151 @@
1/*
2 * GRE over IPv4 demultiplexer driver
3 *
4 * Authors: Dmitry Kozlov (xeb@mail.ru)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/kmod.h>
16#include <linux/skbuff.h>
17#include <linux/in.h>
18#include <linux/netdevice.h>
19#include <linux/version.h>
20#include <linux/spinlock.h>
21#include <net/protocol.h>
22#include <net/gre.h>
23
24
25const struct gre_protocol *gre_proto[GREPROTO_MAX] __read_mostly;
26static DEFINE_SPINLOCK(gre_proto_lock);
27
28int gre_add_protocol(const struct gre_protocol *proto, u8 version)
29{
30 if (version >= GREPROTO_MAX)
31 goto err_out;
32
33 spin_lock(&gre_proto_lock);
34 if (gre_proto[version])
35 goto err_out_unlock;
36
37 rcu_assign_pointer(gre_proto[version], proto);
38 spin_unlock(&gre_proto_lock);
39 return 0;
40
41err_out_unlock:
42 spin_unlock(&gre_proto_lock);
43err_out:
44 return -1;
45}
46EXPORT_SYMBOL_GPL(gre_add_protocol);
47
48int gre_del_protocol(const struct gre_protocol *proto, u8 version)
49{
50 if (version >= GREPROTO_MAX)
51 goto err_out;
52
53 spin_lock(&gre_proto_lock);
54 if (gre_proto[version] != proto)
55 goto err_out_unlock;
56 rcu_assign_pointer(gre_proto[version], NULL);
57 spin_unlock(&gre_proto_lock);
58 synchronize_rcu();
59 return 0;
60
61err_out_unlock:
62 spin_unlock(&gre_proto_lock);
63err_out:
64 return -1;
65}
66EXPORT_SYMBOL_GPL(gre_del_protocol);
67
68static int gre_rcv(struct sk_buff *skb)
69{
70 const struct gre_protocol *proto;
71 u8 ver;
72 int ret;
73
74 if (!pskb_may_pull(skb, 12))
75 goto drop;
76
77 ver = skb->data[1]&0x7f;
78 if (ver >= GREPROTO_MAX)
79 goto drop;
80
81 rcu_read_lock();
82 proto = rcu_dereference(gre_proto[ver]);
83 if (!proto || !proto->handler)
84 goto drop_unlock;
85 ret = proto->handler(skb);
86 rcu_read_unlock();
87 return ret;
88
89drop_unlock:
90 rcu_read_unlock();
91drop:
92 kfree_skb(skb);
93 return NET_RX_DROP;
94}
95
96static void gre_err(struct sk_buff *skb, u32 info)
97{
98 const struct gre_protocol *proto;
99 u8 ver;
100
101 if (!pskb_may_pull(skb, 12))
102 goto drop;
103
104 ver = skb->data[1]&0x7f;
105 if (ver >= GREPROTO_MAX)
106 goto drop;
107
108 rcu_read_lock();
109 proto = rcu_dereference(gre_proto[ver]);
110 if (!proto || !proto->err_handler)
111 goto drop_unlock;
112 proto->err_handler(skb, info);
113 rcu_read_unlock();
114 return;
115
116drop_unlock:
117 rcu_read_unlock();
118drop:
119 kfree_skb(skb);
120}
121
122static const struct net_protocol net_gre_protocol = {
123 .handler = gre_rcv,
124 .err_handler = gre_err,
125 .netns_ok = 1,
126};
127
128static int __init gre_init(void)
129{
130 pr_info("GRE over IPv4 demultiplexor driver");
 130	pr_info("GRE over IPv4 demultiplexer driver\n");
131
132 if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
133 pr_err("gre: can't add protocol\n");
134 return -EAGAIN;
135 }
136
137 return 0;
138}
139
140static void __exit gre_exit(void)
141{
142 inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
143}
144
145module_init(gre_init);
146module_exit(gre_exit);
147
148MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver");
149MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
150MODULE_LICENSE("GPL");
151
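The two registration hooks above follow the usual add/remove contract: a slot in gre_proto[] is claimed under gre_proto_lock and published with rcu_assign_pointer(), and gre_del_protocol() calls synchronize_rcu() so the caller may free its handler afterwards. A minimal sketch of a client module, assuming the GREPROTO_PPTP slot (value 1) that the new net/gre.h reserves; the my_* names are hypothetical:

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/gre.h>

static int my_gre_rcv(struct sk_buff *skb)      /* hypothetical handler */
{
        kfree_skb(skb);         /* a real handler would demultiplex here */
        return 0;
}

static void my_gre_err(struct sk_buff *skb, u32 info)
{
        /* react to ICMP errors reported for our GRE version */
}

static const struct gre_protocol my_gre_proto = {
        .handler        = my_gre_rcv,
        .err_handler    = my_gre_err,
};

static int __init my_init(void)
{
        /* returns -1 if another module already owns the slot */
        return gre_add_protocol(&my_gre_proto, GREPROTO_PPTP) ? -EBUSY : 0;
}

static void __exit my_exit(void)
{
        gre_del_protocol(&my_gre_proto, GREPROTO_PPTP);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

ip_gre.c, further down in this patch, does exactly this for GREPROTO_CISCO.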
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a0d847c7cba5..96bc7f9475a3 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -379,7 +379,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
379 inet->tos = ip_hdr(skb)->tos; 379 inet->tos = ip_hdr(skb)->tos;
380 daddr = ipc.addr = rt->rt_src; 380 daddr = ipc.addr = rt->rt_src;
381 ipc.opt = NULL; 381 ipc.opt = NULL;
382 ipc.shtx.flags = 0; 382 ipc.tx_flags = 0;
383 if (icmp_param->replyopts.optlen) { 383 if (icmp_param->replyopts.optlen) {
384 ipc.opt = &icmp_param->replyopts; 384 ipc.opt = &icmp_param->replyopts;
385 if (ipc.opt->srr) 385 if (ipc.opt->srr)
@@ -538,7 +538,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
538 inet_sk(sk)->tos = tos; 538 inet_sk(sk)->tos = tos;
539 ipc.addr = iph->saddr; 539 ipc.addr = iph->saddr;
540 ipc.opt = &icmp_param.replyopts; 540 ipc.opt = &icmp_param.replyopts;
541 ipc.shtx.flags = 0; 541 ipc.tx_flags = 0;
542 542
543 { 543 {
544 struct flowi fl = { 544 struct flowi fl = {
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index e5fa2ddce320..ba8042665849 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -425,7 +425,7 @@ static int inet_diag_bc_run(const void *bc, int len,
425 bc += op->no; 425 bc += op->no;
426 } 426 }
427 } 427 }
428 return (len == 0); 428 return len == 0;
429} 429}
430 430
431static int valid_cc(const void *bc, int len, int cc) 431static int valid_cc(const void *bc, int len, int cc)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b7c41654dde5..168440834ade 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -116,11 +116,11 @@ static int ip4_frag_match(struct inet_frag_queue *q, void *a)
116 struct ip4_create_arg *arg = a; 116 struct ip4_create_arg *arg = a;
117 117
118 qp = container_of(q, struct ipq, q); 118 qp = container_of(q, struct ipq, q);
119 return (qp->id == arg->iph->id && 119 return qp->id == arg->iph->id &&
120 qp->saddr == arg->iph->saddr && 120 qp->saddr == arg->iph->saddr &&
121 qp->daddr == arg->iph->daddr && 121 qp->daddr == arg->iph->daddr &&
122 qp->protocol == arg->iph->protocol && 122 qp->protocol == arg->iph->protocol &&
123 qp->user == arg->user); 123 qp->user == arg->user;
124} 124}
125 125
126/* Memory Tracking Functions. */ 126/* Memory Tracking Functions. */
@@ -542,7 +542,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
542 /* If the first fragment is fragmented itself, we split 542 /* If the first fragment is fragmented itself, we split
543 * it to two chunks: the first with data and paged part 543 * it to two chunks: the first with data and paged part
544 * and the second, holding only fragments. */ 544 * and the second, holding only fragments. */
545 if (skb_has_frags(head)) { 545 if (skb_has_frag_list(head)) {
546 struct sk_buff *clone; 546 struct sk_buff *clone;
547 int i, plen = 0; 547 int i, plen = 0;
548 548
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 35c93e8b6a46..5d6ddcb7403b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -44,6 +44,7 @@
44#include <net/net_namespace.h> 44#include <net/net_namespace.h>
45#include <net/netns/generic.h> 45#include <net/netns/generic.h>
46#include <net/rtnetlink.h> 46#include <net/rtnetlink.h>
47#include <net/gre.h>
47 48
48#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 49#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
49#include <net/ipv6.h> 50#include <net/ipv6.h>
@@ -128,7 +129,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev);
128 129
129static int ipgre_net_id __read_mostly; 130static int ipgre_net_id __read_mostly;
130struct ipgre_net { 131struct ipgre_net {
131 struct ip_tunnel *tunnels[4][HASH_SIZE]; 132 struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];
132 133
133 struct net_device *fb_tunnel_dev; 134 struct net_device *fb_tunnel_dev;
134}; 135};
@@ -158,9 +159,8 @@ struct ipgre_net {
158#define tunnels_l tunnels[1] 159#define tunnels_l tunnels[1]
159#define tunnels_wc tunnels[0] 160#define tunnels_wc tunnels[0]
160/* 161/*
161 * Locking : hash tables are protected by RCU and a spinlock 162 * Locking : hash tables are protected by RCU and RTNL
162 */ 163 */
163static DEFINE_SPINLOCK(ipgre_lock);
164 164
165#define for_each_ip_tunnel_rcu(start) \ 165#define for_each_ip_tunnel_rcu(start) \
166 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) 166 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
@@ -173,8 +173,8 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
173{ 173{
174 struct net *net = dev_net(dev); 174 struct net *net = dev_net(dev);
175 int link = dev->ifindex; 175 int link = dev->ifindex;
176 unsigned h0 = HASH(remote); 176 unsigned int h0 = HASH(remote);
177 unsigned h1 = HASH(key); 177 unsigned int h1 = HASH(key);
178 struct ip_tunnel *t, *cand = NULL; 178 struct ip_tunnel *t, *cand = NULL;
179 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 179 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
180 int dev_type = (gre_proto == htons(ETH_P_TEB)) ? 180 int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
@@ -289,13 +289,13 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
289 return NULL; 289 return NULL;
290} 290}
291 291
292static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign, 292static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
293 struct ip_tunnel_parm *parms) 293 struct ip_tunnel_parm *parms)
294{ 294{
295 __be32 remote = parms->iph.daddr; 295 __be32 remote = parms->iph.daddr;
296 __be32 local = parms->iph.saddr; 296 __be32 local = parms->iph.saddr;
297 __be32 key = parms->i_key; 297 __be32 key = parms->i_key;
298 unsigned h = HASH(key); 298 unsigned int h = HASH(key);
299 int prio = 0; 299 int prio = 0;
300 300
301 if (local) 301 if (local)
@@ -308,7 +308,7 @@ static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
308 return &ign->tunnels[prio][h]; 308 return &ign->tunnels[prio][h];
309} 309}
310 310
311static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign, 311static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
312 struct ip_tunnel *t) 312 struct ip_tunnel *t)
313{ 313{
314 return __ipgre_bucket(ign, &t->parms); 314 return __ipgre_bucket(ign, &t->parms);
@@ -316,23 +316,22 @@ static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
316 316
317static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t) 317static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
318{ 318{
319 struct ip_tunnel **tp = ipgre_bucket(ign, t); 319 struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);
320 320
321 spin_lock_bh(&ipgre_lock); 321 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
322 t->next = *tp;
323 rcu_assign_pointer(*tp, t); 322 rcu_assign_pointer(*tp, t);
324 spin_unlock_bh(&ipgre_lock);
325} 323}
326 324
327static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t) 325static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
328{ 326{
329 struct ip_tunnel **tp; 327 struct ip_tunnel __rcu **tp;
330 328 struct ip_tunnel *iter;
331 for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) { 329
332 if (t == *tp) { 330 for (tp = ipgre_bucket(ign, t);
333 spin_lock_bh(&ipgre_lock); 331 (iter = rtnl_dereference(*tp)) != NULL;
334 *tp = t->next; 332 tp = &iter->next) {
335 spin_unlock_bh(&ipgre_lock); 333 if (t == iter) {
334 rcu_assign_pointer(*tp, t->next);
336 break; 335 break;
337 } 336 }
338 } 337 }
@@ -346,10 +345,13 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
346 __be32 local = parms->iph.saddr; 345 __be32 local = parms->iph.saddr;
347 __be32 key = parms->i_key; 346 __be32 key = parms->i_key;
348 int link = parms->link; 347 int link = parms->link;
349 struct ip_tunnel *t, **tp; 348 struct ip_tunnel *t;
349 struct ip_tunnel __rcu **tp;
350 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 350 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
351 351
352 for (tp = __ipgre_bucket(ign, parms); (t = *tp) != NULL; tp = &t->next) 352 for (tp = __ipgre_bucket(ign, parms);
353 (t = rtnl_dereference(*tp)) != NULL;
354 tp = &t->next)
353 if (local == t->parms.iph.saddr && 355 if (local == t->parms.iph.saddr &&
354 remote == t->parms.iph.daddr && 356 remote == t->parms.iph.daddr &&
355 key == t->parms.i_key && 357 key == t->parms.i_key &&
@@ -360,7 +362,7 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
360 return t; 362 return t;
361} 363}
362 364
363static struct ip_tunnel * ipgre_tunnel_locate(struct net *net, 365static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
364 struct ip_tunnel_parm *parms, int create) 366 struct ip_tunnel_parm *parms, int create)
365{ 367{
366 struct ip_tunnel *t, *nt; 368 struct ip_tunnel *t, *nt;
@@ -645,9 +647,11 @@ static int ipgre_rcv(struct sk_buff *skb)
645 skb_reset_network_header(skb); 647 skb_reset_network_header(skb);
646 ipgre_ecn_decapsulate(iph, skb); 648 ipgre_ecn_decapsulate(iph, skb);
647 649
648 netif_rx(skb); 650 if (netif_rx(skb) == NET_RX_DROP)
651 stats->rx_dropped++;
652
649 rcu_read_unlock(); 653 rcu_read_unlock();
650 return(0); 654 return 0;
651 } 655 }
652 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 656 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
653 657
@@ -655,7 +659,7 @@ drop:
655 rcu_read_unlock(); 659 rcu_read_unlock();
656drop_nolock: 660drop_nolock:
657 kfree_skb(skb); 661 kfree_skb(skb);
658 return(0); 662 return 0;
659} 663}
660 664
661static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 665static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -668,7 +672,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
668 u8 tos; 672 u8 tos;
669 __be16 df; 673 __be16 df;
670 struct rtable *rt; /* Route to the other host */ 674 struct rtable *rt; /* Route to the other host */
671 struct net_device *tdev; /* Device to other host */ 675 struct net_device *tdev; /* Device to other host */
672 struct iphdr *iph; /* Our new IP header */ 676 struct iphdr *iph; /* Our new IP header */
673 unsigned int max_headroom; /* The extra header space needed */ 677 unsigned int max_headroom; /* The extra header space needed */
674 int gre_hlen; 678 int gre_hlen;
@@ -1012,7 +1016,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
1012 break; 1016 break;
1013 } 1017 }
1014 } else { 1018 } else {
1015 unsigned nflags = 0; 1019 unsigned int nflags = 0;
1016 1020
1017 t = netdev_priv(dev); 1021 t = netdev_priv(dev);
1018 1022
@@ -1125,7 +1129,7 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1125 1129
1126static int ipgre_header(struct sk_buff *skb, struct net_device *dev, 1130static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1127 unsigned short type, 1131 unsigned short type,
1128 const void *daddr, const void *saddr, unsigned len) 1132 const void *daddr, const void *saddr, unsigned int len)
1129{ 1133{
1130 struct ip_tunnel *t = netdev_priv(dev); 1134 struct ip_tunnel *t = netdev_priv(dev);
1131 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); 1135 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
@@ -1274,14 +1278,13 @@ static void ipgre_fb_tunnel_init(struct net_device *dev)
1274 tunnel->hlen = sizeof(struct iphdr) + 4; 1278 tunnel->hlen = sizeof(struct iphdr) + 4;
1275 1279
1276 dev_hold(dev); 1280 dev_hold(dev);
1277 ign->tunnels_wc[0] = tunnel; 1281 rcu_assign_pointer(ign->tunnels_wc[0], tunnel);
1278} 1282}
1279 1283
1280 1284
1281static const struct net_protocol ipgre_protocol = { 1285static const struct gre_protocol ipgre_protocol = {
1282 .handler = ipgre_rcv, 1286 .handler = ipgre_rcv,
1283 .err_handler = ipgre_err, 1287 .err_handler = ipgre_err,
1284 .netns_ok = 1,
1285}; 1288};
1286 1289
1287static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head) 1290static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
@@ -1291,11 +1294,13 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1291 for (prio = 0; prio < 4; prio++) { 1294 for (prio = 0; prio < 4; prio++) {
1292 int h; 1295 int h;
1293 for (h = 0; h < HASH_SIZE; h++) { 1296 for (h = 0; h < HASH_SIZE; h++) {
1294 struct ip_tunnel *t = ign->tunnels[prio][h]; 1297 struct ip_tunnel *t;
1298
1299 t = rtnl_dereference(ign->tunnels[prio][h]);
1295 1300
1296 while (t != NULL) { 1301 while (t != NULL) {
1297 unregister_netdevice_queue(t->dev, head); 1302 unregister_netdevice_queue(t->dev, head);
1298 t = t->next; 1303 t = rtnl_dereference(t->next);
1299 } 1304 }
1300 } 1305 }
1301 } 1306 }
@@ -1522,7 +1527,7 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1522 t = nt; 1527 t = nt;
1523 1528
1524 if (dev->type != ARPHRD_ETHER) { 1529 if (dev->type != ARPHRD_ETHER) {
1525 unsigned nflags = 0; 1530 unsigned int nflags = 0;
1526 1531
1527 if (ipv4_is_multicast(p.iph.daddr)) 1532 if (ipv4_is_multicast(p.iph.daddr))
1528 nflags = IFF_BROADCAST; 1533 nflags = IFF_BROADCAST;
@@ -1663,7 +1668,7 @@ static int __init ipgre_init(void)
1663 if (err < 0) 1668 if (err < 0)
1664 return err; 1669 return err;
1665 1670
1666 err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE); 1671 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1667 if (err < 0) { 1672 if (err < 0) {
1668 printk(KERN_INFO "ipgre init: can't add protocol\n"); 1673 printk(KERN_INFO "ipgre init: can't add protocol\n");
1669 goto add_proto_failed; 1674 goto add_proto_failed;
@@ -1683,7 +1688,7 @@ out:
1683tap_ops_failed: 1688tap_ops_failed:
1684 rtnl_link_unregister(&ipgre_link_ops); 1689 rtnl_link_unregister(&ipgre_link_ops);
1685rtnl_link_failed: 1690rtnl_link_failed:
1686 inet_del_protocol(&ipgre_protocol, IPPROTO_GRE); 1691 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1687add_proto_failed: 1692add_proto_failed:
1688 unregister_pernet_device(&ipgre_net_ops); 1693 unregister_pernet_device(&ipgre_net_ops);
1689 goto out; 1694 goto out;
@@ -1693,7 +1698,7 @@ static void __exit ipgre_fini(void)
1693{ 1698{
1694 rtnl_link_unregister(&ipgre_tap_ops); 1699 rtnl_link_unregister(&ipgre_tap_ops);
1695 rtnl_link_unregister(&ipgre_link_ops); 1700 rtnl_link_unregister(&ipgre_link_ops);
1696 if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) 1701 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
1697 printk(KERN_INFO "ipgre close: can't remove protocol\n"); 1702 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1698 unregister_pernet_device(&ipgre_net_ops); 1703 unregister_pernet_device(&ipgre_net_ops);
1699} 1704}
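With ipgre_lock gone, the tunnel hash chains become RCU lists whose only writers run under RTNL, which is why link and unlink need no spinlock: publication is a single rcu_assign_pointer(), and the write-side walk uses rtnl_dereference(). The idiom, reduced to a generic singly linked list as a sketch (node_link/node_unlink are generic names, not from the patch):

#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>

struct node {
        struct node __rcu *next;
};

static void node_link(struct node __rcu **head, struct node *n)
{
        ASSERT_RTNL();          /* writers are serialized by RTNL */
        rcu_assign_pointer(n->next, rtnl_dereference(*head));
        rcu_assign_pointer(*head, n);   /* publish to RCU readers */
}

static void node_unlink(struct node __rcu **head, struct node *n)
{
        struct node __rcu **tp;
        struct node *iter;

        ASSERT_RTNL();
        for (tp = head; (iter = rtnl_dereference(*tp)) != NULL;
             tp = &iter->next)
                if (iter == n) {
                        /* readers already past n still see a valid chain */
                        rcu_assign_pointer(*tp, n->next);
                        break;
                }
}

Readers keep traversing with rcu_dereference() inside rcu_read_lock(), as the unchanged for_each_ip_tunnel_rcu() iterator does; the ipip.c conversion below is the same transformation.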
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ba9836c488ed..1906fa35860c 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -466,7 +466,7 @@ error:
466 } 466 }
467 return -EINVAL; 467 return -EINVAL;
468} 468}
469 469EXPORT_SYMBOL(ip_options_compile);
470 470
471/* 471/*
472 * Undo all the changes done by ip_options_compile(). 472 * Undo all the changes done by ip_options_compile().
@@ -646,3 +646,4 @@ int ip_options_rcv_srr(struct sk_buff *skb)
646 } 646 }
647 return 0; 647 return 0;
648} 648}
649EXPORT_SYMBOL(ip_options_rcv_srr);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7649d7750075..439d2a34ee44 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -487,7 +487,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
487 * LATER: this step can be merged to real generation of fragments, 487 * LATER: this step can be merged to real generation of fragments,
488 * we can switch to copy when see the first bad fragment. 488 * we can switch to copy when see the first bad fragment.
489 */ 489 */
490 if (skb_has_frags(skb)) { 490 if (skb_has_frag_list(skb)) {
491 struct sk_buff *frag, *frag2; 491 struct sk_buff *frag, *frag2;
492 int first_len = skb_pagelen(skb); 492 int first_len = skb_pagelen(skb);
493 493
@@ -844,10 +844,9 @@ int ip_append_data(struct sock *sk,
844 inet->cork.length = 0; 844 inet->cork.length = 0;
845 sk->sk_sndmsg_page = NULL; 845 sk->sk_sndmsg_page = NULL;
846 sk->sk_sndmsg_off = 0; 846 sk->sk_sndmsg_off = 0;
847 if ((exthdrlen = rt->dst.header_len) != 0) { 847 exthdrlen = rt->dst.header_len;
848 length += exthdrlen; 848 length += exthdrlen;
849 transhdrlen += exthdrlen; 849 transhdrlen += exthdrlen;
850 }
851 } else { 850 } else {
852 rt = (struct rtable *)inet->cork.dst; 851 rt = (struct rtable *)inet->cork.dst;
853 if (inet->cork.flags & IPCORK_OPT) 852 if (inet->cork.flags & IPCORK_OPT)
@@ -934,16 +933,19 @@ alloc_new_skb:
934 !(rt->dst.dev->features&NETIF_F_SG)) 933 !(rt->dst.dev->features&NETIF_F_SG))
935 alloclen = mtu; 934 alloclen = mtu;
936 else 935 else
937 alloclen = datalen + fragheaderlen; 936 alloclen = fraglen;
938 937
939 /* The last fragment gets additional space at tail. 938 /* The last fragment gets additional space at tail.
940 * Note, with MSG_MORE we overallocate on fragments, 939 * Note, with MSG_MORE we overallocate on fragments,
941 * because we have no idea what fragment will be 940 * because we have no idea what fragment will be
942 * the last. 941 * the last.
943 */ 942 */
944 if (datalen == length + fraggap) 943 if (datalen == length + fraggap) {
945 alloclen += rt->dst.trailer_len; 944 alloclen += rt->dst.trailer_len;
946 945 /* make sure mtu is not reached */
946 if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
947 datalen -= ALIGN(rt->dst.trailer_len, 8);
948 }
947 if (transhdrlen) { 949 if (transhdrlen) {
948 skb = sock_alloc_send_skb(sk, 950 skb = sock_alloc_send_skb(sk,
949 alloclen + hh_len + 15, 951 alloclen + hh_len + 15,
@@ -960,7 +962,7 @@ alloc_new_skb:
960 else 962 else
961 /* only the initial fragment is 963 /* only the initial fragment is
962 time stamped */ 964 time stamped */
963 ipc->shtx.flags = 0; 965 ipc->tx_flags = 0;
964 } 966 }
965 if (skb == NULL) 967 if (skb == NULL)
966 goto error; 968 goto error;
@@ -971,7 +973,7 @@ alloc_new_skb:
971 skb->ip_summed = csummode; 973 skb->ip_summed = csummode;
972 skb->csum = 0; 974 skb->csum = 0;
973 skb_reserve(skb, hh_len); 975 skb_reserve(skb, hh_len);
974 *skb_tx(skb) = ipc->shtx; 976 skb_shinfo(skb)->tx_flags = ipc->tx_flags;
975 977
976 /* 978 /*
977 * Find where to start putting bytes. 979 * Find where to start putting bytes.
@@ -1391,7 +1393,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1391 1393
1392 daddr = ipc.addr = rt->rt_src; 1394 daddr = ipc.addr = rt->rt_src;
1393 ipc.opt = NULL; 1395 ipc.opt = NULL;
1394 ipc.shtx.flags = 0; 1396 ipc.tx_flags = 0;
1395 1397
1396 if (replyopts.opt.optlen) { 1398 if (replyopts.opt.optlen) {
1397 ipc.opt = &replyopts.opt; 1399 ipc.opt = &replyopts.opt;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ec036731a70b..babd25278106 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -122,11 +122,11 @@
122 122
123static int ipip_net_id __read_mostly; 123static int ipip_net_id __read_mostly;
124struct ipip_net { 124struct ipip_net {
125 struct ip_tunnel *tunnels_r_l[HASH_SIZE]; 125 struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
126 struct ip_tunnel *tunnels_r[HASH_SIZE]; 126 struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
127 struct ip_tunnel *tunnels_l[HASH_SIZE]; 127 struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
128 struct ip_tunnel *tunnels_wc[1]; 128 struct ip_tunnel __rcu *tunnels_wc[1];
129 struct ip_tunnel **tunnels[4]; 129 struct ip_tunnel __rcu **tunnels[4];
130 130
131 struct net_device *fb_tunnel_dev; 131 struct net_device *fb_tunnel_dev;
132}; 132};
@@ -135,9 +135,8 @@ static void ipip_tunnel_init(struct net_device *dev);
135static void ipip_tunnel_setup(struct net_device *dev); 135static void ipip_tunnel_setup(struct net_device *dev);
136 136
137/* 137/*
138 * Locking : hash tables are protected by RCU and a spinlock 138 * Locking : hash tables are protected by RCU and RTNL
139 */ 139 */
140static DEFINE_SPINLOCK(ipip_lock);
141 140
142#define for_each_ip_tunnel_rcu(start) \ 141#define for_each_ip_tunnel_rcu(start) \
143 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) 142 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
@@ -145,8 +144,8 @@ static DEFINE_SPINLOCK(ipip_lock);
145static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, 144static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
146 __be32 remote, __be32 local) 145 __be32 remote, __be32 local)
147{ 146{
148 unsigned h0 = HASH(remote); 147 unsigned int h0 = HASH(remote);
149 unsigned h1 = HASH(local); 148 unsigned int h1 = HASH(local);
150 struct ip_tunnel *t; 149 struct ip_tunnel *t;
151 struct ipip_net *ipn = net_generic(net, ipip_net_id); 150 struct ipip_net *ipn = net_generic(net, ipip_net_id);
152 151
@@ -169,12 +168,12 @@ static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
169 return NULL; 168 return NULL;
170} 169}
171 170
172static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn, 171static struct ip_tunnel __rcu **__ipip_bucket(struct ipip_net *ipn,
173 struct ip_tunnel_parm *parms) 172 struct ip_tunnel_parm *parms)
174{ 173{
175 __be32 remote = parms->iph.daddr; 174 __be32 remote = parms->iph.daddr;
176 __be32 local = parms->iph.saddr; 175 __be32 local = parms->iph.saddr;
177 unsigned h = 0; 176 unsigned int h = 0;
178 int prio = 0; 177 int prio = 0;
179 178
180 if (remote) { 179 if (remote) {
@@ -188,7 +187,7 @@ static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
188 return &ipn->tunnels[prio][h]; 187 return &ipn->tunnels[prio][h];
189} 188}
190 189
191static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn, 190static inline struct ip_tunnel __rcu **ipip_bucket(struct ipip_net *ipn,
192 struct ip_tunnel *t) 191 struct ip_tunnel *t)
193{ 192{
194 return __ipip_bucket(ipn, &t->parms); 193 return __ipip_bucket(ipn, &t->parms);
@@ -196,13 +195,14 @@ static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
196 195
197static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t) 196static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
198{ 197{
199 struct ip_tunnel **tp; 198 struct ip_tunnel __rcu **tp;
200 199 struct ip_tunnel *iter;
201 for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) { 200
202 if (t == *tp) { 201 for (tp = ipip_bucket(ipn, t);
203 spin_lock_bh(&ipip_lock); 202 (iter = rtnl_dereference(*tp)) != NULL;
204 *tp = t->next; 203 tp = &iter->next) {
205 spin_unlock_bh(&ipip_lock); 204 if (t == iter) {
205 rcu_assign_pointer(*tp, t->next);
206 break; 206 break;
207 } 207 }
208 } 208 }
@@ -210,12 +210,10 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
210 210
211static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t) 211static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
212{ 212{
213 struct ip_tunnel **tp = ipip_bucket(ipn, t); 213 struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
214 214
215 spin_lock_bh(&ipip_lock); 215 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
216 t->next = *tp;
217 rcu_assign_pointer(*tp, t); 216 rcu_assign_pointer(*tp, t);
218 spin_unlock_bh(&ipip_lock);
219} 217}
220 218
221static struct ip_tunnel * ipip_tunnel_locate(struct net *net, 219static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -223,12 +221,15 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
223{ 221{
224 __be32 remote = parms->iph.daddr; 222 __be32 remote = parms->iph.daddr;
225 __be32 local = parms->iph.saddr; 223 __be32 local = parms->iph.saddr;
226 struct ip_tunnel *t, **tp, *nt; 224 struct ip_tunnel *t, *nt;
225 struct ip_tunnel __rcu **tp;
227 struct net_device *dev; 226 struct net_device *dev;
228 char name[IFNAMSIZ]; 227 char name[IFNAMSIZ];
229 struct ipip_net *ipn = net_generic(net, ipip_net_id); 228 struct ipip_net *ipn = net_generic(net, ipip_net_id);
230 229
231 for (tp = __ipip_bucket(ipn, parms); (t = *tp) != NULL; tp = &t->next) { 230 for (tp = __ipip_bucket(ipn, parms);
231 (t = rtnl_dereference(*tp)) != NULL;
232 tp = &t->next) {
232 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr) 233 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
233 return t; 234 return t;
234 } 235 }
@@ -268,16 +269,15 @@ failed_free:
268 return NULL; 269 return NULL;
269} 270}
270 271
272/* called with RTNL */
271static void ipip_tunnel_uninit(struct net_device *dev) 273static void ipip_tunnel_uninit(struct net_device *dev)
272{ 274{
273 struct net *net = dev_net(dev); 275 struct net *net = dev_net(dev);
274 struct ipip_net *ipn = net_generic(net, ipip_net_id); 276 struct ipip_net *ipn = net_generic(net, ipip_net_id);
275 277
276 if (dev == ipn->fb_tunnel_dev) { 278 if (dev == ipn->fb_tunnel_dev)
277 spin_lock_bh(&ipip_lock); 279 rcu_assign_pointer(ipn->tunnels_wc[0], NULL);
278 ipn->tunnels_wc[0] = NULL; 280 else
279 spin_unlock_bh(&ipip_lock);
280 } else
281 ipip_tunnel_unlink(ipn, netdev_priv(dev)); 281 ipip_tunnel_unlink(ipn, netdev_priv(dev));
282 dev_put(dev); 282 dev_put(dev);
283} 283}
@@ -377,7 +377,10 @@ static int ipip_rcv(struct sk_buff *skb)
377 skb_tunnel_rx(skb, tunnel->dev); 377 skb_tunnel_rx(skb, tunnel->dev);
378 378
379 ipip_ecn_decapsulate(iph, skb); 379 ipip_ecn_decapsulate(iph, skb);
380 netif_rx(skb); 380
381 if (netif_rx(skb) == NET_RX_DROP)
382 tunnel->dev->stats.rx_dropped++;
383
381 rcu_read_unlock(); 384 rcu_read_unlock();
382 return 0; 385 return 0;
383 } 386 }
@@ -741,10 +744,10 @@ static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
741 iph->ihl = 5; 744 iph->ihl = 5;
742 745
743 dev_hold(dev); 746 dev_hold(dev);
744 ipn->tunnels_wc[0] = tunnel; 747 rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
745} 748}
746 749
747static struct xfrm_tunnel ipip_handler = { 750static struct xfrm_tunnel ipip_handler __read_mostly = {
748 .handler = ipip_rcv, 751 .handler = ipip_rcv,
749 .err_handler = ipip_err, 752 .err_handler = ipip_err,
750 .priority = 1, 753 .priority = 1,
@@ -760,11 +763,12 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
760 for (prio = 1; prio < 4; prio++) { 763 for (prio = 1; prio < 4; prio++) {
761 int h; 764 int h;
762 for (h = 0; h < HASH_SIZE; h++) { 765 for (h = 0; h < HASH_SIZE; h++) {
763 struct ip_tunnel *t = ipn->tunnels[prio][h]; 766 struct ip_tunnel *t;
764 767
768 t = rtnl_dereference(ipn->tunnels[prio][h]);
765 while (t != NULL) { 769 while (t != NULL) {
766 unregister_netdevice_queue(t->dev, head); 770 unregister_netdevice_queue(t->dev, head);
767 t = t->next; 771 t = rtnl_dereference(t->next);
768 } 772 }
769 } 773 }
770 } 774 }
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 179fcab866fc..10b24c02deb0 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -724,7 +724,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
724 case 0: 724 case 0:
725 if (vifc->vifc_flags == VIFF_USE_IFINDEX) { 725 if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
726 dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); 726 dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
727 if (dev && dev->ip_ptr == NULL) { 727 if (dev && __in_dev_get_rtnl(dev) == NULL) {
728 dev_put(dev); 728 dev_put(dev);
729 return -EADDRNOTAVAIL; 729 return -EADDRNOTAVAIL;
730 } 730 }
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index e8f4f9a57f12..8b642f152468 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -72,7 +72,7 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
72 for (i = 0; i < len; i++) 72 for (i = 0; i < len; i++)
73 ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i]; 73 ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];
74 74
75 return (ret != 0); 75 return ret != 0;
76} 76}
77 77
78/* 78/*
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 3a43cf36db87..1e26a4897655 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -29,6 +29,7 @@
29#include <net/netfilter/nf_conntrack.h> 29#include <net/netfilter/nf_conntrack.h>
30#include <net/net_namespace.h> 30#include <net/net_namespace.h>
31#include <net/checksum.h> 31#include <net/checksum.h>
32#include <net/ip.h>
32 33
33#define CLUSTERIP_VERSION "0.8" 34#define CLUSTERIP_VERSION "0.8"
34 35
@@ -231,24 +232,22 @@ clusterip_hashfn(const struct sk_buff *skb,
231{ 232{
232 const struct iphdr *iph = ip_hdr(skb); 233 const struct iphdr *iph = ip_hdr(skb);
233 unsigned long hashval; 234 unsigned long hashval;
234 u_int16_t sport, dport; 235 u_int16_t sport = 0, dport = 0;
235 const u_int16_t *ports; 236 int poff;
236 237
237 switch (iph->protocol) { 238 poff = proto_ports_offset(iph->protocol);
238 case IPPROTO_TCP: 239 if (poff >= 0) {
239 case IPPROTO_UDP: 240 const u_int16_t *ports;
240 case IPPROTO_UDPLITE: 241 u16 _ports[2];
241 case IPPROTO_SCTP: 242
242 case IPPROTO_DCCP: 243 ports = skb_header_pointer(skb, iph->ihl * 4 + poff, 4, _ports);
243 case IPPROTO_ICMP: 244 if (ports) {
244 ports = (const void *)iph+iph->ihl*4; 245 sport = ports[0];
245 sport = ports[0]; 246 dport = ports[1];
246 dport = ports[1]; 247 }
247 break; 248 } else {
248 default:
249 if (net_ratelimit()) 249 if (net_ratelimit())
250 pr_info("unknown protocol %u\n", iph->protocol); 250 pr_info("unknown protocol %u\n", iph->protocol);
251 sport = dport = 0;
252 } 251 }
253 252
254 switch (config->hash_mode) { 253 switch (config->hash_mode) {
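The rewrite drops the per-protocol switch in favour of proto_ports_offset() (available via the newly included net/ip.h; it returns the byte offset of the 16-bit source/destination port pair inside the transport header, or -1 for portless protocols) plus skb_header_pointer(), which copies the two ports out even when they sit in non-linear data. The same idiom in isolation, as a sketch (flow_ports is a made-up name):

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/ip.h>

static u32 flow_ports(const struct sk_buff *skb, const struct iphdr *iph)
{
        int poff = proto_ports_offset(iph->protocol);
        __be16 _ports[2], *ports;

        if (poff < 0)
                return 0;               /* no ports for this protocol */

        ports = skb_header_pointer(skb, iph->ihl * 4 + poff,
                                   sizeof(_ports), _ports);
        if (!ports)
                return 0;               /* packet too short */

        return ((u32)ntohs(ports[0]) << 16) | ntohs(ports[1]);
}

Unlike the old direct 'iph + ihl*4' arithmetic, this stays safe for skbs whose transport header lives in a paged fragment.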
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index f2d297351405..65699c24411c 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -28,8 +28,7 @@
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <net/protocol.h> 29#include <net/protocol.h>
30 30
31const struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp; 31const struct net_protocol *inet_protos[MAX_INET_PROTOS] __read_mostly;
32static DEFINE_SPINLOCK(inet_proto_lock);
33 32
34/* 33/*
35 * Add a protocol handler to the hash tables 34 * Add a protocol handler to the hash tables
@@ -37,20 +36,9 @@ static DEFINE_SPINLOCK(inet_proto_lock);
37 36
38int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) 37int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
39{ 38{
40 int hash, ret; 39 int hash = protocol & (MAX_INET_PROTOS - 1);
41 40
42 hash = protocol & (MAX_INET_PROTOS - 1); 41 return !cmpxchg(&inet_protos[hash], NULL, prot) ? 0 : -1;
43
44 spin_lock_bh(&inet_proto_lock);
45 if (inet_protos[hash]) {
46 ret = -1;
47 } else {
48 inet_protos[hash] = prot;
49 ret = 0;
50 }
51 spin_unlock_bh(&inet_proto_lock);
52
53 return ret;
54} 42}
55EXPORT_SYMBOL(inet_add_protocol); 43EXPORT_SYMBOL(inet_add_protocol);
56 44
@@ -60,18 +48,9 @@ EXPORT_SYMBOL(inet_add_protocol);
60 48
61int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) 49int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
62{ 50{
63 int hash, ret; 51 int ret, hash = protocol & (MAX_INET_PROTOS - 1);
64
65 hash = protocol & (MAX_INET_PROTOS - 1);
66 52
67 spin_lock_bh(&inet_proto_lock); 53 ret = (cmpxchg(&inet_protos[hash], prot, NULL) == prot) ? 0 : -1;
68 if (inet_protos[hash] == prot) {
69 inet_protos[hash] = NULL;
70 ret = 0;
71 } else {
72 ret = -1;
73 }
74 spin_unlock_bh(&inet_proto_lock);
75 54
76 synchronize_net(); 55 synchronize_net();
77 56
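inet_add_protocol() and inet_del_protocol() shrink to one cmpxchg() each: add succeeds only if the slot was still NULL, del only if the slot still holds the expected handler, and the spinlock disappears because the compare-and-swap is the entire critical section. The idiom in miniature (claim/release are illustrative names):

/* claim: install p iff the slot is empty; cmpxchg returns the old value */
static int claim(const struct net_protocol **slot,
                 const struct net_protocol *p)
{
        return cmpxchg(slot, NULL, p) == NULL ? 0 : -1;
}

/* release: clear the slot iff we still own it, then let readers drain */
static int release(const struct net_protocol **slot,
                   const struct net_protocol *p)
{
        int ret = (cmpxchg(slot, p, NULL) == p) ? 0 : -1;

        synchronize_net();      /* no CPU runs the old handler after this */
        return ret;
}

Readers never take a lock either; they load inet_protos[hash] once and call through it, which is also why the array moves from ____cacheline_aligned_in_smp to __read_mostly.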
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 009a7b2aa1ef..1f85ef289895 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -505,7 +505,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
505 505
506 ipc.addr = inet->inet_saddr; 506 ipc.addr = inet->inet_saddr;
507 ipc.opt = NULL; 507 ipc.opt = NULL;
508 ipc.shtx.flags = 0; 508 ipc.tx_flags = 0;
509 ipc.oif = sk->sk_bound_dev_if; 509 ipc.oif = sk->sk_bound_dev_if;
510 510
511 if (msg->msg_controllen) { 511 if (msg->msg_controllen) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6298f75d5e93..98beda47bc99 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1268,18 +1268,11 @@ skip_hashing:
1268 1268
1269void rt_bind_peer(struct rtable *rt, int create) 1269void rt_bind_peer(struct rtable *rt, int create)
1270{ 1270{
1271 static DEFINE_SPINLOCK(rt_peer_lock);
1272 struct inet_peer *peer; 1271 struct inet_peer *peer;
1273 1272
1274 peer = inet_getpeer(rt->rt_dst, create); 1273 peer = inet_getpeer(rt->rt_dst, create);
1275 1274
1276 spin_lock_bh(&rt_peer_lock); 1275 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1277 if (rt->peer == NULL) {
1278 rt->peer = peer;
1279 peer = NULL;
1280 }
1281 spin_unlock_bh(&rt_peer_lock);
1282 if (peer)
1283 inet_putpeer(peer); 1276 inet_putpeer(peer);
1284} 1277}
1285 1278
@@ -2586,7 +2579,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
2586 goto out; 2579 goto out;
2587 2580
2588 /* RACE: Check return value of inet_select_addr instead. */ 2581 /* RACE: Check return value of inet_select_addr instead. */
2589 if (__in_dev_get_rtnl(dev_out) == NULL) { 2582 if (rcu_dereference_raw(dev_out->ip_ptr) == NULL) {
2590 dev_put(dev_out); 2583 dev_put(dev_out);
2591 goto out; /* Wrong error code */ 2584 goto out; /* Wrong error code */
2592 } 2585 }
@@ -2798,7 +2791,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
2798 2791
2799 dst_release(&(*rp)->dst); 2792 dst_release(&(*rp)->dst);
2800 *rp = rt; 2793 *rp = rt;
2801 return (rt ? 0 : -ENOMEM); 2794 return rt ? 0 : -ENOMEM;
2802} 2795}
2803 2796
2804int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp, 2797int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
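rt_bind_peer() uses the same primitive as a first-writer-wins initializer: whoever installs the peer pointer first keeps its reference, and the loser puts the reference it took. The generic shape of the idiom, as a sketch (obj and obj_put are placeholders):

static void bind_once(struct obj **slot, struct obj *candidate)
{
        if (candidate && cmpxchg(slot, NULL, candidate) != NULL)
                obj_put(candidate);     /* lost the race; drop our ref */
}

This trades the old dedicated rt_peer_lock for a single atomic operation on the rare first-bind path.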
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 95d75d443927..19192c5fe67a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2392,7 +2392,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2392 err = tp->af_specific->md5_parse(sk, optval, optlen); 2392 err = tp->af_specific->md5_parse(sk, optval, optlen);
2393 break; 2393 break;
2394#endif 2394#endif
2395 2395 case TCP_USER_TIMEOUT:
2396 /* Cap the max timeout in ms TCP will retry/retrans
2397 * before giving up and aborting (ETIMEDOUT) a connection.
2398 */
2399 icsk->icsk_user_timeout = msecs_to_jiffies(val);
2400 break;
2396 default: 2401 default:
2397 err = -ENOPROTOOPT; 2402 err = -ENOPROTOOPT;
2398 break; 2403 break;
@@ -2611,6 +2616,10 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2611 case TCP_THIN_DUPACK: 2616 case TCP_THIN_DUPACK:
2612 val = tp->thin_dupack; 2617 val = tp->thin_dupack;
2613 break; 2618 break;
2619
2620 case TCP_USER_TIMEOUT:
2621 val = jiffies_to_msecs(icsk->icsk_user_timeout);
2622 break;
2614 default: 2623 default:
2615 return -ENOPROTOOPT; 2624 return -ENOPROTOOPT;
2616 } 2625 }
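TCP_USER_TIMEOUT is a per-socket cap, in milliseconds, on how long transmitted data may remain unacknowledged before the kernel aborts the connection with ETIMEDOUT; when nonzero it overrides the boundary-based computation in retransmits_timed_out() (see the new 'timeout' argument in the tcp_timer.c hunks below). From userspace, as a sketch (the fallback define matches the value this series assigns in linux/tcp.h):

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_USER_TIMEOUT
#define TCP_USER_TIMEOUT 18     /* not yet present in older libc headers */
#endif

/* Abort the connection if data stays unacked for more than 'ms' ms. */
static int set_user_timeout(int fd, unsigned int ms)
{
        return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
                          &ms, sizeof(ms));
}

A value of 0 keeps the default retransmission-count based behaviour.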
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 149e79ac2891..fabc09a58d7f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -805,25 +805,12 @@ void tcp_update_metrics(struct sock *sk)
805 } 805 }
806} 806}
807 807
808/* Numbers are taken from RFC3390.
809 *
810 * John Heffner states:
811 *
812 * The RFC specifies a window of no more than 4380 bytes
813 * unless 2*MSS > 4380. Reading the pseudocode in the RFC
814 * is a bit misleading because they use a clamp at 4380 bytes
815 * rather than use a multiplier in the relevant range.
816 */
817__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst) 808__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
818{ 809{
819 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); 810 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
820 811
821 if (!cwnd) { 812 if (!cwnd)
822 if (tp->mss_cache > 1460) 813 cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
823 cwnd = 2;
824 else
825 cwnd = (tp->mss_cache > 1095) ? 3 : 4;
826 }
827 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); 814 return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
828} 815}
829 816
@@ -2314,7 +2301,7 @@ static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
2314 2301
2315static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) 2302static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
2316{ 2303{
2317 return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto); 2304 return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
2318} 2305}
2319 2306
2320static inline int tcp_head_timedout(struct sock *sk) 2307static inline int tcp_head_timedout(struct sock *sk)
@@ -3411,8 +3398,8 @@ static void tcp_ack_probe(struct sock *sk)
3411 3398
3412static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag) 3399static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
3413{ 3400{
3414 return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 3401 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
3415 inet_csk(sk)->icsk_ca_state != TCP_CA_Open); 3402 inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
3416} 3403}
3417 3404
3418static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag) 3405static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
@@ -3429,9 +3416,9 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp,
3429 const u32 ack, const u32 ack_seq, 3416 const u32 ack, const u32 ack_seq,
3430 const u32 nwin) 3417 const u32 nwin)
3431{ 3418{
3432 return (after(ack, tp->snd_una) || 3419 return after(ack, tp->snd_una) ||
3433 after(ack_seq, tp->snd_wl1) || 3420 after(ack_seq, tp->snd_wl1) ||
3434 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd)); 3421 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
3435} 3422}
3436 3423
3437/* Update our send window. 3424/* Update our send window.
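The removed open-coded block computed RFC 3390's initial window, min(4*MSS, max(2*MSS, 4380 bytes)), by hand; it now shares rfc3390_bytes_to_packets() with tcp_output.c (see below). Expressed in whole segments the formula collapses to two comparisons; the helper in include/net/tcp.h reads essentially:

static inline u32 rfc3390_bytes_to_packets(const u32 bytes)
{
        /* 4 segments up to a 1095-byte MSS, 3 up to 2190, else 2 */
        return bytes <= 1095 ? 4 : (bytes > 2190 ? 2 : 3);
}

For example, a 1460-byte MSS yields 3 segments (4380 bytes), matching the old '(tp->mss_cache > 1095) ? 3 : 4' branch, while MSS values between 1460 and 2190 now get 3 segments where the removed code dropped to 2.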
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 020766292bb0..a0232f3a358b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2571,7 +2571,6 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2571 2571
2572 return tcp_gro_receive(head, skb); 2572 return tcp_gro_receive(head, skb);
2573} 2573}
2574EXPORT_SYMBOL(tcp4_gro_receive);
2575 2574
2576int tcp4_gro_complete(struct sk_buff *skb) 2575int tcp4_gro_complete(struct sk_buff *skb)
2577{ 2576{
@@ -2584,7 +2583,6 @@ int tcp4_gro_complete(struct sk_buff *skb)
2584 2583
2585 return tcp_gro_complete(skb); 2584 return tcp_gro_complete(skb);
2586} 2585}
2587EXPORT_SYMBOL(tcp4_gro_complete);
2588 2586
2589struct proto tcp_prot = { 2587struct proto tcp_prot = {
2590 .name = "TCP", 2588 .name = "TCP",
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f25b56cb85cb..43cf901d7659 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -55,7 +55,7 @@ static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
55 return 1; 55 return 1;
56 if (after(end_seq, s_win) && before(seq, e_win)) 56 if (after(end_seq, s_win) && before(seq, e_win))
57 return 1; 57 return 1;
58 return (seq == e_win && seq == end_seq); 58 return seq == e_win && seq == end_seq;
59} 59}
60 60
61/* 61/*
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index de3bd8458588..05b1ecf36763 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -224,16 +224,10 @@ void tcp_select_initial_window(int __space, __u32 mss,
224 } 224 }
225 } 225 }
226 226
227 /* Set initial window to value enough for senders, 227 /* Set initial window to value enough for senders, following RFC5681. */
228 * following RFC2414. Senders, not following this RFC,
229 * will be satisfied with 2.
230 */
231 if (mss > (1 << *rcv_wscale)) { 228 if (mss > (1 << *rcv_wscale)) {
232 int init_cwnd = 4; 229 int init_cwnd = rfc3390_bytes_to_packets(mss);
233 if (mss > 1460 * 3) 230
234 init_cwnd = 2;
235 else if (mss > 1460)
236 init_cwnd = 3;
237 /* when initializing use the value from init_rcv_wnd 231 /* when initializing use the value from init_rcv_wnd
238 * rather than the default from above 232 * rather than the default from above
239 */ 233 */
@@ -1376,9 +1370,9 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
1376 const struct sk_buff *skb, 1370 const struct sk_buff *skb,
1377 unsigned mss_now, int nonagle) 1371 unsigned mss_now, int nonagle)
1378{ 1372{
1379 return (skb->len < mss_now && 1373 return skb->len < mss_now &&
1380 ((nonagle & TCP_NAGLE_CORK) || 1374 ((nonagle & TCP_NAGLE_CORK) ||
1381 (!nonagle && tp->packets_out && tcp_minshall_check(tp)))); 1375 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1382} 1376}
1383 1377
1384/* Return non-zero if the Nagle test allows this packet to be 1378/* Return non-zero if the Nagle test allows this packet to be
@@ -1449,10 +1443,10 @@ int tcp_may_send_now(struct sock *sk)
1449 struct tcp_sock *tp = tcp_sk(sk); 1443 struct tcp_sock *tp = tcp_sk(sk);
1450 struct sk_buff *skb = tcp_send_head(sk); 1444 struct sk_buff *skb = tcp_send_head(sk);
1451 1445
1452 return (skb && 1446 return skb &&
1453 tcp_snd_test(sk, skb, tcp_current_mss(sk), 1447 tcp_snd_test(sk, skb, tcp_current_mss(sk),
1454 (tcp_skb_is_last(sk, skb) ? 1448 (tcp_skb_is_last(sk, skb) ?
1455 tp->nonagle : TCP_NAGLE_PUSH))); 1449 tp->nonagle : TCP_NAGLE_PUSH));
1456} 1450}
1457 1451
1458/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 1452/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
@@ -2429,6 +2423,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2429 __u8 rcv_wscale; 2423 __u8 rcv_wscale;
2430 /* Set this up on the first call only */ 2424 /* Set this up on the first call only */
2431 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 2425 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2426
2427 /* limit the window selection if the user enforce a smaller rx buffer */
2428 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2429 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2430 req->window_clamp = tcp_full_space(sk);
2431
2432 /* tcp_full_space because it is guaranteed to be the first packet */ 2432 /* tcp_full_space because it is guaranteed to be the first packet */
2433 tcp_select_initial_window(tcp_full_space(sk), 2433 tcp_select_initial_window(tcp_full_space(sk),
2434 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 2434 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
@@ -2555,6 +2555,11 @@ static void tcp_connect_init(struct sock *sk)
2555 2555
2556 tcp_initialize_rcv_mss(sk); 2556 tcp_initialize_rcv_mss(sk);
2557 2557
 2558 /* limit the window selection if the user enforces a smaller rx buffer */
2559 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2560 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2561 tp->window_clamp = tcp_full_space(sk);
2562
2558 tcp_select_initial_window(tcp_full_space(sk), 2563 tcp_select_initial_window(tcp_full_space(sk),
2559 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 2564 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2560 &tp->rcv_wnd, 2565 &tp->rcv_wnd,
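
The tcp_output.c hunk above drops the hand-rolled 2/3/4-segment ladder (keyed to an Ethernet-ish MSS of 1460) and derives the initial receive window from rfc3390_bytes_to_packets() instead. A minimal standalone restatement of the RFC 3390 rule that helper encodes, IW = min(4*MSS, max(2*MSS, 4380 bytes)) expressed in whole segments (illustrative only, not the kernel function itself):

    /* RFC 3390 initial window in packets for a given sender MSS:
     * 4 segments up to an MSS of 1095 bytes, 3 up to 2190, else 2. */
    static unsigned int rfc3390_init_segs(unsigned int mss)
    {
            if (mss > 2190)
                    return 2;
            if (mss > 1095)
                    return 3;
            return 4;
    }

The two new SOCK_RCVBUF_LOCK checks serve the same goal in both paths: if the application has pinned SO_RCVBUF, window_clamp must not advertise more than tcp_full_space(sk) can actually buffer.
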
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c35b469e851c..baea4a129022 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -138,10 +138,10 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
138 * retransmissions with an initial RTO of TCP_RTO_MIN. 138 * retransmissions with an initial RTO of TCP_RTO_MIN.
139 */ 139 */
140static bool retransmits_timed_out(struct sock *sk, 140static bool retransmits_timed_out(struct sock *sk,
141 unsigned int boundary) 141 unsigned int boundary,
142 unsigned int timeout)
142{ 143{
143 unsigned int timeout, linear_backoff_thresh; 144 unsigned int linear_backoff_thresh, start_ts;
144 unsigned int start_ts;
145 145
146 if (!inet_csk(sk)->icsk_retransmits) 146 if (!inet_csk(sk)->icsk_retransmits)
147 return false; 147 return false;
@@ -151,14 +151,15 @@ static bool retransmits_timed_out(struct sock *sk,
151 else 151 else
152 start_ts = tcp_sk(sk)->retrans_stamp; 152 start_ts = tcp_sk(sk)->retrans_stamp;
153 153
154 linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); 154 if (likely(timeout == 0)) {
155 155 linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
156 if (boundary <= linear_backoff_thresh)
157 timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
158 else
159 timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
160 (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
161 156
157 if (boundary <= linear_backoff_thresh)
158 timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
159 else
160 timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
161 (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
162 }
162 return (tcp_time_stamp - start_ts) >= timeout; 163 return (tcp_time_stamp - start_ts) >= timeout;
163} 164}
164 165
@@ -174,7 +175,7 @@ static int tcp_write_timeout(struct sock *sk)
174 dst_negative_advice(sk); 175 dst_negative_advice(sk);
175 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 176 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
176 } else { 177 } else {
177 if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { 178 if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
178 /* Black hole detection */ 179 /* Black hole detection */
179 tcp_mtu_probing(icsk, sk); 180 tcp_mtu_probing(icsk, sk);
180 181
@@ -187,14 +188,16 @@ static int tcp_write_timeout(struct sock *sk)
187 188
188 retry_until = tcp_orphan_retries(sk, alive); 189 retry_until = tcp_orphan_retries(sk, alive);
189 do_reset = alive || 190 do_reset = alive ||
190 !retransmits_timed_out(sk, retry_until); 191 !retransmits_timed_out(sk, retry_until, 0);
191 192
192 if (tcp_out_of_resources(sk, do_reset)) 193 if (tcp_out_of_resources(sk, do_reset))
193 return 1; 194 return 1;
194 } 195 }
195 } 196 }
196 197
197 if (retransmits_timed_out(sk, retry_until)) { 198 if (retransmits_timed_out(sk, retry_until,
199 (1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV) ? 0 :
200 icsk->icsk_user_timeout)) {
198 /* Has it gone just too far? */ 201 /* Has it gone just too far? */
199 tcp_write_err(sk); 202 tcp_write_err(sk);
200 return 1; 203 return 1;
@@ -436,7 +439,7 @@ out_reset_timer:
436 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 439 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
437 } 440 }
438 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 441 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
439 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) 442 if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
440 __sk_dst_reset(sk); 443 __sk_dst_reset(sk);
441 444
442out:; 445out:;
@@ -556,7 +559,14 @@ static void tcp_keepalive_timer (unsigned long data)
556 elapsed = keepalive_time_elapsed(tp); 559 elapsed = keepalive_time_elapsed(tp);
557 560
558 if (elapsed >= keepalive_time_when(tp)) { 561 if (elapsed >= keepalive_time_when(tp)) {
559 if (icsk->icsk_probes_out >= keepalive_probes(tp)) { 562 /* If the TCP_USER_TIMEOUT option is enabled, use that
 563 * to determine when to time out instead.
564 */
565 if ((icsk->icsk_user_timeout != 0 &&
566 elapsed >= icsk->icsk_user_timeout &&
567 icsk->icsk_probes_out > 0) ||
568 (icsk->icsk_user_timeout == 0 &&
569 icsk->icsk_probes_out >= keepalive_probes(tp))) {
560 tcp_send_active_reset(sk, GFP_ATOMIC); 570 tcp_send_active_reset(sk, GFP_ATOMIC);
561 tcp_write_err(sk); 571 tcp_write_err(sk);
562 goto out; 572 goto out;
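
The tcp_timer.c changes thread a caller-supplied timeout through retransmits_timed_out() and consult icsk_user_timeout both on the retransmission path (except in SYN_SENT/SYN_RECV, where 0 is passed) and in the keepalive timer. That field is driven from userspace by the TCP_USER_TIMEOUT socket option, in milliseconds; a hedged usage sketch (the fallback #define matches the value introduced by this series):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifndef TCP_USER_TIMEOUT
    #define TCP_USER_TIMEOUT 18             /* not yet in older libc headers */
    #endif

    /* Abort the connection once transmitted data has gone unacknowledged
     * for 30 seconds, instead of relying on retry-count heuristics.
     * A value of 0 restores the default behaviour. */
    int set_user_timeout(int fd)
    {
            unsigned int timeout_ms = 30000;

            return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
                              &timeout_ms, sizeof(timeout_ms));
    }
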
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 20151d6a6241..a534dda5456e 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -80,7 +80,7 @@ static void tcp_westwood_init(struct sock *sk)
80 */ 80 */
81static inline u32 westwood_do_filter(u32 a, u32 b) 81static inline u32 westwood_do_filter(u32 a, u32 b)
82{ 82{
83 return (((7 * a) + b) >> 3); 83 return ((7 * a) + b) >> 3;
84} 84}
85 85
86static void westwood_filter(struct westwood *w, u32 delta) 86static void westwood_filter(struct westwood *w, u32 delta)
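
The Westwood change is pure style, but the line it touches is the heart of the bandwidth estimator: a fixed-point exponentially weighted moving average with gain 1/8, est = (7*est + sample) / 8. A standalone restatement:

    /* One EWMA step with gain 1/8, in integer arithmetic: the previous
     * estimate keeps 7/8 of its weight, the new sample contributes 1/8. */
    static inline unsigned int ewma_gain_1_8(unsigned int est, unsigned int sample)
    {
            return ((7 * est) + sample) >> 3;
    }
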
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 59186ca7808a..9a17bd2a0a37 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -14,8 +14,8 @@
14#include <net/protocol.h> 14#include <net/protocol.h>
15#include <net/xfrm.h> 15#include <net/xfrm.h>
16 16
17static struct xfrm_tunnel *tunnel4_handlers; 17static struct xfrm_tunnel *tunnel4_handlers __read_mostly;
18static struct xfrm_tunnel *tunnel64_handlers; 18static struct xfrm_tunnel *tunnel64_handlers __read_mostly;
19static DEFINE_MUTEX(tunnel4_mutex); 19static DEFINE_MUTEX(tunnel4_mutex);
20 20
21static inline struct xfrm_tunnel **fam_handlers(unsigned short family) 21static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
@@ -39,7 +39,7 @@ int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
39 } 39 }
40 40
41 handler->next = *pprev; 41 handler->next = *pprev;
42 *pprev = handler; 42 rcu_assign_pointer(*pprev, handler);
43 43
44 ret = 0; 44 ret = 0;
45 45
@@ -73,6 +73,11 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
73} 73}
74EXPORT_SYMBOL(xfrm4_tunnel_deregister); 74EXPORT_SYMBOL(xfrm4_tunnel_deregister);
75 75
76#define for_each_tunnel_rcu(head, handler) \
77 for (handler = rcu_dereference(head); \
78 handler != NULL; \
79 handler = rcu_dereference(handler->next)) \
80
76static int tunnel4_rcv(struct sk_buff *skb) 81static int tunnel4_rcv(struct sk_buff *skb)
77{ 82{
78 struct xfrm_tunnel *handler; 83 struct xfrm_tunnel *handler;
@@ -80,7 +85,7 @@ static int tunnel4_rcv(struct sk_buff *skb)
80 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 85 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
81 goto drop; 86 goto drop;
82 87
83 for (handler = tunnel4_handlers; handler; handler = handler->next) 88 for_each_tunnel_rcu(tunnel4_handlers, handler)
84 if (!handler->handler(skb)) 89 if (!handler->handler(skb))
85 return 0; 90 return 0;
86 91
@@ -99,7 +104,7 @@ static int tunnel64_rcv(struct sk_buff *skb)
99 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 104 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
100 goto drop; 105 goto drop;
101 106
102 for (handler = tunnel64_handlers; handler; handler = handler->next) 107 for_each_tunnel_rcu(tunnel64_handlers, handler)
103 if (!handler->handler(skb)) 108 if (!handler->handler(skb))
104 return 0; 109 return 0;
105 110
@@ -115,7 +120,7 @@ static void tunnel4_err(struct sk_buff *skb, u32 info)
115{ 120{
116 struct xfrm_tunnel *handler; 121 struct xfrm_tunnel *handler;
117 122
118 for (handler = tunnel4_handlers; handler; handler = handler->next) 123 for_each_tunnel_rcu(tunnel4_handlers, handler)
119 if (!handler->err_handler(skb, info)) 124 if (!handler->err_handler(skb, info))
120 break; 125 break;
121} 126}
@@ -125,7 +130,7 @@ static void tunnel64_err(struct sk_buff *skb, u32 info)
125{ 130{
126 struct xfrm_tunnel *handler; 131 struct xfrm_tunnel *handler;
127 132
128 for (handler = tunnel64_handlers; handler; handler = handler->next) 133 for_each_tunnel_rcu(tunnel64_handlers, handler)
129 if (!handler->err_handler(skb, info)) 134 if (!handler->err_handler(skb, info))
130 break; 135 break;
131} 136}
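
tunnel4.c converts its handler chains to RCU: registration still serializes writers on tunnel4_mutex but now publishes with rcu_assign_pointer(), while the receive and error paths walk the list through the new for_each_tunnel_rcu() macro, i.e. with rcu_dereference() on every link. A kernel-context sketch of that reader/writer split, with made-up names and a generic payload type (assumes <linux/rcupdate.h>; not the tunnel4 code itself):

    struct demux {
            int (*fn)(void *pkt);
            struct demux *next;
    };

    static struct demux *demux_chain;               /* head, RCU-protected */

    static int run_demux(void *pkt)                 /* reader side */
    {
            struct demux *d;
            int ret = 1;

            rcu_read_lock();
            for (d = rcu_dereference(demux_chain); d;
                 d = rcu_dereference(d->next))
                    if (!d->fn(pkt)) {
                            ret = 0;
                            break;
                    }
            rcu_read_unlock();
            return ret;
    }

Readers never block writers; before freeing a handler, the deregistration path must still let in-flight readers drain, e.g. with synchronize_net().
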
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fb23c2e63b52..b3f7e8cf18ac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -797,7 +797,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
797 return -EOPNOTSUPP; 797 return -EOPNOTSUPP;
798 798
799 ipc.opt = NULL; 799 ipc.opt = NULL;
800 ipc.shtx.flags = 0; 800 ipc.tx_flags = 0;
801 801
802 if (up->pending) { 802 if (up->pending) {
803 /* 803 /*
@@ -845,7 +845,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
845 ipc.addr = inet->inet_saddr; 845 ipc.addr = inet->inet_saddr;
846 846
847 ipc.oif = sk->sk_bound_dev_if; 847 ipc.oif = sk->sk_bound_dev_if;
848 err = sock_tx_timestamp(msg, sk, &ipc.shtx); 848 err = sock_tx_timestamp(sk, &ipc.tx_flags);
849 if (err) 849 if (err)
850 return err; 850 return err;
851 if (msg->msg_controllen) { 851 if (msg->msg_controllen) {
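
The udp.c hunks track a wider refactor in which the skb_shared_tx bit-field union is folded into a plain tx_flags byte, so sock_tx_timestamp() now takes the socket and a u8 pointer instead of the msghdr and union. Nothing changes for applications: transmit timestamps are still requested per socket via SO_TIMESTAMPING, e.g.:

    #include <linux/net_tstamp.h>
    #include <sys/socket.h>

    /* Ask for software transmit timestamps, reported back to userspace
     * via the socket error queue. */
    int enable_tx_timestamps(int fd)
    {
            int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
                        SOF_TIMESTAMPING_SOFTWARE;

            return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                              &flags, sizeof(flags));
    }
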
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 41f5982d2087..82806455e859 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -58,14 +58,14 @@ static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
58 return -ENOENT; 58 return -ENOENT;
59} 59}
60 60
61static struct xfrm_tunnel xfrm_tunnel_handler = { 61static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
62 .handler = xfrm_tunnel_rcv, 62 .handler = xfrm_tunnel_rcv,
63 .err_handler = xfrm_tunnel_err, 63 .err_handler = xfrm_tunnel_err,
64 .priority = 2, 64 .priority = 2,
65}; 65};
66 66
67#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 67#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
68static struct xfrm_tunnel xfrm64_tunnel_handler = { 68static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
69 .handler = xfrm_tunnel_rcv, 69 .handler = xfrm_tunnel_rcv,
70 .err_handler = xfrm_tunnel_err, 70 .err_handler = xfrm_tunnel_err,
71 .priority = 2, 71 .priority = 2,
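
Tagging these handler structs __read_mostly is a cache-placement hint: the linker groups such objects into a dedicated read-mostly data section, away from frequently written variables, so per-packet reads of the handler do not false-share cache lines with hot writable data. It is purely an annotation on the definition; a fragment with hypothetical callback names:

    /* Written once at registration time, read on every tunneled packet. */
    static struct xfrm_tunnel example_handler __read_mostly = {
            .handler     = example_rcv,     /* hypothetical callbacks */
            .err_handler = example_err,
            .priority    = 2,
    };
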
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 324fac3b6c16..8c88340278f5 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -243,7 +243,7 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev)
243/* Check if a route is valid prefix route */ 243/* Check if a route is valid prefix route */
244static inline int addrconf_is_prefix_route(const struct rt6_info *rt) 244static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
245{ 245{
246 return ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0); 246 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0;
247} 247}
248 248
249static void addrconf_del_timer(struct inet6_ifaddr *ifp) 249static void addrconf_del_timer(struct inet6_ifaddr *ifp)
@@ -2964,7 +2964,8 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
2964 start sending router solicitations. 2964 start sending router solicitations.
2965 */ 2965 */
2966 2966
2967 if (ifp->idev->cnf.forwarding == 0 && 2967 if ((ifp->idev->cnf.forwarding == 0 ||
2968 ifp->idev->cnf.forwarding == 2) &&
2968 ifp->idev->cnf.rtr_solicits > 0 && 2969 ifp->idev->cnf.rtr_solicits > 0 &&
2969 (dev->flags&IFF_LOOPBACK) == 0 && 2970 (dev->flags&IFF_LOOPBACK) == 0 &&
2970 (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) { 2971 (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
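
The addrconf.c change lets interfaces with forwarding set to 2 keep sending router solicitations after DAD completes, matching the hybrid-router semantics introduced alongside accept_ra=2. An informal restatement of the three forwarding values this test now distinguishes (not a kernel enum):

    enum {
            IPV6_FWD_HOST   = 0,    /* host: sends RS, processes RA */
            IPV6_FWD_ROUTER = 1,    /* router: no RS, RA ignored */
            IPV6_FWD_HYBRID = 2,    /* forwards, yet does RS/RA like a host */
    };
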
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 8175f802651b..c8993e5a337c 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -518,10 +518,9 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
518 518
519static inline int ip6addrlbl_msgsize(void) 519static inline int ip6addrlbl_msgsize(void)
520{ 520{
521 return (NLMSG_ALIGN(sizeof(struct ifaddrlblmsg)) 521 return NLMSG_ALIGN(sizeof(struct ifaddrlblmsg))
522 + nla_total_size(16) /* IFAL_ADDRESS */ 522 + nla_total_size(16) /* IFAL_ADDRESS */
523 + nla_total_size(4) /* IFAL_LABEL */ 523 + nla_total_size(4); /* IFAL_LABEL */
524 );
525} 524}
526 525
527static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh, 526static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 56b9bf2516f4..60220985bb80 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -467,7 +467,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
467 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 467 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
468 sin->sin6_scope_id = sk->sk_bound_dev_if; 468 sin->sin6_scope_id = sk->sk_bound_dev_if;
469 *uaddr_len = sizeof(*sin); 469 *uaddr_len = sizeof(*sin);
470 return(0); 470 return 0;
471} 471}
472 472
473EXPORT_SYMBOL(inet6_getname); 473EXPORT_SYMBOL(inet6_getname);
@@ -488,7 +488,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
488 case SIOCADDRT: 488 case SIOCADDRT:
489 case SIOCDELRT: 489 case SIOCDELRT:
490 490
491 return(ipv6_route_ioctl(net, cmd, (void __user *)arg)); 491 return ipv6_route_ioctl(net, cmd, (void __user *)arg);
492 492
493 case SIOCSIFADDR: 493 case SIOCSIFADDR:
494 return addrconf_add_ifaddr(net, (void __user *) arg); 494 return addrconf_add_ifaddr(net, (void __user *) arg);
@@ -502,7 +502,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
502 return sk->sk_prot->ioctl(sk, cmd, arg); 502 return sk->sk_prot->ioctl(sk, cmd, arg);
503 } 503 }
504 /*NOTREACHED*/ 504 /*NOTREACHED*/
505 return(0); 505 return 0;
506} 506}
507 507
508EXPORT_SYMBOL(inet6_ioctl); 508EXPORT_SYMBOL(inet6_ioctl);
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index e1caa5d526c2..14ed0a955b56 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -13,12 +13,12 @@ int ipv6_ext_hdr(u8 nexthdr)
13 /* 13 /*
14 * find out if nexthdr is an extension header or a protocol 14 * find out if nexthdr is an extension header or a protocol
15 */ 15 */
16 return ( (nexthdr == NEXTHDR_HOP) || 16 return (nexthdr == NEXTHDR_HOP) ||
17 (nexthdr == NEXTHDR_ROUTING) || 17 (nexthdr == NEXTHDR_ROUTING) ||
18 (nexthdr == NEXTHDR_FRAGMENT) || 18 (nexthdr == NEXTHDR_FRAGMENT) ||
19 (nexthdr == NEXTHDR_AUTH) || 19 (nexthdr == NEXTHDR_AUTH) ||
20 (nexthdr == NEXTHDR_NONE) || 20 (nexthdr == NEXTHDR_NONE) ||
21 (nexthdr == NEXTHDR_DEST) ); 21 (nexthdr == NEXTHDR_DEST);
22} 22}
23 23
24/* 24/*
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 980912ed7a38..99157b4cd56e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -637,7 +637,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
637 } 637 }
638 mtu -= hlen + sizeof(struct frag_hdr); 638 mtu -= hlen + sizeof(struct frag_hdr);
639 639
640 if (skb_has_frags(skb)) { 640 if (skb_has_frag_list(skb)) {
641 int first_len = skb_pagelen(skb); 641 int first_len = skb_pagelen(skb);
642 struct sk_buff *frag2; 642 struct sk_buff *frag2;
643 643
@@ -878,8 +878,8 @@ static inline int ip6_rt_check(struct rt6key *rt_key,
878 struct in6_addr *fl_addr, 878 struct in6_addr *fl_addr,
879 struct in6_addr *addr_cache) 879 struct in6_addr *addr_cache)
880{ 880{
881 return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) && 881 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
882 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache))); 882 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
883} 883}
884 884
885static struct dst_entry *ip6_sk_dst_check(struct sock *sk, 885static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0fd027f3f47e..f6d9f683543e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -83,15 +83,14 @@ struct ip6_tnl_net {
83 /* the IPv6 tunnel fallback device */ 83 /* the IPv6 tunnel fallback device */
84 struct net_device *fb_tnl_dev; 84 struct net_device *fb_tnl_dev;
85 /* lists for storing tunnels in use */ 85 /* lists for storing tunnels in use */
86 struct ip6_tnl *tnls_r_l[HASH_SIZE]; 86 struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
87 struct ip6_tnl *tnls_wc[1]; 87 struct ip6_tnl __rcu *tnls_wc[1];
88 struct ip6_tnl **tnls[2]; 88 struct ip6_tnl __rcu **tnls[2];
89}; 89};
90 90
91/* 91/*
92 * Locking : hash tables are protected by RCU and a spinlock 92 * Locking : hash tables are protected by RCU and RTNL
93 */ 93 */
94static DEFINE_SPINLOCK(ip6_tnl_lock);
95 94
96static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t) 95static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
97{ 96{
@@ -138,8 +137,8 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
138static struct ip6_tnl * 137static struct ip6_tnl *
139ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) 138ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
140{ 139{
141 unsigned h0 = HASH(remote); 140 unsigned int h0 = HASH(remote);
142 unsigned h1 = HASH(local); 141 unsigned int h1 = HASH(local);
143 struct ip6_tnl *t; 142 struct ip6_tnl *t;
144 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 143 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
145 144
@@ -167,7 +166,7 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
167 * Return: head of IPv6 tunnel list 166 * Return: head of IPv6 tunnel list
168 **/ 167 **/
169 168
170static struct ip6_tnl ** 169static struct ip6_tnl __rcu **
171ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p) 170ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p)
172{ 171{
173 struct in6_addr *remote = &p->raddr; 172 struct in6_addr *remote = &p->raddr;
@@ -190,12 +189,10 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p)
190static void 189static void
191ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) 190ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
192{ 191{
193 struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms); 192 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
194 193
 195 spin_lock_bh(&ip6_tnl_lock); 194 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
196 t->next = *tp;
197 rcu_assign_pointer(*tp, t); 195 rcu_assign_pointer(*tp, t);
198 spin_unlock_bh(&ip6_tnl_lock);
199} 196}
200 197
201/** 198/**
@@ -206,13 +203,14 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
206static void 203static void
207ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t) 204ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
208{ 205{
209 struct ip6_tnl **tp; 206 struct ip6_tnl __rcu **tp;
210 207 struct ip6_tnl *iter;
211 for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) { 208
212 if (t == *tp) { 209 for (tp = ip6_tnl_bucket(ip6n, &t->parms);
213 spin_lock_bh(&ip6_tnl_lock); 210 (iter = rtnl_dereference(*tp)) != NULL;
214 *tp = t->next; 211 tp = &iter->next) {
215 spin_unlock_bh(&ip6_tnl_lock); 212 if (t == iter) {
213 rcu_assign_pointer(*tp, t->next);
216 break; 214 break;
217 } 215 }
218 } 216 }
@@ -290,10 +288,13 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net,
290{ 288{
291 struct in6_addr *remote = &p->raddr; 289 struct in6_addr *remote = &p->raddr;
292 struct in6_addr *local = &p->laddr; 290 struct in6_addr *local = &p->laddr;
291 struct ip6_tnl __rcu **tp;
293 struct ip6_tnl *t; 292 struct ip6_tnl *t;
294 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 293 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
295 294
296 for (t = *ip6_tnl_bucket(ip6n, p); t; t = t->next) { 295 for (tp = ip6_tnl_bucket(ip6n, p);
296 (t = rtnl_dereference(*tp)) != NULL;
297 tp = &t->next) {
297 if (ipv6_addr_equal(local, &t->parms.laddr) && 298 if (ipv6_addr_equal(local, &t->parms.laddr) &&
298 ipv6_addr_equal(remote, &t->parms.raddr)) 299 ipv6_addr_equal(remote, &t->parms.raddr))
299 return t; 300 return t;
@@ -318,13 +319,10 @@ ip6_tnl_dev_uninit(struct net_device *dev)
318 struct net *net = dev_net(dev); 319 struct net *net = dev_net(dev);
319 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 320 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
320 321
321 if (dev == ip6n->fb_tnl_dev) { 322 if (dev == ip6n->fb_tnl_dev)
322 spin_lock_bh(&ip6_tnl_lock); 323 rcu_assign_pointer(ip6n->tnls_wc[0], NULL);
323 ip6n->tnls_wc[0] = NULL; 324 else
324 spin_unlock_bh(&ip6_tnl_lock);
325 } else {
326 ip6_tnl_unlink(ip6n, t); 325 ip6_tnl_unlink(ip6n, t);
327 }
328 ip6_tnl_dst_reset(t); 326 ip6_tnl_dst_reset(t);
329 dev_put(dev); 327 dev_put(dev);
330} 328}
@@ -727,7 +725,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
727 skb_tunnel_rx(skb, t->dev); 725 skb_tunnel_rx(skb, t->dev);
728 726
729 dscp_ecn_decapsulate(t, ipv6h, skb); 727 dscp_ecn_decapsulate(t, ipv6h, skb);
730 netif_rx(skb); 728
729 if (netif_rx(skb) == NET_RX_DROP)
730 t->dev->stats.rx_dropped++;
731
731 rcu_read_unlock(); 732 rcu_read_unlock();
732 return 0; 733 return 0;
733 } 734 }
@@ -1369,16 +1370,16 @@ static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1369 ip6_tnl_dev_init_gen(dev); 1370 ip6_tnl_dev_init_gen(dev);
1370 t->parms.proto = IPPROTO_IPV6; 1371 t->parms.proto = IPPROTO_IPV6;
1371 dev_hold(dev); 1372 dev_hold(dev);
1372 ip6n->tnls_wc[0] = t; 1373 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1373} 1374}
1374 1375
1375static struct xfrm6_tunnel ip4ip6_handler = { 1376static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
1376 .handler = ip4ip6_rcv, 1377 .handler = ip4ip6_rcv,
1377 .err_handler = ip4ip6_err, 1378 .err_handler = ip4ip6_err,
1378 .priority = 1, 1379 .priority = 1,
1379}; 1380};
1380 1381
1381static struct xfrm6_tunnel ip6ip6_handler = { 1382static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
1382 .handler = ip6ip6_rcv, 1383 .handler = ip6ip6_rcv,
1383 .err_handler = ip6ip6_err, 1384 .err_handler = ip6ip6_err,
1384 .priority = 1, 1385 .priority = 1,
@@ -1391,14 +1392,14 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1391 LIST_HEAD(list); 1392 LIST_HEAD(list);
1392 1393
1393 for (h = 0; h < HASH_SIZE; h++) { 1394 for (h = 0; h < HASH_SIZE; h++) {
1394 t = ip6n->tnls_r_l[h]; 1395 t = rtnl_dereference(ip6n->tnls_r_l[h]);
1395 while (t != NULL) { 1396 while (t != NULL) {
1396 unregister_netdevice_queue(t->dev, &list); 1397 unregister_netdevice_queue(t->dev, &list);
1397 t = t->next; 1398 t = rtnl_dereference(t->next);
1398 } 1399 }
1399 } 1400 }
1400 1401
1401 t = ip6n->tnls_wc[0]; 1402 t = rtnl_dereference(ip6n->tnls_wc[0]);
1402 unregister_netdevice_queue(t->dev, &list); 1403 unregister_netdevice_queue(t->dev, &list);
1403 unregister_netdevice_many(&list); 1404 unregister_netdevice_many(&list);
1404} 1405}
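
ip6_tunnel.c drops its private spinlock entirely: every writer to the tunnel hash already runs under RTNL, so mutation paths can use rtnl_dereference() to read the __rcu chain pointers and rcu_assign_pointer() to publish, while the packet path keeps using rcu_dereference() under rcu_read_lock(). A condensed kernel-context sketch of the writer-side unlink pattern with illustrative names (assumes <linux/rtnetlink.h> and <linux/rcupdate.h>):

    struct node {
            struct node __rcu *next;
    };

    /* Unlink t from an RCU list whose writers are serialized by RTNL. */
    static void example_unlink(struct node __rcu **head, struct node *t)
    {
            struct node __rcu **tp;
            struct node *iter;

            ASSERT_RTNL();

            for (tp = head;
                 (iter = rtnl_dereference(*tp)) != NULL;
                 tp = &iter->next) {
                    if (iter == t) {
                            rcu_assign_pointer(*tp, t->next);
                            break;
                    }
            }
    }

rtnl_dereference() documents (and, with sparse and lockdep, checks) that RTNL is held, which is why the spin_lock_bh()/spin_unlock_bh() pairs can simply disappear.
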
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 66078dad7fe8..2640c9be589d 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -666,7 +666,9 @@ static int pim6_rcv(struct sk_buff *skb)
666 666
667 skb_tunnel_rx(skb, reg_dev); 667 skb_tunnel_rx(skb, reg_dev);
668 668
669 netif_rx(skb); 669 if (netif_rx(skb) == NET_RX_DROP)
670 reg_dev->stats.rx_dropped++;
671
670 dev_put(reg_dev); 672 dev_put(reg_dev);
671 return 0; 673 return 0;
672 drop: 674 drop:
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 58841c4ae947..b3dd844cd34f 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -228,12 +228,12 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
228 do { 228 do {
229 cur = ((void *)cur) + (cur->nd_opt_len << 3); 229 cur = ((void *)cur) + (cur->nd_opt_len << 3);
230 } while(cur < end && cur->nd_opt_type != type); 230 } while(cur < end && cur->nd_opt_type != type);
231 return (cur <= end && cur->nd_opt_type == type ? cur : NULL); 231 return cur <= end && cur->nd_opt_type == type ? cur : NULL;
232} 232}
233 233
234static inline int ndisc_is_useropt(struct nd_opt_hdr *opt) 234static inline int ndisc_is_useropt(struct nd_opt_hdr *opt)
235{ 235{
236 return (opt->nd_opt_type == ND_OPT_RDNSS); 236 return opt->nd_opt_type == ND_OPT_RDNSS;
237} 237}
238 238
239static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur, 239static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
@@ -244,7 +244,7 @@ static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
244 do { 244 do {
245 cur = ((void *)cur) + (cur->nd_opt_len << 3); 245 cur = ((void *)cur) + (cur->nd_opt_len << 3);
246 } while(cur < end && !ndisc_is_useropt(cur)); 246 } while(cur < end && !ndisc_is_useropt(cur));
247 return (cur <= end && ndisc_is_useropt(cur) ? cur : NULL); 247 return cur <= end && ndisc_is_useropt(cur) ? cur : NULL;
248} 248}
249 249
250static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len, 250static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
@@ -319,7 +319,7 @@ static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
319 int prepad = ndisc_addr_option_pad(dev->type); 319 int prepad = ndisc_addr_option_pad(dev->type);
320 if (lladdrlen != NDISC_OPT_SPACE(dev->addr_len + prepad)) 320 if (lladdrlen != NDISC_OPT_SPACE(dev->addr_len + prepad))
321 return NULL; 321 return NULL;
322 return (lladdr + prepad); 322 return lladdr + prepad;
323} 323}
324 324
325int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir) 325int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
@@ -1105,6 +1105,18 @@ errout:
1105 rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err); 1105 rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err);
1106} 1106}
1107 1107
1108static inline int accept_ra(struct inet6_dev *in6_dev)
1109{
1110 /*
 1111 * If forwarding is enabled, RAs are not accepted unless the special
1112 * hybrid mode (accept_ra=2) is enabled.
1113 */
1114 if (in6_dev->cnf.forwarding && in6_dev->cnf.accept_ra < 2)
1115 return 0;
1116
1117 return in6_dev->cnf.accept_ra;
1118}
1119
1108static void ndisc_router_discovery(struct sk_buff *skb) 1120static void ndisc_router_discovery(struct sk_buff *skb)
1109{ 1121{
1110 struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb); 1122 struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb);
@@ -1158,8 +1170,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1158 return; 1170 return;
1159 } 1171 }
1160 1172
1161 /* skip route and link configuration on routers */ 1173 if (!accept_ra(in6_dev))
1162 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
1163 goto skip_linkparms; 1174 goto skip_linkparms;
1164 1175
1165#ifdef CONFIG_IPV6_NDISC_NODETYPE 1176#ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -1309,8 +1320,7 @@ skip_linkparms:
1309 NEIGH_UPDATE_F_ISROUTER); 1320 NEIGH_UPDATE_F_ISROUTER);
1310 } 1321 }
1311 1322
1312 /* skip route and link configuration on routers */ 1323 if (!accept_ra(in6_dev))
1313 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
1314 goto out; 1324 goto out;
1315 1325
1316#ifdef CONFIG_IPV6_ROUTE_INFO 1326#ifdef CONFIG_IPV6_ROUTE_INFO
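
The new accept_ra() helper centralizes the policy the two deleted inline checks duplicated: a forwarding interface ignores router advertisements unless it runs in hybrid mode, i.e. accept_ra is 2. From userspace that mode is configured per interface through sysctls; a small C equivalent of the two sysctl writes (the interface name is illustrative):

    #include <stdio.h>

    static int write_sysctl(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* Forward packets on eth0, but keep autoconfiguring from RAs. */
            write_sysctl("/proc/sys/net/ipv6/conf/eth0/forwarding", "1");
            write_sysctl("/proc/sys/net/ipv6/conf/eth0/accept_ra", "2");
            return 0;
    }
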
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 8e754be92c24..6b331e9b5706 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -82,13 +82,13 @@ EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
82int 82int
83ip6t_ext_hdr(u8 nexthdr) 83ip6t_ext_hdr(u8 nexthdr)
84{ 84{
85 return ( (nexthdr == IPPROTO_HOPOPTS) || 85 return (nexthdr == IPPROTO_HOPOPTS) ||
86 (nexthdr == IPPROTO_ROUTING) || 86 (nexthdr == IPPROTO_ROUTING) ||
87 (nexthdr == IPPROTO_FRAGMENT) || 87 (nexthdr == IPPROTO_FRAGMENT) ||
88 (nexthdr == IPPROTO_ESP) || 88 (nexthdr == IPPROTO_ESP) ||
89 (nexthdr == IPPROTO_AH) || 89 (nexthdr == IPPROTO_AH) ||
90 (nexthdr == IPPROTO_NONE) || 90 (nexthdr == IPPROTO_NONE) ||
91 (nexthdr == IPPROTO_DSTOPTS) ); 91 (nexthdr == IPPROTO_DSTOPTS);
92} 92}
93 93
94/* Returns whether matches rule or not. */ 94/* Returns whether matches rule or not. */
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 578f3c1a16db..138a8b362706 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -363,7 +363,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
363 /* If the first fragment is fragmented itself, we split 363 /* If the first fragment is fragmented itself, we split
364 * it to two chunks: the first with data and paged part 364 * it to two chunks: the first with data and paged part
365 * and the second, holding only fragments. */ 365 * and the second, holding only fragments. */
366 if (skb_has_frags(head)) { 366 if (skb_has_frag_list(head)) {
367 struct sk_buff *clone; 367 struct sk_buff *clone;
368 int i, plen = 0; 368 int i, plen = 0;
369 369
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 1fa3468f0f32..9bb936ae2452 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -25,28 +25,14 @@
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <net/protocol.h> 26#include <net/protocol.h>
27 27
28const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS]; 28const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS] __read_mostly;
29static DEFINE_SPINLOCK(inet6_proto_lock);
30
31 29
32int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) 30int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
33{ 31{
34 int ret, hash = protocol & (MAX_INET_PROTOS - 1); 32 int hash = protocol & (MAX_INET_PROTOS - 1);
35
36 spin_lock_bh(&inet6_proto_lock);
37
38 if (inet6_protos[hash]) {
39 ret = -1;
40 } else {
41 inet6_protos[hash] = prot;
42 ret = 0;
43 }
44
45 spin_unlock_bh(&inet6_proto_lock);
46 33
47 return ret; 34 return !cmpxchg(&inet6_protos[hash], NULL, prot) ? 0 : -1;
48} 35}
49
50EXPORT_SYMBOL(inet6_add_protocol); 36EXPORT_SYMBOL(inet6_add_protocol);
51 37
52/* 38/*
@@ -57,20 +43,10 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol
57{ 43{
58 int ret, hash = protocol & (MAX_INET_PROTOS - 1); 44 int ret, hash = protocol & (MAX_INET_PROTOS - 1);
59 45
60 spin_lock_bh(&inet6_proto_lock); 46 ret = (cmpxchg(&inet6_protos[hash], prot, NULL) == prot) ? 0 : -1;
61
62 if (inet6_protos[hash] != prot) {
63 ret = -1;
64 } else {
65 inet6_protos[hash] = NULL;
66 ret = 0;
67 }
68
69 spin_unlock_bh(&inet6_proto_lock);
70 47
71 synchronize_net(); 48 synchronize_net();
72 49
73 return ret; 50 return ret;
74} 51}
75
76EXPORT_SYMBOL(inet6_del_protocol); 52EXPORT_SYMBOL(inet6_del_protocol);
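
protocol.c replaces the spinlock around inet6_protos[] with a single compare-and-swap per operation: adding succeeds only if the slot was still NULL, deleting only if the slot still holds the protocol being removed, and synchronize_net() then waits for in-flight readers before the caller may free anything. A userspace model of those two cmpxchg() uses, via GCC's atomic builtins (illustrative; the kernel operates on the real inet6_protos table):

    #include <stdbool.h>
    #include <stddef.h>

    static const void *slots[256];

    /* Claim slot i: succeeds only if it was still empty. */
    static bool slot_add(unsigned int i, const void *p)
    {
            const void *expected = NULL;

            return __atomic_compare_exchange_n(&slots[i], &expected, p,
                                               false, __ATOMIC_SEQ_CST,
                                               __ATOMIC_SEQ_CST);
    }

    /* Release slot i: succeeds only if it still holds what we put there. */
    static bool slot_del(unsigned int i, const void *p)
    {
            const void *expected = p;

            return __atomic_compare_exchange_n(&slots[i], &expected, NULL,
                                               false, __ATOMIC_SEQ_CST,
                                               __ATOMIC_SEQ_CST);
    }
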
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e677937a07fc..45e6efb7f171 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -764,7 +764,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
764 return -EINVAL; 764 return -EINVAL;
765 765
766 if (sin6->sin6_family && sin6->sin6_family != AF_INET6) 766 if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
767 return(-EAFNOSUPPORT); 767 return -EAFNOSUPPORT;
768 768
769 /* port is the proto value [0..255] carried in nexthdr */ 769 /* port is the proto value [0..255] carried in nexthdr */
770 proto = ntohs(sin6->sin6_port); 770 proto = ntohs(sin6->sin6_port);
@@ -772,10 +772,10 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
772 if (!proto) 772 if (!proto)
773 proto = inet->inet_num; 773 proto = inet->inet_num;
774 else if (proto != inet->inet_num) 774 else if (proto != inet->inet_num)
775 return(-EINVAL); 775 return -EINVAL;
776 776
777 if (proto > 255) 777 if (proto > 255)
778 return(-EINVAL); 778 return -EINVAL;
779 779
780 daddr = &sin6->sin6_addr; 780 daddr = &sin6->sin6_addr;
781 if (np->sndflow) { 781 if (np->sndflow) {
@@ -985,7 +985,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
985 /* You may get strange result with a positive odd offset; 985 /* You may get strange result with a positive odd offset;
986 RFC2292bis agrees with me. */ 986 RFC2292bis agrees with me. */
987 if (val > 0 && (val&1)) 987 if (val > 0 && (val&1))
988 return(-EINVAL); 988 return -EINVAL;
989 if (val < 0) { 989 if (val < 0) {
990 rp->checksum = 0; 990 rp->checksum = 0;
991 } else { 991 } else {
@@ -997,7 +997,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
997 break; 997 break;
998 998
999 default: 999 default:
1000 return(-ENOPROTOOPT); 1000 return -ENOPROTOOPT;
1001 } 1001 }
1002} 1002}
1003 1003
@@ -1190,7 +1190,7 @@ static int rawv6_init_sk(struct sock *sk)
1190 default: 1190 default:
1191 break; 1191 break;
1192 } 1192 }
1193 return(0); 1193 return 0;
1194} 1194}
1195 1195
1196struct proto rawv6_prot = { 1196struct proto rawv6_prot = {
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 64cfef1b0a4c..c7ba3149633f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -458,7 +458,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
458 /* If the first fragment is fragmented itself, we split 458 /* If the first fragment is fragmented itself, we split
459 * it to two chunks: the first with data and paged part 459 * it to two chunks: the first with data and paged part
460 * and the second, holding only fragments. */ 460 * and the second, holding only fragments. */
461 if (skb_has_frags(head)) { 461 if (skb_has_frag_list(head)) {
462 struct sk_buff *clone; 462 struct sk_buff *clone;
463 int i, plen = 0; 463 int i, plen = 0;
464 464
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d126365ac046..25b0beda4331 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -217,14 +217,14 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
217 217
218static __inline__ int rt6_check_expired(const struct rt6_info *rt) 218static __inline__ int rt6_check_expired(const struct rt6_info *rt)
219{ 219{
220 return (rt->rt6i_flags & RTF_EXPIRES && 220 return (rt->rt6i_flags & RTF_EXPIRES) &&
221 time_after(jiffies, rt->rt6i_expires)); 221 time_after(jiffies, rt->rt6i_expires);
222} 222}
223 223
224static inline int rt6_need_strict(struct in6_addr *daddr) 224static inline int rt6_need_strict(struct in6_addr *daddr)
225{ 225{
226 return (ipv6_addr_type(daddr) & 226 return ipv6_addr_type(daddr) &
227 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)); 227 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
228} 228}
229 229
230/* 230/*
@@ -440,7 +440,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
440 __func__, match); 440 __func__, match);
441 441
442 net = dev_net(rt0->rt6i_dev); 442 net = dev_net(rt0->rt6i_dev);
443 return (match ? match : net->ipv6.ip6_null_entry); 443 return match ? match : net->ipv6.ip6_null_entry;
444} 444}
445 445
446#ifdef CONFIG_IPV6_ROUTE_INFO 446#ifdef CONFIG_IPV6_ROUTE_INFO
@@ -859,7 +859,7 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
859 859
860 dst_release(*dstp); 860 dst_release(*dstp);
861 *dstp = new; 861 *dstp = new;
862 return (new ? 0 : -ENOMEM); 862 return new ? 0 : -ENOMEM;
863} 863}
864EXPORT_SYMBOL_GPL(ip6_dst_blackhole); 864EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
865 865
@@ -1070,7 +1070,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
1070 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1; 1070 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1071out: 1071out:
1072 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity; 1072 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1073 return (atomic_read(&ops->entries) > rt_max_size); 1073 return atomic_read(&ops->entries) > rt_max_size;
1074} 1074}
1075 1075
1076/* Clean host part of a prefix. Not necessary in radix tree, 1076/* Clean host part of a prefix. Not necessary in radix tree,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 4699cd3c3118..8a0399822230 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -68,19 +68,18 @@ static void ipip6_tunnel_setup(struct net_device *dev);
68 68
69static int sit_net_id __read_mostly; 69static int sit_net_id __read_mostly;
70struct sit_net { 70struct sit_net {
71 struct ip_tunnel *tunnels_r_l[HASH_SIZE]; 71 struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
72 struct ip_tunnel *tunnels_r[HASH_SIZE]; 72 struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
73 struct ip_tunnel *tunnels_l[HASH_SIZE]; 73 struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
74 struct ip_tunnel *tunnels_wc[1]; 74 struct ip_tunnel __rcu *tunnels_wc[1];
75 struct ip_tunnel **tunnels[4]; 75 struct ip_tunnel __rcu **tunnels[4];
76 76
77 struct net_device *fb_tunnel_dev; 77 struct net_device *fb_tunnel_dev;
78}; 78};
79 79
80/* 80/*
81 * Locking : hash tables are protected by RCU and a spinlock 81 * Locking : hash tables are protected by RCU and RTNL
82 */ 82 */
83static DEFINE_SPINLOCK(ipip6_lock);
84 83
85#define for_each_ip_tunnel_rcu(start) \ 84#define for_each_ip_tunnel_rcu(start) \
86 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) 85 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
@@ -91,8 +90,8 @@ static DEFINE_SPINLOCK(ipip6_lock);
91static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, 90static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
92 struct net_device *dev, __be32 remote, __be32 local) 91 struct net_device *dev, __be32 remote, __be32 local)
93{ 92{
94 unsigned h0 = HASH(remote); 93 unsigned int h0 = HASH(remote);
95 unsigned h1 = HASH(local); 94 unsigned int h1 = HASH(local);
96 struct ip_tunnel *t; 95 struct ip_tunnel *t;
97 struct sit_net *sitn = net_generic(net, sit_net_id); 96 struct sit_net *sitn = net_generic(net, sit_net_id);
98 97
@@ -121,12 +120,12 @@ static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
121 return NULL; 120 return NULL;
122} 121}
123 122
124static struct ip_tunnel **__ipip6_bucket(struct sit_net *sitn, 123static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn,
125 struct ip_tunnel_parm *parms) 124 struct ip_tunnel_parm *parms)
126{ 125{
127 __be32 remote = parms->iph.daddr; 126 __be32 remote = parms->iph.daddr;
128 __be32 local = parms->iph.saddr; 127 __be32 local = parms->iph.saddr;
129 unsigned h = 0; 128 unsigned int h = 0;
130 int prio = 0; 129 int prio = 0;
131 130
132 if (remote) { 131 if (remote) {
@@ -140,7 +139,7 @@ static struct ip_tunnel **__ipip6_bucket(struct sit_net *sitn,
140 return &sitn->tunnels[prio][h]; 139 return &sitn->tunnels[prio][h];
141} 140}
142 141
143static inline struct ip_tunnel **ipip6_bucket(struct sit_net *sitn, 142static inline struct ip_tunnel __rcu **ipip6_bucket(struct sit_net *sitn,
144 struct ip_tunnel *t) 143 struct ip_tunnel *t)
145{ 144{
146 return __ipip6_bucket(sitn, &t->parms); 145 return __ipip6_bucket(sitn, &t->parms);
@@ -148,13 +147,14 @@ static inline struct ip_tunnel **ipip6_bucket(struct sit_net *sitn,
148 147
149static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t) 148static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
150{ 149{
151 struct ip_tunnel **tp; 150 struct ip_tunnel __rcu **tp;
152 151 struct ip_tunnel *iter;
153 for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) { 152
154 if (t == *tp) { 153 for (tp = ipip6_bucket(sitn, t);
155 spin_lock_bh(&ipip6_lock); 154 (iter = rtnl_dereference(*tp)) != NULL;
156 *tp = t->next; 155 tp = &iter->next) {
157 spin_unlock_bh(&ipip6_lock); 156 if (t == iter) {
157 rcu_assign_pointer(*tp, t->next);
158 break; 158 break;
159 } 159 }
160 } 160 }
@@ -162,12 +162,10 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
162 162
163static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t) 163static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
164{ 164{
165 struct ip_tunnel **tp = ipip6_bucket(sitn, t); 165 struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
166 166
167 spin_lock_bh(&ipip6_lock); 167 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
168 t->next = *tp;
169 rcu_assign_pointer(*tp, t); 168 rcu_assign_pointer(*tp, t);
170 spin_unlock_bh(&ipip6_lock);
171} 169}
172 170
173static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) 171static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
@@ -187,17 +185,20 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
187#endif 185#endif
188} 186}
189 187
190static struct ip_tunnel * ipip6_tunnel_locate(struct net *net, 188static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
191 struct ip_tunnel_parm *parms, int create) 189 struct ip_tunnel_parm *parms, int create)
192{ 190{
193 __be32 remote = parms->iph.daddr; 191 __be32 remote = parms->iph.daddr;
194 __be32 local = parms->iph.saddr; 192 __be32 local = parms->iph.saddr;
195 struct ip_tunnel *t, **tp, *nt; 193 struct ip_tunnel *t, *nt;
194 struct ip_tunnel __rcu **tp;
196 struct net_device *dev; 195 struct net_device *dev;
197 char name[IFNAMSIZ]; 196 char name[IFNAMSIZ];
198 struct sit_net *sitn = net_generic(net, sit_net_id); 197 struct sit_net *sitn = net_generic(net, sit_net_id);
199 198
200 for (tp = __ipip6_bucket(sitn, parms); (t = *tp) != NULL; tp = &t->next) { 199 for (tp = __ipip6_bucket(sitn, parms);
200 (t = rtnl_dereference(*tp)) != NULL;
201 tp = &t->next) {
201 if (local == t->parms.iph.saddr && 202 if (local == t->parms.iph.saddr &&
202 remote == t->parms.iph.daddr && 203 remote == t->parms.iph.daddr &&
203 parms->link == t->parms.link) { 204 parms->link == t->parms.link) {
@@ -340,7 +341,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
340 341
341 ASSERT_RTNL(); 342 ASSERT_RTNL();
342 343
343 for (p = t->prl; p; p = p->next) { 344 for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) {
344 if (p->addr == a->addr) { 345 if (p->addr == a->addr) {
345 if (chg) { 346 if (chg) {
346 p->flags = a->flags; 347 p->flags = a->flags;
@@ -451,15 +452,12 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
451 struct sit_net *sitn = net_generic(net, sit_net_id); 452 struct sit_net *sitn = net_generic(net, sit_net_id);
452 453
453 if (dev == sitn->fb_tunnel_dev) { 454 if (dev == sitn->fb_tunnel_dev) {
454 spin_lock_bh(&ipip6_lock); 455 rcu_assign_pointer(sitn->tunnels_wc[0], NULL);
455 sitn->tunnels_wc[0] = NULL;
456 spin_unlock_bh(&ipip6_lock);
457 dev_put(dev);
458 } else { 456 } else {
459 ipip6_tunnel_unlink(sitn, netdev_priv(dev)); 457 ipip6_tunnel_unlink(sitn, netdev_priv(dev));
460 ipip6_tunnel_del_prl(netdev_priv(dev), NULL); 458 ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
461 dev_put(dev);
462 } 459 }
460 dev_put(dev);
463} 461}
464 462
465 463
@@ -566,7 +564,10 @@ static int ipip6_rcv(struct sk_buff *skb)
566 skb_tunnel_rx(skb, tunnel->dev); 564 skb_tunnel_rx(skb, tunnel->dev);
567 565
568 ipip6_ecn_decapsulate(iph, skb); 566 ipip6_ecn_decapsulate(iph, skb);
569 netif_rx(skb); 567
568 if (netif_rx(skb) == NET_RX_DROP)
569 tunnel->dev->stats.rx_dropped++;
570
570 rcu_read_unlock(); 571 rcu_read_unlock();
571 return 0; 572 return 0;
572 } 573 }
@@ -590,7 +591,7 @@ __be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel)
590#ifdef CONFIG_IPV6_SIT_6RD 591#ifdef CONFIG_IPV6_SIT_6RD
591 if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix, 592 if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
592 tunnel->ip6rd.prefixlen)) { 593 tunnel->ip6rd.prefixlen)) {
593 unsigned pbw0, pbi0; 594 unsigned int pbw0, pbi0;
594 int pbi1; 595 int pbi1;
595 u32 d; 596 u32 d;
596 597
@@ -1132,7 +1133,7 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1132 sitn->tunnels_wc[0] = tunnel; 1133 sitn->tunnels_wc[0] = tunnel;
1133} 1134}
1134 1135
1135static struct xfrm_tunnel sit_handler = { 1136static struct xfrm_tunnel sit_handler __read_mostly = {
1136 .handler = ipip6_rcv, 1137 .handler = ipip6_rcv,
1137 .err_handler = ipip6_err, 1138 .err_handler = ipip6_err,
1138 .priority = 1, 1139 .priority = 1,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index fe6d40418c0b..8d93f6d81979 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -139,7 +139,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
139 return -EINVAL; 139 return -EINVAL;
140 140
141 if (usin->sin6_family != AF_INET6) 141 if (usin->sin6_family != AF_INET6)
142 return(-EAFNOSUPPORT); 142 return -EAFNOSUPPORT;
143 143
144 memset(&fl, 0, sizeof(fl)); 144 memset(&fl, 0, sizeof(fl));
145 145
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index fc3c86a47452..d9864725d0c6 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -30,8 +30,8 @@
30#include <net/protocol.h> 30#include <net/protocol.h>
31#include <net/xfrm.h> 31#include <net/xfrm.h>
32 32
33static struct xfrm6_tunnel *tunnel6_handlers; 33static struct xfrm6_tunnel *tunnel6_handlers __read_mostly;
34static struct xfrm6_tunnel *tunnel46_handlers; 34static struct xfrm6_tunnel *tunnel46_handlers __read_mostly;
35static DEFINE_MUTEX(tunnel6_mutex); 35static DEFINE_MUTEX(tunnel6_mutex);
36 36
37int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family) 37int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
@@ -51,7 +51,7 @@ int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
51 } 51 }
52 52
53 handler->next = *pprev; 53 handler->next = *pprev;
54 *pprev = handler; 54 rcu_assign_pointer(*pprev, handler);
55 55
56 ret = 0; 56 ret = 0;
57 57
@@ -88,6 +88,11 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
88 88
89EXPORT_SYMBOL(xfrm6_tunnel_deregister); 89EXPORT_SYMBOL(xfrm6_tunnel_deregister);
90 90
91#define for_each_tunnel_rcu(head, handler) \
92 for (handler = rcu_dereference(head); \
93 handler != NULL; \
94 handler = rcu_dereference(handler->next)) \
95
91static int tunnel6_rcv(struct sk_buff *skb) 96static int tunnel6_rcv(struct sk_buff *skb)
92{ 97{
93 struct xfrm6_tunnel *handler; 98 struct xfrm6_tunnel *handler;
@@ -95,7 +100,7 @@ static int tunnel6_rcv(struct sk_buff *skb)
95 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 100 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
96 goto drop; 101 goto drop;
97 102
98 for (handler = tunnel6_handlers; handler; handler = handler->next) 103 for_each_tunnel_rcu(tunnel6_handlers, handler)
99 if (!handler->handler(skb)) 104 if (!handler->handler(skb))
100 return 0; 105 return 0;
101 106
@@ -113,7 +118,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
113 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 118 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
114 goto drop; 119 goto drop;
115 120
116 for (handler = tunnel46_handlers; handler; handler = handler->next) 121 for_each_tunnel_rcu(tunnel46_handlers, handler)
117 if (!handler->handler(skb)) 122 if (!handler->handler(skb))
118 return 0; 123 return 0;
119 124
@@ -129,7 +134,7 @@ static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
129{ 134{
130 struct xfrm6_tunnel *handler; 135 struct xfrm6_tunnel *handler;
131 136
132 for (handler = tunnel6_handlers; handler; handler = handler->next) 137 for_each_tunnel_rcu(tunnel6_handlers, handler)
133 if (!handler->err_handler(skb, opt, type, code, offset, info)) 138 if (!handler->err_handler(skb, opt, type, code, offset, info))
134 break; 139 break;
135} 140}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 6baeabbbca82..39676eac3a37 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -199,7 +199,7 @@ static inline int xfrm6_garbage_collect(struct dst_ops *ops)
199 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops); 199 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
200 200
201 xfrm6_policy_afinfo.garbage_collect(net); 201 xfrm6_policy_afinfo.garbage_collect(net);
202 return (atomic_read(&ops->entries) > ops->gc_thresh * 2); 202 return atomic_read(&ops->entries) > ops->gc_thresh * 2;
203} 203}
204 204
205static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) 205static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 2ce3a8278f26..ac7584b946a5 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -317,13 +317,13 @@ static const struct xfrm_type xfrm6_tunnel_type = {
317 .output = xfrm6_tunnel_output, 317 .output = xfrm6_tunnel_output,
318}; 318};
319 319
320static struct xfrm6_tunnel xfrm6_tunnel_handler = { 320static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = {
321 .handler = xfrm6_tunnel_rcv, 321 .handler = xfrm6_tunnel_rcv,
322 .err_handler = xfrm6_tunnel_err, 322 .err_handler = xfrm6_tunnel_err,
323 .priority = 2, 323 .priority = 2,
324}; 324};
325 325
326static struct xfrm6_tunnel xfrm46_tunnel_handler = { 326static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = {
327 .handler = xfrm6_tunnel_rcv, 327 .handler = xfrm6_tunnel_rcv,
328 .err_handler = xfrm6_tunnel_err, 328 .err_handler = xfrm6_tunnel_err,
329 .priority = 2, 329 .priority = 2,
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index fd55b5135de5..bf3635129b17 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -573,9 +573,9 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name)
573 /* Requested object/attribute doesn't exist */ 573 /* Requested object/attribute doesn't exist */
574 if((self->errno == IAS_CLASS_UNKNOWN) || 574 if((self->errno == IAS_CLASS_UNKNOWN) ||
575 (self->errno == IAS_ATTRIB_UNKNOWN)) 575 (self->errno == IAS_ATTRIB_UNKNOWN))
576 return (-EADDRNOTAVAIL); 576 return -EADDRNOTAVAIL;
577 else 577 else
578 return (-EHOSTUNREACH); 578 return -EHOSTUNREACH;
579 } 579 }
580 580
581 /* Get the remote TSAP selector */ 581 /* Get the remote TSAP selector */
@@ -663,7 +663,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
663 __func__, name); 663 __func__, name);
664 self->daddr = DEV_ADDR_ANY; 664 self->daddr = DEV_ADDR_ANY;
665 kfree(discoveries); 665 kfree(discoveries);
666 return(-ENOTUNIQ); 666 return -ENOTUNIQ;
667 } 667 }
668 /* First time we found that one, save it ! */ 668 /* First time we found that one, save it ! */
669 daddr = self->daddr; 669 daddr = self->daddr;
@@ -677,7 +677,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
677 IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__); 677 IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__);
678 self->daddr = DEV_ADDR_ANY; 678 self->daddr = DEV_ADDR_ANY;
679 kfree(discoveries); 679 kfree(discoveries);
680 return(-EHOSTUNREACH); 680 return -EHOSTUNREACH;
681 break; 681 break;
682 } 682 }
683 } 683 }
@@ -689,7 +689,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
689 IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n", 689 IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n",
690 __func__, name); 690 __func__, name);
691 self->daddr = DEV_ADDR_ANY; 691 self->daddr = DEV_ADDR_ANY;
692 return(-EADDRNOTAVAIL); 692 return -EADDRNOTAVAIL;
693 } 693 }
694 694
695 /* Revert back to discovered device & service */ 695 /* Revert back to discovered device & service */
@@ -2465,9 +2465,9 @@ bed:
2465 /* Requested object/attribute doesn't exist */ 2465 /* Requested object/attribute doesn't exist */
2466 if((self->errno == IAS_CLASS_UNKNOWN) || 2466 if((self->errno == IAS_CLASS_UNKNOWN) ||
2467 (self->errno == IAS_ATTRIB_UNKNOWN)) 2467 (self->errno == IAS_ATTRIB_UNKNOWN))
2468 return (-EADDRNOTAVAIL); 2468 return -EADDRNOTAVAIL;
2469 else 2469 else
2470 return (-EHOSTUNREACH); 2470 return -EHOSTUNREACH;
2471 } 2471 }
2472 2472
2473 /* Translate from internal to user structure */ 2473 /* Translate from internal to user structure */
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index c1c8ae939126..36c3f037f172 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -315,7 +315,7 @@ struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn,
315 315
316 /* Get the actual number of device in the buffer and return */ 316 /* Get the actual number of device in the buffer and return */
317 *pn = i; 317 *pn = i;
318 return(buffer); 318 return buffer;
319} 319}
320 320
321#ifdef CONFIG_PROC_FS 321#ifdef CONFIG_PROC_FS
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index faa82ca2dfdc..a39cca8331df 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -449,8 +449,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
449 } 449 }
450 450
451#ifdef SERIAL_DO_RESTART 451#ifdef SERIAL_DO_RESTART
452 return ((self->flags & ASYNC_HUP_NOTIFY) ? 452 return (self->flags & ASYNC_HUP_NOTIFY) ?
453 -EAGAIN : -ERESTARTSYS); 453 -EAGAIN : -ERESTARTSYS;
454#else 454#else
455 return -EAGAIN; 455 return -EAGAIN;
456#endif 456#endif
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 5bb8353105cc..8ee1ff6c742f 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -45,13 +45,11 @@ static int irlan_eth_close(struct net_device *dev);
45static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb, 45static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
46 struct net_device *dev); 46 struct net_device *dev);
47static void irlan_eth_set_multicast_list( struct net_device *dev); 47static void irlan_eth_set_multicast_list( struct net_device *dev);
48static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev);
49 48
50static const struct net_device_ops irlan_eth_netdev_ops = { 49static const struct net_device_ops irlan_eth_netdev_ops = {
51 .ndo_open = irlan_eth_open, 50 .ndo_open = irlan_eth_open,
52 .ndo_stop = irlan_eth_close, 51 .ndo_stop = irlan_eth_close,
53 .ndo_start_xmit = irlan_eth_xmit, 52 .ndo_start_xmit = irlan_eth_xmit,
54 .ndo_get_stats = irlan_eth_get_stats,
55 .ndo_set_multicast_list = irlan_eth_set_multicast_list, 53 .ndo_set_multicast_list = irlan_eth_set_multicast_list,
56 .ndo_change_mtu = eth_change_mtu, 54 .ndo_change_mtu = eth_change_mtu,
57 .ndo_validate_addr = eth_validate_addr, 55 .ndo_validate_addr = eth_validate_addr,
@@ -208,10 +206,10 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
208 * tried :-) DB 206 * tried :-) DB
209 */ 207 */
210 /* irttp_data_request already free the packet */ 208 /* irttp_data_request already free the packet */
211 self->stats.tx_dropped++; 209 dev->stats.tx_dropped++;
212 } else { 210 } else {
213 self->stats.tx_packets++; 211 dev->stats.tx_packets++;
214 self->stats.tx_bytes += len; 212 dev->stats.tx_bytes += len;
215 } 213 }
216 214
217 return NETDEV_TX_OK; 215 return NETDEV_TX_OK;
@@ -226,15 +224,16 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
226int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb) 224int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
227{ 225{
228 struct irlan_cb *self = instance; 226 struct irlan_cb *self = instance;
227 struct net_device *dev = self->dev;
229 228
230 if (skb == NULL) { 229 if (skb == NULL) {
231 ++self->stats.rx_dropped; 230 dev->stats.rx_dropped++;
232 return 0; 231 return 0;
233 } 232 }
234 if (skb->len < ETH_HLEN) { 233 if (skb->len < ETH_HLEN) {
235 IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n", 234 IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n",
236 __func__, skb->len); 235 __func__, skb->len);
237 ++self->stats.rx_dropped; 236 dev->stats.rx_dropped++;
238 dev_kfree_skb(skb); 237 dev_kfree_skb(skb);
239 return 0; 238 return 0;
240 } 239 }
@@ -244,10 +243,10 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
244 * might have been previously set by the low level IrDA network 243 * might have been previously set by the low level IrDA network
245 * device driver 244 * device driver
246 */ 245 */
247 skb->protocol = eth_type_trans(skb, self->dev); /* Remove eth header */ 246 skb->protocol = eth_type_trans(skb, dev); /* Remove eth header */
248 247
249 self->stats.rx_packets++; 248 dev->stats.rx_packets++;
250 self->stats.rx_bytes += skb->len; 249 dev->stats.rx_bytes += skb->len;
251 250
252 netif_rx(skb); /* Eat it! */ 251 netif_rx(skb); /* Eat it! */
253 252
@@ -348,16 +347,3 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
348 else 347 else
349 irlan_set_broadcast_filter(self, FALSE); 348 irlan_set_broadcast_filter(self, FALSE);
350} 349}
351
352/*
353 * Function irlan_get_stats (dev)
354 *
355 * Get the current statistics for this device
356 *
357 */
358static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev)
359{
360 struct irlan_cb *self = netdev_priv(dev);
361
362 return &self->stats;
363}
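The irlan conversion above follows a tree-wide pattern: drop the driver's
private net_device_stats copy and count directly into the stats member
embedded in struct net_device. Once nothing fills a private structure, the
.ndo_get_stats hook can go too, because the core's dev_get_stats() falls
back to &dev->stats when the hook is NULL. A minimal sketch with made-up
names (toy_xmit is not in the patch):

        #include <linux/netdevice.h>

        static netdev_tx_t toy_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += skb->len;
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        static const struct net_device_ops toy_netdev_ops = {
                .ndo_start_xmit = toy_xmit,
                /* no .ndo_get_stats: dev_get_stats() reads &dev->stats */
        };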
diff --git a/net/irda/irlan/irlan_event.c b/net/irda/irlan/irlan_event.c
index cbcb4eb54037..43f16040a6fe 100644
--- a/net/irda/irlan/irlan_event.c
+++ b/net/irda/irlan/irlan_event.c
@@ -24,7 +24,7 @@
24 24
25#include <net/irda/irlan_event.h> 25#include <net/irda/irlan_event.h>
26 26
27char *irlan_state[] = { 27const char * const irlan_state[] = {
28 "IRLAN_IDLE", 28 "IRLAN_IDLE",
29 "IRLAN_QUERY", 29 "IRLAN_QUERY",
30 "IRLAN_CONN", 30 "IRLAN_CONN",
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 0e7d8bde145d..6115a44c0a24 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -939,7 +939,7 @@ struct irda_device_info *irlmp_get_discoveries(int *pn, __u16 mask, int nslots)
939 } 939 }
940 940
941 /* Return current cached discovery log */ 941 /* Return current cached discovery log */
942 return(irlmp_copy_discoveries(irlmp->cachelog, pn, mask, TRUE)); 942 return irlmp_copy_discoveries(irlmp->cachelog, pn, mask, TRUE);
943} 943}
944EXPORT_SYMBOL(irlmp_get_discoveries); 944EXPORT_SYMBOL(irlmp_get_discoveries);
945 945
diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c
index 3750884094da..062e63b1c5c4 100644
--- a/net/irda/irlmp_frame.c
+++ b/net/irda/irlmp_frame.c
@@ -448,7 +448,7 @@ static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
448 (self->cache.slsap_sel == slsap_sel) && 448 (self->cache.slsap_sel == slsap_sel) &&
449 (self->cache.dlsap_sel == dlsap_sel)) 449 (self->cache.dlsap_sel == dlsap_sel))
450 { 450 {
451 return (self->cache.lsap); 451 return self->cache.lsap;
452 } 452 }
453#endif 453#endif
454 454
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index e98e40d76f4f..7f17a8020e8a 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -238,7 +238,7 @@ irnet_ias_to_tsap(irnet_socket * self,
238 DEXIT(IRDA_SR_TRACE, "\n"); 238 DEXIT(IRDA_SR_TRACE, "\n");
239 239
240 /* Return the TSAP */ 240 /* Return the TSAP */
241 return(dtsap_sel); 241 return dtsap_sel;
242} 242}
243 243
244/*------------------------------------------------------------------*/ 244/*------------------------------------------------------------------*/
@@ -301,7 +301,7 @@ irnet_connect_tsap(irnet_socket * self)
301 { 301 {
302 clear_bit(0, &self->ttp_connect); 302 clear_bit(0, &self->ttp_connect);
303 DERROR(IRDA_SR_ERROR, "connect aborted!\n"); 303 DERROR(IRDA_SR_ERROR, "connect aborted!\n");
304 return(err); 304 return err;
305 } 305 }
306 306
307 /* Connect to remote device */ 307 /* Connect to remote device */
@@ -312,7 +312,7 @@ irnet_connect_tsap(irnet_socket * self)
312 { 312 {
313 clear_bit(0, &self->ttp_connect); 313 clear_bit(0, &self->ttp_connect);
314 DERROR(IRDA_SR_ERROR, "connect aborted!\n"); 314 DERROR(IRDA_SR_ERROR, "connect aborted!\n");
315 return(err); 315 return err;
316 } 316 }
317 317
318 /* The above call is non-blocking. 318 /* The above call is non-blocking.
@@ -321,7 +321,7 @@ irnet_connect_tsap(irnet_socket * self)
321 * See you there ;-) */ 321 * See you there ;-) */
322 322
323 DEXIT(IRDA_SR_TRACE, "\n"); 323 DEXIT(IRDA_SR_TRACE, "\n");
324 return(err); 324 return err;
325} 325}
326 326
327/*------------------------------------------------------------------*/ 327/*------------------------------------------------------------------*/
@@ -362,10 +362,10 @@ irnet_discover_next_daddr(irnet_socket * self)
362 /* The above request is non-blocking. 362 /* The above request is non-blocking.
363 * After a while, IrDA will call us back in irnet_discovervalue_confirm() 363 * After a while, IrDA will call us back in irnet_discovervalue_confirm()
364 * We will then call irnet_ias_to_tsap() and come back here again... */ 364 * We will then call irnet_ias_to_tsap() and come back here again... */
365 return(0); 365 return 0;
366 } 366 }
367 else 367 else
368 return(1); 368 return 1;
369} 369}
370 370
371/*------------------------------------------------------------------*/ 371/*------------------------------------------------------------------*/
@@ -436,7 +436,7 @@ irnet_discover_daddr_and_lsap_sel(irnet_socket * self)
436 /* Follow me in irnet_discovervalue_confirm() */ 436 /* Follow me in irnet_discovervalue_confirm() */
437 437
438 DEXIT(IRDA_SR_TRACE, "\n"); 438 DEXIT(IRDA_SR_TRACE, "\n");
439 return(0); 439 return 0;
440} 440}
441 441
442/*------------------------------------------------------------------*/ 442/*------------------------------------------------------------------*/
@@ -485,7 +485,7 @@ irnet_dname_to_daddr(irnet_socket * self)
485 /* No luck ! */ 485 /* No luck ! */
486 DEBUG(IRDA_SR_INFO, "cannot discover device ``%s'' !!!\n", self->rname); 486 DEBUG(IRDA_SR_INFO, "cannot discover device ``%s'' !!!\n", self->rname);
487 kfree(discoveries); 487 kfree(discoveries);
488 return(-EADDRNOTAVAIL); 488 return -EADDRNOTAVAIL;
489} 489}
490 490
491 491
@@ -527,7 +527,7 @@ irda_irnet_create(irnet_socket * self)
527 INIT_WORK(&self->disconnect_work, irnet_ppp_disconnect); 527 INIT_WORK(&self->disconnect_work, irnet_ppp_disconnect);
528 528
529 DEXIT(IRDA_SOCK_TRACE, "\n"); 529 DEXIT(IRDA_SOCK_TRACE, "\n");
530 return(0); 530 return 0;
531} 531}
532 532
533/*------------------------------------------------------------------*/ 533/*------------------------------------------------------------------*/
@@ -601,7 +601,7 @@ irda_irnet_connect(irnet_socket * self)
601 * We will finish the connection procedure in irnet_connect_tsap(). 601 * We will finish the connection procedure in irnet_connect_tsap().
602 */ 602 */
603 DEXIT(IRDA_SOCK_TRACE, "\n"); 603 DEXIT(IRDA_SOCK_TRACE, "\n");
604 return(0); 604 return 0;
605} 605}
606 606
607/*------------------------------------------------------------------*/ 607/*------------------------------------------------------------------*/
@@ -733,7 +733,7 @@ irnet_daddr_to_dname(irnet_socket * self)
733 /* No luck ! */ 733 /* No luck ! */
734 DEXIT(IRDA_SERV_INFO, ": cannot discover device 0x%08x !!!\n", self->daddr); 734 DEXIT(IRDA_SERV_INFO, ": cannot discover device 0x%08x !!!\n", self->daddr);
735 kfree(discoveries); 735 kfree(discoveries);
736 return(-EADDRNOTAVAIL); 736 return -EADDRNOTAVAIL;
737} 737}
738 738
739/*------------------------------------------------------------------*/ 739/*------------------------------------------------------------------*/
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index dfe7b38dd4af..69f1fa64994e 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -166,7 +166,7 @@ irnet_ctrl_write(irnet_socket * ap,
166 } 166 }
167 167
168 /* Success : we have parsed all commands successfully */ 168 /* Success : we have parsed all commands successfully */
169 return(count); 169 return count;
170} 170}
171 171
172#ifdef INITIAL_DISCOVERY 172#ifdef INITIAL_DISCOVERY
@@ -300,7 +300,7 @@ irnet_ctrl_read(irnet_socket * ap,
300 } 300 }
301 301
302 DEXIT(CTRL_TRACE, "\n"); 302 DEXIT(CTRL_TRACE, "\n");
303 return(strlen(event)); 303 return strlen(event);
304 } 304 }
305#endif /* INITIAL_DISCOVERY */ 305#endif /* INITIAL_DISCOVERY */
306 306
@@ -409,7 +409,7 @@ irnet_ctrl_read(irnet_socket * ap,
409 } 409 }
410 410
411 DEXIT(CTRL_TRACE, "\n"); 411 DEXIT(CTRL_TRACE, "\n");
412 return(strlen(event)); 412 return strlen(event);
413} 413}
414 414
415/*------------------------------------------------------------------*/ 415/*------------------------------------------------------------------*/
@@ -623,7 +623,7 @@ dev_irnet_poll(struct file * file,
623 mask |= irnet_ctrl_poll(ap, file, wait); 623 mask |= irnet_ctrl_poll(ap, file, wait);
624 624
625 DEXIT(FS_TRACE, " - mask=0x%X\n", mask); 625 DEXIT(FS_TRACE, " - mask=0x%X\n", mask);
626 return(mask); 626 return mask;
627} 627}
628 628
629/*------------------------------------------------------------------*/ 629/*------------------------------------------------------------------*/
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h
index b5df2418f90c..940225866da0 100644
--- a/net/irda/irnet/irnet_ppp.h
+++ b/net/irda/irnet/irnet_ppp.h
@@ -103,7 +103,8 @@ static const struct file_operations irnet_device_fops =
103 .poll = dev_irnet_poll, 103 .poll = dev_irnet_poll,
104 .unlocked_ioctl = dev_irnet_ioctl, 104 .unlocked_ioctl = dev_irnet_ioctl,
105 .open = dev_irnet_open, 105 .open = dev_irnet_open,
106 .release = dev_irnet_close 106 .release = dev_irnet_close,
107 .llseek = noop_llseek,
107 /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */ 108 /* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */
108}; 109};
109 110
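The irnet_ppp.h hunk pins .llseek to noop_llseek (declared in <linux/fs.h>):
lseek() on the control device "succeeds" without moving any file position,
which is the sensible behaviour for a character device that has no notion
of position. This looks like part of the tree-wide work that made every
fops state its seek semantics explicitly, though the commit message is not
shown here; note that the trailing "Also : llseek, ..." comment is now
slightly stale, since llseek is no longer among the unset hooks. A sketch
with hypothetical handlers:

        #include <linux/fs.h>
        #include <linux/module.h>

        static const struct file_operations toy_ctrl_fops = {
                .owner  = THIS_MODULE,
                .read   = toy_ctrl_read,    /* hypothetical */
                .write  = toy_ctrl_write,   /* hypothetical */
                .llseek = noop_llseek,      /* returns f_pos, moves nothing */
        };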
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 43040e97c474..d87c22df6f1e 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -565,12 +565,12 @@ pfkey_proto2satype(uint16_t proto)
565 565
566static uint8_t pfkey_proto_to_xfrm(uint8_t proto) 566static uint8_t pfkey_proto_to_xfrm(uint8_t proto)
567{ 567{
568 return (proto == IPSEC_PROTO_ANY ? 0 : proto); 568 return proto == IPSEC_PROTO_ANY ? 0 : proto;
569} 569}
570 570
571static uint8_t pfkey_proto_from_xfrm(uint8_t proto) 571static uint8_t pfkey_proto_from_xfrm(uint8_t proto)
572{ 572{
573 return (proto ? proto : IPSEC_PROTO_ANY); 573 return proto ? proto : IPSEC_PROTO_ANY;
574} 574}
575 575
576static inline int pfkey_sockaddr_len(sa_family_t family) 576static inline int pfkey_sockaddr_len(sa_family_t family)
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 1ae697681bc7..8d9ce0accc98 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -144,7 +144,6 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
144 nf_reset(skb); 144 nf_reset(skb);
145 145
146 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) { 146 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
147 dev->last_rx = jiffies;
148 dev->stats.rx_packets++; 147 dev->stats.rx_packets++;
149 dev->stats.rx_bytes += data_len; 148 dev->stats.rx_bytes += data_len;
150 } else 149 } else
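Two details in the l2tp_eth receive path are worth calling out. First, the
dev->last_rx = jiffies store is simply dropped: as I understand the
rationale, only the bonding code actually consumes last_rx, so unrelated
drivers stopped paying for the per-packet write. Second, the byte count
uses data_len, captured before the skb is handed off, because
dev_forward_skb() consumes the skb and it must not be touched afterwards.
A sketch of the shape (toy error path added for illustration):

        unsigned int len = skb->len;            /* sample before handoff */

        if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += len;     /* skb may already be gone */
        } else {
                dev->stats.rx_errors++;         /* hypothetical */
        }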
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index ff954b3e94b6..39a21d0c61c4 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1768,7 +1768,7 @@ static const struct proto_ops pppol2tp_ops = {
1768 .ioctl = pppox_ioctl, 1768 .ioctl = pppox_ioctl,
1769}; 1769};
1770 1770
1771static struct pppox_proto pppol2tp_proto = { 1771static const struct pppox_proto pppol2tp_proto = {
1772 .create = pppol2tp_create, 1772 .create = pppol2tp_create,
1773 .ioctl = pppol2tp_ioctl 1773 .ioctl = pppol2tp_ioctl
1774}; 1774};
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index a87cb3ba2df6..d2b03e0851ef 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -138,10 +138,8 @@ struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
138 struct crypto_cipher *tfm; 138 struct crypto_cipher *tfm;
139 139
140 tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); 140 tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
141 if (IS_ERR(tfm)) 141 if (!IS_ERR(tfm))
142 return NULL; 142 crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN);
143
144 crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN);
145 143
146 return tfm; 144 return tfm;
147} 145}
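The aes_ccm/aes_cmac rework changes the failure convention: instead of
flattening an allocation failure to NULL, the setup function now returns
whatever crypto_alloc_cipher() produced, including an ERR_PTR-encoded
errno, and only calls setkey on success. Callers therefore have to switch
from "if (!tfm)" to "if (IS_ERR(tfm))"; presumably the rest of the series
updates them, though those hunks are not shown here. The error-pointer
idiom in a sketch (all toy_* names hypothetical):

        #include <linux/err.h>

        static struct toy_tfm *toy_key_setup(const u8 *key)
        {
                struct toy_tfm *tfm = toy_alloc(); /* may be ERR_PTR(-ENOMEM) */

                if (!IS_ERR(tfm))
                        toy_setkey(tfm, key);
                return tfm;
        }

        /* caller:
         *      tfm = toy_key_setup(key);
         *      if (IS_ERR(tfm))
         *              return PTR_ERR(tfm);
         */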
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 3d097b3d7b62..b4d66cca76d6 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -119,10 +119,8 @@ struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[])
119 struct crypto_cipher *tfm; 119 struct crypto_cipher *tfm;
120 120
121 tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); 121 tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
122 if (IS_ERR(tfm)) 122 if (!IS_ERR(tfm))
123 return NULL; 123 crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
124
125 crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN);
126 124
127 return tfm; 125 return tfm;
128} 126}
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 965b272499fd..58eab9e8e4ee 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -86,6 +86,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
86 tid, 0, reason); 86 tid, 0, reason);
87 87
88 del_timer_sync(&tid_rx->session_timer); 88 del_timer_sync(&tid_rx->session_timer);
89 del_timer_sync(&tid_rx->reorder_timer);
89 90
90 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); 91 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
91} 92}
@@ -120,6 +121,20 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
120 ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); 121 ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
121} 122}
122 123
124static void sta_rx_agg_reorder_timer_expired(unsigned long data)
125{
126 u8 *ptid = (u8 *)data;
127 u8 *timer_to_id = ptid - *ptid;
128 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
129 timer_to_tid[0]);
130
131 rcu_read_lock();
132 spin_lock(&sta->lock);
133 ieee80211_release_reorder_timeout(sta, *ptid);
134 spin_unlock(&sta->lock);
135 rcu_read_unlock();
136}
137
123static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, 138static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
124 u8 dialog_token, u16 status, u16 policy, 139 u8 dialog_token, u16 status, u16 policy,
125 u16 buf_size, u16 timeout) 140 u16 buf_size, u16 timeout)
@@ -251,11 +266,18 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
251 goto end; 266 goto end;
252 } 267 }
253 268
269 spin_lock_init(&tid_agg_rx->reorder_lock);
270
254 /* rx timer */ 271 /* rx timer */
255 tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired; 272 tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
256 tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; 273 tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
257 init_timer(&tid_agg_rx->session_timer); 274 init_timer(&tid_agg_rx->session_timer);
258 275
276 /* rx reorder timer */
277 tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired;
278 tid_agg_rx->reorder_timer.data = (unsigned long)&sta->timer_to_tid[tid];
279 init_timer(&tid_agg_rx->reorder_timer);
280
259 /* prepare reordering buffer */ 281 /* prepare reordering buffer */
260 tid_agg_rx->reorder_buf = 282 tid_agg_rx->reorder_buf =
261 kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC); 283 kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC);
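The new reorder-timer callback leans on a neat trick. An old-style kernel
timer carries a single unsigned long of context; here it is a pointer into
sta->timer_to_tid[], an array set up so that element i holds the value i.
Subtracting the stored value from the pointer therefore lands on element 0,
container_of() turns that into the enclosing sta_info, and *ptid is still
the TID. A self-contained userspace sketch of the arithmetic (illustrative
names, a local container_of, and the old timer API's assumption that a
pointer fits in unsigned long):

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct sta { const char *name; unsigned char timer_to_tid[16]; };

        static void timer_cb(unsigned long data)
        {
                unsigned char *ptid = (unsigned char *)data;
                unsigned char *base = ptid - *ptid;     /* back to element 0 */
                struct sta *sta = container_of(base, struct sta, timer_to_tid);

                printf("sta=%s tid=%d\n", sta->name, *ptid);
        }

        int main(void)
        {
                struct sta s = { .name = "demo" };
                unsigned int i;

                for (i = 0; i < 16; i++)
                        s.timer_to_tid[i] = (unsigned char)i;
                timer_cb((unsigned long)&s.timer_to_tid[7]); /* sta=demo tid=7 */
                return 0;
        }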
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 29ac8e1a509e..c981604b71e6 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -19,33 +19,6 @@
19#include "rate.h" 19#include "rate.h"
20#include "mesh.h" 20#include "mesh.h"
21 21
22static bool nl80211_type_check(enum nl80211_iftype type)
23{
24 switch (type) {
25 case NL80211_IFTYPE_ADHOC:
26 case NL80211_IFTYPE_STATION:
27 case NL80211_IFTYPE_MONITOR:
28#ifdef CONFIG_MAC80211_MESH
29 case NL80211_IFTYPE_MESH_POINT:
30#endif
31 case NL80211_IFTYPE_AP:
32 case NL80211_IFTYPE_AP_VLAN:
33 case NL80211_IFTYPE_WDS:
34 return true;
35 default:
36 return false;
37 }
38}
39
40static bool nl80211_params_check(enum nl80211_iftype type,
41 struct vif_params *params)
42{
43 if (!nl80211_type_check(type))
44 return false;
45
46 return true;
47}
48
49static int ieee80211_add_iface(struct wiphy *wiphy, char *name, 22static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
50 enum nl80211_iftype type, u32 *flags, 23 enum nl80211_iftype type, u32 *flags,
51 struct vif_params *params) 24 struct vif_params *params)
@@ -55,9 +28,6 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
55 struct ieee80211_sub_if_data *sdata; 28 struct ieee80211_sub_if_data *sdata;
56 int err; 29 int err;
57 30
58 if (!nl80211_params_check(type, params))
59 return -EINVAL;
60
61 err = ieee80211_if_add(local, name, &dev, type, params); 31 err = ieee80211_if_add(local, name, &dev, type, params);
62 if (err || type != NL80211_IFTYPE_MONITOR || !flags) 32 if (err || type != NL80211_IFTYPE_MONITOR || !flags)
63 return err; 33 return err;
@@ -82,12 +52,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
82 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 52 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
83 int ret; 53 int ret;
84 54
85 if (ieee80211_sdata_running(sdata))
86 return -EBUSY;
87
88 if (!nl80211_params_check(type, params))
89 return -EINVAL;
90
91 ret = ieee80211_if_change_type(sdata, type); 55 ret = ieee80211_if_change_type(sdata, type);
92 if (ret) 56 if (ret)
93 return ret; 57 return ret;
@@ -114,44 +78,30 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
114 u8 key_idx, const u8 *mac_addr, 78 u8 key_idx, const u8 *mac_addr,
115 struct key_params *params) 79 struct key_params *params)
116{ 80{
117 struct ieee80211_sub_if_data *sdata; 81 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
118 struct sta_info *sta = NULL; 82 struct sta_info *sta = NULL;
119 enum ieee80211_key_alg alg;
120 struct ieee80211_key *key; 83 struct ieee80211_key *key;
121 int err; 84 int err;
122 85
123 if (!netif_running(dev)) 86 if (!ieee80211_sdata_running(sdata))
124 return -ENETDOWN; 87 return -ENETDOWN;
125 88
126 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 89 /* reject WEP and TKIP keys if WEP failed to initialize */
127
128 switch (params->cipher) { 90 switch (params->cipher) {
129 case WLAN_CIPHER_SUITE_WEP40: 91 case WLAN_CIPHER_SUITE_WEP40:
130 case WLAN_CIPHER_SUITE_WEP104:
131 alg = ALG_WEP;
132 break;
133 case WLAN_CIPHER_SUITE_TKIP: 92 case WLAN_CIPHER_SUITE_TKIP:
134 alg = ALG_TKIP; 93 case WLAN_CIPHER_SUITE_WEP104:
135 break; 94 if (IS_ERR(sdata->local->wep_tx_tfm))
136 case WLAN_CIPHER_SUITE_CCMP: 95 return -EINVAL;
137 alg = ALG_CCMP;
138 break;
139 case WLAN_CIPHER_SUITE_AES_CMAC:
140 alg = ALG_AES_CMAC;
141 break; 96 break;
142 default: 97 default:
143 return -EINVAL; 98 break;
144 } 99 }
145 100
146 /* reject WEP and TKIP keys if WEP failed to initialize */ 101 key = ieee80211_key_alloc(params->cipher, key_idx, params->key_len,
147 if ((alg == ALG_WEP || alg == ALG_TKIP) && 102 params->key, params->seq_len, params->seq);
148 IS_ERR(sdata->local->wep_tx_tfm)) 103 if (IS_ERR(key))
149 return -EINVAL; 104 return PTR_ERR(key);
150
151 key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key,
152 params->seq_len, params->seq);
153 if (!key)
154 return -ENOMEM;
155 105
156 mutex_lock(&sdata->local->sta_mtx); 106 mutex_lock(&sdata->local->sta_mtx);
157 107
@@ -164,9 +114,10 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
164 } 114 }
165 } 115 }
166 116
167 ieee80211_key_link(key, sdata, sta); 117 err = ieee80211_key_link(key, sdata, sta);
118 if (err)
119 ieee80211_key_free(sdata->local, key);
168 120
169 err = 0;
170 out_unlock: 121 out_unlock:
171 mutex_unlock(&sdata->local->sta_mtx); 122 mutex_unlock(&sdata->local->sta_mtx);
172 123
@@ -247,10 +198,10 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
247 198
248 memset(&params, 0, sizeof(params)); 199 memset(&params, 0, sizeof(params));
249 200
250 switch (key->conf.alg) { 201 params.cipher = key->conf.cipher;
251 case ALG_TKIP:
252 params.cipher = WLAN_CIPHER_SUITE_TKIP;
253 202
203 switch (key->conf.cipher) {
204 case WLAN_CIPHER_SUITE_TKIP:
254 iv32 = key->u.tkip.tx.iv32; 205 iv32 = key->u.tkip.tx.iv32;
255 iv16 = key->u.tkip.tx.iv16; 206 iv16 = key->u.tkip.tx.iv16;
256 207
@@ -268,8 +219,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
268 params.seq = seq; 219 params.seq = seq;
269 params.seq_len = 6; 220 params.seq_len = 6;
270 break; 221 break;
271 case ALG_CCMP: 222 case WLAN_CIPHER_SUITE_CCMP:
272 params.cipher = WLAN_CIPHER_SUITE_CCMP;
273 seq[0] = key->u.ccmp.tx_pn[5]; 223 seq[0] = key->u.ccmp.tx_pn[5];
274 seq[1] = key->u.ccmp.tx_pn[4]; 224 seq[1] = key->u.ccmp.tx_pn[4];
275 seq[2] = key->u.ccmp.tx_pn[3]; 225 seq[2] = key->u.ccmp.tx_pn[3];
@@ -279,14 +229,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
279 params.seq = seq; 229 params.seq = seq;
280 params.seq_len = 6; 230 params.seq_len = 6;
281 break; 231 break;
282 case ALG_WEP: 232 case WLAN_CIPHER_SUITE_AES_CMAC:
283 if (key->conf.keylen == 5)
284 params.cipher = WLAN_CIPHER_SUITE_WEP40;
285 else
286 params.cipher = WLAN_CIPHER_SUITE_WEP104;
287 break;
288 case ALG_AES_CMAC:
289 params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
290 seq[0] = key->u.aes_cmac.tx_pn[5]; 233 seq[0] = key->u.aes_cmac.tx_pn[5];
291 seq[1] = key->u.aes_cmac.tx_pn[4]; 234 seq[1] = key->u.aes_cmac.tx_pn[4];
292 seq[2] = key->u.aes_cmac.tx_pn[3]; 235 seq[2] = key->u.aes_cmac.tx_pn[3];
@@ -634,6 +577,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
634 struct sta_info *sta, 577 struct sta_info *sta,
635 struct station_parameters *params) 578 struct station_parameters *params)
636{ 579{
580 unsigned long flags;
637 u32 rates; 581 u32 rates;
638 int i, j; 582 int i, j;
639 struct ieee80211_supported_band *sband; 583 struct ieee80211_supported_band *sband;
@@ -642,7 +586,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
642 586
643 sband = local->hw.wiphy->bands[local->oper_channel->band]; 587 sband = local->hw.wiphy->bands[local->oper_channel->band];
644 588
645 spin_lock_bh(&sta->lock); 589 spin_lock_irqsave(&sta->flaglock, flags);
646 mask = params->sta_flags_mask; 590 mask = params->sta_flags_mask;
647 set = params->sta_flags_set; 591 set = params->sta_flags_set;
648 592
@@ -669,7 +613,7 @@ static void sta_apply_parameters(struct ieee80211_local *local,
669 if (set & BIT(NL80211_STA_FLAG_MFP)) 613 if (set & BIT(NL80211_STA_FLAG_MFP))
670 sta->flags |= WLAN_STA_MFP; 614 sta->flags |= WLAN_STA_MFP;
671 } 615 }
672 spin_unlock_bh(&sta->lock); 616 spin_unlock_irqrestore(&sta->flaglock, flags);
673 617
674 /* 618 /*
675 * cfg80211 validates this (1-2007) and allows setting the AID 619 * cfg80211 validates this (1-2007) and allows setting the AID
@@ -1143,9 +1087,9 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1143 p.uapsd = false; 1087 p.uapsd = false;
1144 1088
1145 if (drv_conf_tx(local, params->queue, &p)) { 1089 if (drv_conf_tx(local, params->queue, &p)) {
1146 printk(KERN_DEBUG "%s: failed to set TX queue " 1090 wiphy_debug(local->hw.wiphy,
1147 "parameters for queue %d\n", 1091 "failed to set TX queue parameters for queue %d\n",
1148 wiphy_name(local->hw.wiphy), params->queue); 1092 params->queue);
1149 return -EINVAL; 1093 return -EINVAL;
1150 } 1094 }
1151 1095
@@ -1207,15 +1151,26 @@ static int ieee80211_scan(struct wiphy *wiphy,
1207 struct net_device *dev, 1151 struct net_device *dev,
1208 struct cfg80211_scan_request *req) 1152 struct cfg80211_scan_request *req)
1209{ 1153{
1210 struct ieee80211_sub_if_data *sdata; 1154 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1211
1212 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1213 1155
1214 if (sdata->vif.type != NL80211_IFTYPE_STATION && 1156 switch (ieee80211_vif_type_p2p(&sdata->vif)) {
1215 sdata->vif.type != NL80211_IFTYPE_ADHOC && 1157 case NL80211_IFTYPE_STATION:
1216 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 1158 case NL80211_IFTYPE_ADHOC:
1217 (sdata->vif.type != NL80211_IFTYPE_AP || sdata->u.ap.beacon)) 1159 case NL80211_IFTYPE_MESH_POINT:
1160 case NL80211_IFTYPE_P2P_CLIENT:
1161 break;
1162 case NL80211_IFTYPE_P2P_GO:
1163 if (sdata->local->ops->hw_scan)
1164 break;
1165 /* FIXME: implement NoA while scanning in software */
1166 return -EOPNOTSUPP;
1167 case NL80211_IFTYPE_AP:
1168 if (sdata->u.ap.beacon)
1169 return -EOPNOTSUPP;
1170 break;
1171 default:
1218 return -EOPNOTSUPP; 1172 return -EOPNOTSUPP;
1173 }
1219 1174
1220 return ieee80211_request_scan(sdata, req); 1175 return ieee80211_request_scan(sdata, req);
1221} 1176}
@@ -1541,11 +1496,11 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
1541 return ieee80211_wk_cancel_remain_on_channel(sdata, cookie); 1496 return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
1542} 1497}
1543 1498
1544static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev, 1499static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
1545 struct ieee80211_channel *chan, 1500 struct ieee80211_channel *chan,
1546 enum nl80211_channel_type channel_type, 1501 enum nl80211_channel_type channel_type,
1547 bool channel_type_valid, 1502 bool channel_type_valid,
1548 const u8 *buf, size_t len, u64 *cookie) 1503 const u8 *buf, size_t len, u64 *cookie)
1549{ 1504{
1550 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1505 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1551 struct ieee80211_local *local = sdata->local; 1506 struct ieee80211_local *local = sdata->local;
@@ -1575,8 +1530,6 @@ static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
1575 return -ENOLINK; 1530 return -ENOLINK;
1576 break; 1531 break;
1577 case NL80211_IFTYPE_STATION: 1532 case NL80211_IFTYPE_STATION:
1578 if (!(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
1579 flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1580 break; 1533 break;
1581 default: 1534 default:
1582 return -EOPNOTSUPP; 1535 return -EOPNOTSUPP;
@@ -1647,6 +1600,6 @@ struct cfg80211_ops mac80211_config_ops = {
1647 .set_bitrate_mask = ieee80211_set_bitrate_mask, 1600 .set_bitrate_mask = ieee80211_set_bitrate_mask,
1648 .remain_on_channel = ieee80211_remain_on_channel, 1601 .remain_on_channel = ieee80211_remain_on_channel,
1649 .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel, 1602 .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
1650 .action = ieee80211_action, 1603 .mgmt_tx = ieee80211_mgmt_tx,
1651 .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config, 1604 .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
1652}; 1605};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 32be11e4c4d9..5b24740fc0b0 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -11,7 +11,7 @@ __ieee80211_get_channel_mode(struct ieee80211_local *local,
11{ 11{
12 struct ieee80211_sub_if_data *sdata; 12 struct ieee80211_sub_if_data *sdata;
13 13
14 WARN_ON(!mutex_is_locked(&local->iflist_mtx)); 14 lockdep_assert_held(&local->iflist_mtx);
15 15
16 list_for_each_entry(sdata, &local->interfaces, list) { 16 list_for_each_entry(sdata, &local->interfaces, list) {
17 if (sdata == ignore) 17 if (sdata == ignore)
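The chan.c change swaps WARN_ON(!mutex_is_locked(...)) for
lockdep_assert_held(). The difference is twofold: mutex_is_locked() only
proves that *somebody* holds the mutex, and always costs a runtime test,
while lockdep_assert_held() checks that the *current* task holds it when
CONFIG_LOCKDEP is enabled and compiles to nothing otherwise. A sketch of
the contract (the toy structure is hypothetical):

        #include <linux/lockdep.h>
        #include <linux/mutex.h>

        static void toy_for_each_iface(struct toy_local *local)
        {
                lockdep_assert_held(&local->iflist_mtx); /* caller must lock */
                /* ... safely walk local->interfaces ... */
        }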
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index a694c593ff6a..e81ef4e8cb32 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -85,13 +85,15 @@ static ssize_t tsf_write(struct file *file,
85 if (strncmp(buf, "reset", 5) == 0) { 85 if (strncmp(buf, "reset", 5) == 0) {
86 if (local->ops->reset_tsf) { 86 if (local->ops->reset_tsf) {
87 drv_reset_tsf(local); 87 drv_reset_tsf(local);
88 printk(KERN_INFO "%s: debugfs reset TSF\n", wiphy_name(local->hw.wiphy)); 88 wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
89 } 89 }
90 } else { 90 } else {
91 tsf = simple_strtoul(buf, NULL, 0); 91 tsf = simple_strtoul(buf, NULL, 0);
92 if (local->ops->set_tsf) { 92 if (local->ops->set_tsf) {
93 drv_set_tsf(local, tsf); 93 drv_set_tsf(local, tsf);
94 printk(KERN_INFO "%s: debugfs set TSF to %#018llx\n", wiphy_name(local->hw.wiphy), tsf); 94 wiphy_info(local->hw.wiphy,
95 "debugfs set TSF to %#018llx\n", tsf);
96
95 } 97 }
96 } 98 }
97 99
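The debugfs.c hunk is one instance of converting raw printk() calls that
hand-roll a "%s: " prefix via wiphy_name() to the wiphy_info()/wiphy_debug()
helpers, which log against the wiphy's own device and prepend its name
automatically. The shape of the conversion, taken from the hunk itself:

        /* before */
        printk(KERN_INFO "%s: debugfs set TSF to %#018llx\n",
               wiphy_name(local->hw.wiphy), tsf);

        /* after: same information, no manual prefix */
        wiphy_info(local->hw.wiphy, "debugfs set TSF to %#018llx\n", tsf);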
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index fa5e76e658ef..1647f8dc5cda 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -64,26 +64,13 @@ static ssize_t key_algorithm_read(struct file *file,
64 char __user *userbuf, 64 char __user *userbuf,
65 size_t count, loff_t *ppos) 65 size_t count, loff_t *ppos)
66{ 66{
67 char *alg; 67 char buf[15];
68 struct ieee80211_key *key = file->private_data; 68 struct ieee80211_key *key = file->private_data;
69 u32 c = key->conf.cipher;
69 70
70 switch (key->conf.alg) { 71 sprintf(buf, "%.2x-%.2x-%.2x:%d\n",
71 case ALG_WEP: 72 c >> 24, (c >> 16) & 0xff, (c >> 8) & 0xff, c & 0xff);
72 alg = "WEP\n"; 73 return simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
73 break;
74 case ALG_TKIP:
75 alg = "TKIP\n";
76 break;
77 case ALG_CCMP:
78 alg = "CCMP\n";
79 break;
80 case ALG_AES_CMAC:
81 alg = "AES-128-CMAC\n";
82 break;
83 default:
84 return 0;
85 }
86 return simple_read_from_buffer(userbuf, count, ppos, alg, strlen(alg));
87} 74}
88KEY_OPS(algorithm); 75KEY_OPS(algorithm);
89 76
@@ -95,21 +82,22 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
95 int len; 82 int len;
96 struct ieee80211_key *key = file->private_data; 83 struct ieee80211_key *key = file->private_data;
97 84
98 switch (key->conf.alg) { 85 switch (key->conf.cipher) {
99 case ALG_WEP: 86 case WLAN_CIPHER_SUITE_WEP40:
87 case WLAN_CIPHER_SUITE_WEP104:
100 len = scnprintf(buf, sizeof(buf), "\n"); 88 len = scnprintf(buf, sizeof(buf), "\n");
101 break; 89 break;
102 case ALG_TKIP: 90 case WLAN_CIPHER_SUITE_TKIP:
103 len = scnprintf(buf, sizeof(buf), "%08x %04x\n", 91 len = scnprintf(buf, sizeof(buf), "%08x %04x\n",
104 key->u.tkip.tx.iv32, 92 key->u.tkip.tx.iv32,
105 key->u.tkip.tx.iv16); 93 key->u.tkip.tx.iv16);
106 break; 94 break;
107 case ALG_CCMP: 95 case WLAN_CIPHER_SUITE_CCMP:
108 tpn = key->u.ccmp.tx_pn; 96 tpn = key->u.ccmp.tx_pn;
109 len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", 97 len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
110 tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], tpn[5]); 98 tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], tpn[5]);
111 break; 99 break;
112 case ALG_AES_CMAC: 100 case WLAN_CIPHER_SUITE_AES_CMAC:
113 tpn = key->u.aes_cmac.tx_pn; 101 tpn = key->u.aes_cmac.tx_pn;
114 len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", 102 len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
115 tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], 103 tpn[0], tpn[1], tpn[2], tpn[3], tpn[4],
@@ -130,11 +118,12 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
130 int i, len; 118 int i, len;
131 const u8 *rpn; 119 const u8 *rpn;
132 120
133 switch (key->conf.alg) { 121 switch (key->conf.cipher) {
134 case ALG_WEP: 122 case WLAN_CIPHER_SUITE_WEP40:
123 case WLAN_CIPHER_SUITE_WEP104:
135 len = scnprintf(buf, sizeof(buf), "\n"); 124 len = scnprintf(buf, sizeof(buf), "\n");
136 break; 125 break;
137 case ALG_TKIP: 126 case WLAN_CIPHER_SUITE_TKIP:
138 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) 127 for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
139 p += scnprintf(p, sizeof(buf)+buf-p, 128 p += scnprintf(p, sizeof(buf)+buf-p,
140 "%08x %04x\n", 129 "%08x %04x\n",
@@ -142,7 +131,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
142 key->u.tkip.rx[i].iv16); 131 key->u.tkip.rx[i].iv16);
143 len = p - buf; 132 len = p - buf;
144 break; 133 break;
145 case ALG_CCMP: 134 case WLAN_CIPHER_SUITE_CCMP:
146 for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) { 135 for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) {
147 rpn = key->u.ccmp.rx_pn[i]; 136 rpn = key->u.ccmp.rx_pn[i];
148 p += scnprintf(p, sizeof(buf)+buf-p, 137 p += scnprintf(p, sizeof(buf)+buf-p,
@@ -152,7 +141,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
152 } 141 }
153 len = p - buf; 142 len = p - buf;
154 break; 143 break;
155 case ALG_AES_CMAC: 144 case WLAN_CIPHER_SUITE_AES_CMAC:
156 rpn = key->u.aes_cmac.rx_pn; 145 rpn = key->u.aes_cmac.rx_pn;
157 p += scnprintf(p, sizeof(buf)+buf-p, 146 p += scnprintf(p, sizeof(buf)+buf-p,
158 "%02x%02x%02x%02x%02x%02x\n", 147 "%02x%02x%02x%02x%02x%02x\n",
@@ -174,11 +163,11 @@ static ssize_t key_replays_read(struct file *file, char __user *userbuf,
174 char buf[20]; 163 char buf[20];
175 int len; 164 int len;
176 165
177 switch (key->conf.alg) { 166 switch (key->conf.cipher) {
178 case ALG_CCMP: 167 case WLAN_CIPHER_SUITE_CCMP:
179 len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays); 168 len = scnprintf(buf, sizeof(buf), "%u\n", key->u.ccmp.replays);
180 break; 169 break;
181 case ALG_AES_CMAC: 170 case WLAN_CIPHER_SUITE_AES_CMAC:
182 len = scnprintf(buf, sizeof(buf), "%u\n", 171 len = scnprintf(buf, sizeof(buf), "%u\n",
183 key->u.aes_cmac.replays); 172 key->u.aes_cmac.replays);
184 break; 173 break;
@@ -196,8 +185,8 @@ static ssize_t key_icverrors_read(struct file *file, char __user *userbuf,
196 char buf[20]; 185 char buf[20];
197 int len; 186 int len;
198 187
199 switch (key->conf.alg) { 188 switch (key->conf.cipher) {
200 case ALG_AES_CMAC: 189 case WLAN_CIPHER_SUITE_AES_CMAC:
201 len = scnprintf(buf, sizeof(buf), "%u\n", 190 len = scnprintf(buf, sizeof(buf), "%u\n",
202 key->u.aes_cmac.icverrors); 191 key->u.aes_cmac.icverrors);
203 break; 192 break;
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 14123dce544b..16983825f8e8 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -54,6 +54,20 @@ static inline int drv_add_interface(struct ieee80211_local *local,
54 return ret; 54 return ret;
55} 55}
56 56
57static inline int drv_change_interface(struct ieee80211_local *local,
58 struct ieee80211_sub_if_data *sdata,
59 enum nl80211_iftype type, bool p2p)
60{
61 int ret;
62
63 might_sleep();
64
65 trace_drv_change_interface(local, sdata, type, p2p);
66 ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
67 trace_drv_return_int(local, ret);
68 return ret;
69}
70
57static inline void drv_remove_interface(struct ieee80211_local *local, 71static inline void drv_remove_interface(struct ieee80211_local *local,
58 struct ieee80211_vif *vif) 72 struct ieee80211_vif *vif)
59{ 73{
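drv_change_interface() follows mac80211's standard driver-ops template:
every ops callback is wrapped in a static inline that documents the calling
context with might_sleep(), emits a tracepoint on entry, calls the driver,
and traces the return value. (The wrapper itself does not check whether the
driver implements change_interface; the caller is presumably expected to
have verified that, since the wrapper would otherwise call a NULL hook.)
The template with a hypothetical op and tracepoint:

        static inline int drv_toy_op(struct ieee80211_local *local, int arg)
        {
                int ret;

                might_sleep();                  /* callback may block */
                trace_drv_toy_op(local, arg);   /* hypothetical tracepoint */
                ret = local->ops->toy_op(&local->hw, arg);
                trace_drv_return_int(local, ret);
                return ret;
        }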
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 5d5d2a974668..6831fb1641c8 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -25,12 +25,14 @@ static inline void trace_ ## name(proto) {}
25#define STA_PR_FMT " sta:%pM" 25#define STA_PR_FMT " sta:%pM"
26#define STA_PR_ARG __entry->sta_addr 26#define STA_PR_ARG __entry->sta_addr
27 27
28#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \ 28#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
29 __field(bool, p2p) \
29 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>") 30 __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
30#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \ 31#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
32 __entry->p2p = sdata->vif.p2p; \
31 __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>") 33 __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
32#define VIF_PR_FMT " vif:%s(%d)" 34#define VIF_PR_FMT " vif:%s(%d%s)"
33#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type 35#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
34 36
35/* 37/*
36 * Tracing for driver callbacks. 38 * Tracing for driver callbacks.
@@ -136,6 +138,34 @@ TRACE_EVENT(drv_add_interface,
136 ) 138 )
137); 139);
138 140
141TRACE_EVENT(drv_change_interface,
142 TP_PROTO(struct ieee80211_local *local,
143 struct ieee80211_sub_if_data *sdata,
144 enum nl80211_iftype type, bool p2p),
145
146 TP_ARGS(local, sdata, type, p2p),
147
148 TP_STRUCT__entry(
149 LOCAL_ENTRY
150 VIF_ENTRY
151 __field(u32, new_type)
152 __field(bool, new_p2p)
153 ),
154
155 TP_fast_assign(
156 LOCAL_ASSIGN;
157 VIF_ASSIGN;
158 __entry->new_type = type;
159 __entry->new_p2p = p2p;
160 ),
161
162 TP_printk(
163 LOCAL_PR_FMT VIF_PR_FMT " new type:%d%s",
164 LOCAL_PR_ARG, VIF_PR_ARG, __entry->new_type,
165 __entry->new_p2p ? "/p2p" : ""
166 )
167);
168
139TRACE_EVENT(drv_remove_interface, 169TRACE_EVENT(drv_remove_interface,
140 TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), 170 TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata),
141 171
@@ -336,7 +366,7 @@ TRACE_EVENT(drv_set_key,
336 LOCAL_ENTRY 366 LOCAL_ENTRY
337 VIF_ENTRY 367 VIF_ENTRY
338 STA_ENTRY 368 STA_ENTRY
339 __field(enum ieee80211_key_alg, alg) 369 __field(u32, cipher)
340 __field(u8, hw_key_idx) 370 __field(u8, hw_key_idx)
341 __field(u8, flags) 371 __field(u8, flags)
342 __field(s8, keyidx) 372 __field(s8, keyidx)
@@ -346,7 +376,7 @@ TRACE_EVENT(drv_set_key,
346 LOCAL_ASSIGN; 376 LOCAL_ASSIGN;
347 VIF_ASSIGN; 377 VIF_ASSIGN;
348 STA_ASSIGN; 378 STA_ASSIGN;
349 __entry->alg = key->alg; 379 __entry->cipher = key->cipher;
350 __entry->flags = key->flags; 380 __entry->flags = key->flags;
351 __entry->keyidx = key->keyidx; 381 __entry->keyidx = key->keyidx;
352 __entry->hw_key_idx = key->hw_key_idx; 382 __entry->hw_key_idx = key->hw_key_idx;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 9d101fb33861..11f74f5f7b2f 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -265,3 +265,31 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
265 265
266 return 0; 266 return 0;
267} 267}
268
269void ieee80211_request_smps_work(struct work_struct *work)
270{
271 struct ieee80211_sub_if_data *sdata =
272 container_of(work, struct ieee80211_sub_if_data,
273 u.mgd.request_smps_work);
274
275 mutex_lock(&sdata->u.mgd.mtx);
276 __ieee80211_request_smps(sdata, sdata->u.mgd.driver_smps_mode);
277 mutex_unlock(&sdata->u.mgd.mtx);
278}
279
280void ieee80211_request_smps(struct ieee80211_vif *vif,
281 enum ieee80211_smps_mode smps_mode)
282{
283 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
284
285 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
286 return;
287
288 if (WARN_ON(smps_mode == IEEE80211_SMPS_OFF))
289 smps_mode = IEEE80211_SMPS_AUTOMATIC;
290
291 ieee80211_queue_work(&sdata->local->hw,
292 &sdata->u.mgd.request_smps_work);
293}
294/* this might change ... don't want non-open drivers using it */
295EXPORT_SYMBOL_GPL(ieee80211_request_smps);
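ieee80211_request_smps() is deliberately asynchronous: the exported entry
point only validates its arguments and queues request_smps_work, and the
worker recovers its interface via container_of() before taking the managed
mode mutex and re-reading the requested mode from the interface state. That
keeps the exported call safe for callers that must not sleep on u.mgd.mtx.
The deferral pattern in a sketch (toy names, assuming INIT_WORK was done at
setup time):

        static void toy_work_fn(struct work_struct *work)
        {
                struct toy *t = container_of(work, struct toy, smps_work);

                mutex_lock(&t->mtx);
                /* ... apply t->requested_mode under the lock ... */
                mutex_unlock(&t->mtx);
        }

        /* setup:   INIT_WORK(&t->smps_work, toy_work_fn);
         * request: t->requested_mode = mode;
         *          schedule_work(&t->smps_work);
         */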
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index c691780725a7..1a3aae54f0cf 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -427,8 +427,8 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
427 return NULL; 427 return NULL;
428 428
429#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 429#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
430 printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n", 430 wiphy_debug(local->hw.wiphy, "Adding new IBSS station %pM (dev=%s)\n",
431 wiphy_name(local->hw.wiphy), addr, sdata->name); 431 addr, sdata->name);
432#endif 432#endif
433 433
434 sta = sta_info_alloc(sdata, addr, gfp); 434 sta = sta_info_alloc(sdata, addr, gfp);
@@ -920,12 +920,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
920 memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN); 920 memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN);
921 sdata->u.ibss.ssid_len = params->ssid_len; 921 sdata->u.ibss.ssid_len = params->ssid_len;
922 922
923 mutex_unlock(&sdata->u.ibss.mtx);
924
925 mutex_lock(&sdata->local->mtx);
923 ieee80211_recalc_idle(sdata->local); 926 ieee80211_recalc_idle(sdata->local);
927 mutex_unlock(&sdata->local->mtx);
924 928
925 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 929 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
926 930
927 mutex_unlock(&sdata->u.ibss.mtx);
928
929 return 0; 931 return 0;
930} 932}
931 933
@@ -980,7 +982,9 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
980 982
981 mutex_unlock(&sdata->u.ibss.mtx); 983 mutex_unlock(&sdata->u.ibss.mtx);
982 984
985 mutex_lock(&local->mtx);
983 ieee80211_recalc_idle(sdata->local); 986 ieee80211_recalc_idle(sdata->local);
987 mutex_unlock(&local->mtx);
984 988
985 return 0; 989 return 0;
986} 990}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 65e0ed6c2975..9346a6b0f400 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -50,12 +50,6 @@ struct ieee80211_local;
50 * increased memory use (about 2 kB of RAM per entry). */ 50 * increased memory use (about 2 kB of RAM per entry). */
51#define IEEE80211_FRAGMENT_MAX 4 51#define IEEE80211_FRAGMENT_MAX 4
52 52
53/*
54 * Time after which we ignore scan results and no longer report/use
55 * them in any way.
56 */
57#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
58
59#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024)) 53#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))
60 54
61#define IEEE80211_DEFAULT_UAPSD_QUEUES \ 55#define IEEE80211_DEFAULT_UAPSD_QUEUES \
@@ -170,6 +164,7 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
170#define IEEE80211_RX_RA_MATCH BIT(1) 164#define IEEE80211_RX_RA_MATCH BIT(1)
171#define IEEE80211_RX_AMSDU BIT(2) 165#define IEEE80211_RX_AMSDU BIT(2)
172#define IEEE80211_RX_FRAGMENTED BIT(3) 166#define IEEE80211_RX_FRAGMENTED BIT(3)
167#define IEEE80211_MALFORMED_ACTION_FRM BIT(4)
173/* only add flags here that do not change with subframes of an aMPDU */ 168/* only add flags here that do not change with subframes of an aMPDU */
174 169
175struct ieee80211_rx_data { 170struct ieee80211_rx_data {
@@ -343,7 +338,10 @@ struct ieee80211_if_managed {
343 unsigned long timers_running; /* used for quiesce/restart */ 338 unsigned long timers_running; /* used for quiesce/restart */
344 bool powersave; /* powersave requested for this iface */ 339 bool powersave; /* powersave requested for this iface */
345 enum ieee80211_smps_mode req_smps, /* requested smps mode */ 340 enum ieee80211_smps_mode req_smps, /* requested smps mode */
346 ap_smps; /* smps mode AP thinks we're in */ 341 ap_smps, /* smps mode AP thinks we're in */
342 driver_smps_mode; /* smps mode request */
343
344 struct work_struct request_smps_work;
347 345
348 unsigned int flags; 346 unsigned int flags;
349 347
@@ -371,6 +369,13 @@ struct ieee80211_if_managed {
371 int ave_beacon_signal; 369 int ave_beacon_signal;
372 370
373 /* 371 /*
372 * Number of Beacon frames used in ave_beacon_signal. This can be used
373 * to avoid generating less reliable cqm events that would be based
 374 * only on a couple of received frames.
375 */
376 unsigned int count_beacon_signal;
377
378 /*
374 * Last Beacon frame signal strength average (ave_beacon_signal / 16) 379 * Last Beacon frame signal strength average (ave_beacon_signal / 16)
375 * that triggered a cqm event. 0 indicates that no event has been 380 * that triggered a cqm event. 0 indicates that no event has been
376 * generated for the current association. 381 * generated for the current association.
@@ -474,6 +479,19 @@ enum ieee80211_sub_if_data_flags {
474 IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3), 479 IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
475}; 480};
476 481
482/**
483 * enum ieee80211_sdata_state_bits - virtual interface state bits
484 * @SDATA_STATE_RUNNING: virtual interface is up & running; this
485 * mirrors netif_running() but is separate for interface type
486 * change handling while the interface is up
487 * @SDATA_STATE_OFFCHANNEL: This interface is currently in offchannel
488 * mode, so queues are stopped
489 */
490enum ieee80211_sdata_state_bits {
491 SDATA_STATE_RUNNING,
492 SDATA_STATE_OFFCHANNEL,
493};
494
477struct ieee80211_sub_if_data { 495struct ieee80211_sub_if_data {
478 struct list_head list; 496 struct list_head list;
479 497
@@ -487,6 +505,8 @@ struct ieee80211_sub_if_data {
487 505
488 unsigned int flags; 506 unsigned int flags;
489 507
508 unsigned long state;
509
490 int drop_unencrypted; 510 int drop_unencrypted;
491 511
492 char name[IFNAMSIZ]; 512 char name[IFNAMSIZ];
@@ -497,6 +517,9 @@ struct ieee80211_sub_if_data {
497 */ 517 */
498 bool ht_opmode_valid; 518 bool ht_opmode_valid;
499 519
520 /* to detect idle changes */
521 bool old_idle;
522
500 /* Fragment table for host-based reassembly */ 523 /* Fragment table for host-based reassembly */
501 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; 524 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
502 unsigned int fragment_next; 525 unsigned int fragment_next;
@@ -508,6 +531,8 @@ struct ieee80211_sub_if_data {
508 struct ieee80211_key *default_mgmt_key; 531 struct ieee80211_key *default_mgmt_key;
509 532
510 u16 sequence_number; 533 u16 sequence_number;
534 __be16 control_port_protocol;
535 bool control_port_no_encrypt;
511 536
512 struct work_struct work; 537 struct work_struct work;
513 struct sk_buff_head skb_queue; 538 struct sk_buff_head skb_queue;
@@ -595,11 +620,17 @@ enum queue_stop_reason {
595 * determine if we are on the operating channel or not 620 * determine if we are on the operating channel or not
596 * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning, 621 * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
597 * gets only set in conjunction with SCAN_SW_SCANNING 622 * gets only set in conjunction with SCAN_SW_SCANNING
623 * @SCAN_COMPLETED: Set for our scan work function when the driver reported
624 * that the scan completed.
625 * @SCAN_ABORTED: Set for our scan work function when the driver reported
626 * a scan complete for an aborted scan.
598 */ 627 */
599enum { 628enum {
600 SCAN_SW_SCANNING, 629 SCAN_SW_SCANNING,
601 SCAN_HW_SCANNING, 630 SCAN_HW_SCANNING,
602 SCAN_OFF_CHANNEL, 631 SCAN_OFF_CHANNEL,
632 SCAN_COMPLETED,
633 SCAN_ABORTED,
603}; 634};
604 635
605/** 636/**
@@ -634,7 +665,6 @@ struct ieee80211_local {
634 /* 665 /*
635 * work stuff, potentially off-channel (in the future) 666 * work stuff, potentially off-channel (in the future)
636 */ 667 */
637 struct mutex work_mtx;
638 struct list_head work_list; 668 struct list_head work_list;
639 struct timer_list work_timer; 669 struct timer_list work_timer;
640 struct work_struct work_work; 670 struct work_struct work_work;
@@ -656,6 +686,8 @@ struct ieee80211_local {
656 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll; 686 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
657 unsigned int filter_flags; /* FIF_* */ 687 unsigned int filter_flags; /* FIF_* */
658 688
689 bool wiphy_ciphers_allocated;
690
659 /* protects the aggregated multicast list and filter calls */ 691 /* protects the aggregated multicast list and filter calls */
660 spinlock_t filter_lock; 692 spinlock_t filter_lock;
661 693
@@ -746,9 +778,10 @@ struct ieee80211_local {
746 */ 778 */
747 struct mutex key_mtx; 779 struct mutex key_mtx;
748 780
781 /* mutex for scan and work locking */
782 struct mutex mtx;
749 783
750 /* Scanning and BSS list */ 784 /* Scanning and BSS list */
751 struct mutex scan_mtx;
752 unsigned long scanning; 785 unsigned long scanning;
753 struct cfg80211_ssid scan_ssid; 786 struct cfg80211_ssid scan_ssid;
754 struct cfg80211_scan_request *int_scan_req; 787 struct cfg80211_scan_request *int_scan_req;
@@ -870,6 +903,11 @@ struct ieee80211_local {
870 struct dentry *keys; 903 struct dentry *keys;
871 } debugfs; 904 } debugfs;
872#endif 905#endif
906
907 /* dummy netdev for use w/ NAPI */
908 struct net_device napi_dev;
909
910 struct napi_struct napi;
873}; 911};
874 912
875static inline struct ieee80211_sub_if_data * 913static inline struct ieee80211_sub_if_data *
@@ -1003,6 +1041,8 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
1003void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); 1041void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
1004void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, 1042void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1005 struct sk_buff *skb); 1043 struct sk_buff *skb);
1044void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
1045void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
1006 1046
1007/* IBSS code */ 1047/* IBSS code */
1008void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); 1048void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
@@ -1071,7 +1111,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local);
1071 1111
1072static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata) 1112static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
1073{ 1113{
1074 return netif_running(sdata->dev); 1114 return test_bit(SDATA_STATE_RUNNING, &sdata->state);
1075} 1115}
1076 1116
1077/* tx handling */ 1117/* tx handling */
@@ -1105,6 +1145,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
1105int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, 1145int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
1106 enum ieee80211_smps_mode smps, const u8 *da, 1146 enum ieee80211_smps_mode smps, const u8 *da,
1107 const u8 *bssid); 1147 const u8 *bssid);
1148void ieee80211_request_smps_work(struct work_struct *work);
1108 1149
1109void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, 1150void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
1110 u16 initiator, u16 reason); 1151 u16 initiator, u16 reason);
@@ -1131,6 +1172,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
1131void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); 1172void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
1132void ieee80211_ba_session_work(struct work_struct *work); 1173void ieee80211_ba_session_work(struct work_struct *work);
1133void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); 1174void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
1175void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
1134 1176
1135/* Spectrum management */ 1177/* Spectrum management */
1136void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 1178void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -1146,6 +1188,12 @@ int __ieee80211_suspend(struct ieee80211_hw *hw);
1146 1188
1147static inline int __ieee80211_resume(struct ieee80211_hw *hw) 1189static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1148{ 1190{
1191 struct ieee80211_local *local = hw_to_local(hw);
1192
1193 WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
1194 "%s: resume with hardware scan still in progress\n",
1195 wiphy_name(hw->wiphy));
1196
1149 return ieee80211_reconfig(hw_to_local(hw)); 1197 return ieee80211_reconfig(hw_to_local(hw));
1150} 1198}
1151#else 1199#else
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ebbe264e2b0b..66785739dad3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -94,21 +94,14 @@ static inline int identical_mac_addr_allowed(int type1, int type2)
94 type2 == NL80211_IFTYPE_AP_VLAN)); 94 type2 == NL80211_IFTYPE_AP_VLAN));
95} 95}
96 96
97static int ieee80211_open(struct net_device *dev) 97static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
98 enum nl80211_iftype iftype)
98{ 99{
99 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
100 struct ieee80211_sub_if_data *nsdata;
101 struct ieee80211_local *local = sdata->local; 100 struct ieee80211_local *local = sdata->local;
102 struct sta_info *sta; 101 struct ieee80211_sub_if_data *nsdata;
103 u32 changed = 0; 102 struct net_device *dev = sdata->dev;
104 int res;
105 u32 hw_reconf_flags = 0;
106 u8 null_addr[ETH_ALEN] = {0};
107 103
108 /* fail early if user set an invalid address */ 104 ASSERT_RTNL();
109 if (compare_ether_addr(dev->dev_addr, null_addr) &&
110 !is_valid_ether_addr(dev->dev_addr))
111 return -EADDRNOTAVAIL;
112 105
113 /* we hold the RTNL here so can safely walk the list */ 106 /* we hold the RTNL here so can safely walk the list */
114 list_for_each_entry(nsdata, &local->interfaces, list) { 107 list_for_each_entry(nsdata, &local->interfaces, list) {
@@ -125,7 +118,7 @@ static int ieee80211_open(struct net_device *dev)
125 * belonging to the same hardware. Then, however, we're 118 * belonging to the same hardware. Then, however, we're
126 * faced with having to adopt two different TSF timers... 119 * faced with having to adopt two different TSF timers...
127 */ 120 */
128 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && 121 if (iftype == NL80211_IFTYPE_ADHOC &&
129 nsdata->vif.type == NL80211_IFTYPE_ADHOC) 122 nsdata->vif.type == NL80211_IFTYPE_ADHOC)
130 return -EBUSY; 123 return -EBUSY;
131 124
@@ -139,19 +132,36 @@ static int ieee80211_open(struct net_device *dev)
139 /* 132 /*
140 * check whether it may have the same address 133 * check whether it may have the same address
141 */ 134 */
142 if (!identical_mac_addr_allowed(sdata->vif.type, 135 if (!identical_mac_addr_allowed(iftype,
143 nsdata->vif.type)) 136 nsdata->vif.type))
144 return -ENOTUNIQ; 137 return -ENOTUNIQ;
145 138
146 /* 139 /*
147 * can only add VLANs to enabled APs 140 * can only add VLANs to enabled APs
148 */ 141 */
149 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 142 if (iftype == NL80211_IFTYPE_AP_VLAN &&
150 nsdata->vif.type == NL80211_IFTYPE_AP) 143 nsdata->vif.type == NL80211_IFTYPE_AP)
151 sdata->bss = &nsdata->u.ap; 144 sdata->bss = &nsdata->u.ap;
152 } 145 }
153 } 146 }
154 147
148 return 0;
149}
150
151/*
152 * NOTE: Be very careful when changing this function, it must NOT return
153 * an error on interface type changes that have been pre-checked, so most
154 * checks should be in ieee80211_check_concurrent_iface.
155 */
156static int ieee80211_do_open(struct net_device *dev, bool coming_up)
157{
158 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
159 struct ieee80211_local *local = sdata->local;
160 struct sta_info *sta;
161 u32 changed = 0;
162 int res;
163 u32 hw_reconf_flags = 0;
164
155 switch (sdata->vif.type) { 165 switch (sdata->vif.type) {
156 case NL80211_IFTYPE_WDS: 166 case NL80211_IFTYPE_WDS:
157 if (!is_valid_ether_addr(sdata->u.wds.remote_addr)) 167 if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
@@ -177,7 +187,9 @@ static int ieee80211_open(struct net_device *dev)
177 /* no special treatment */ 187 /* no special treatment */
178 break; 188 break;
179 case NL80211_IFTYPE_UNSPECIFIED: 189 case NL80211_IFTYPE_UNSPECIFIED:
180 case __NL80211_IFTYPE_AFTER_LAST: 190 case NUM_NL80211_IFTYPES:
191 case NL80211_IFTYPE_P2P_CLIENT:
192 case NL80211_IFTYPE_P2P_GO:
181 /* cannot happen */ 193 /* cannot happen */
182 WARN_ON(1); 194 WARN_ON(1);
183 break; 195 break;
@@ -187,39 +199,30 @@ static int ieee80211_open(struct net_device *dev)
187 res = drv_start(local); 199 res = drv_start(local);
188 if (res) 200 if (res)
189 goto err_del_bss; 201 goto err_del_bss;
202 if (local->ops->napi_poll)
203 napi_enable(&local->napi);
190 /* we're brought up, everything changes */ 204 /* we're brought up, everything changes */
191 hw_reconf_flags = ~0; 205 hw_reconf_flags = ~0;
192 ieee80211_led_radio(local, true); 206 ieee80211_led_radio(local, true);
193 } 207 }
194 208
195 /* 209 /*
196 * Check all interfaces and copy the hopefully now-present 210 * Copy the hopefully now-present MAC address to
197 * MAC address to those that have the special null one. 211 * this interface, if it has the special null one.
198 */ 212 */
199 list_for_each_entry(nsdata, &local->interfaces, list) { 213 if (is_zero_ether_addr(dev->dev_addr)) {
200 struct net_device *ndev = nsdata->dev; 214 memcpy(dev->dev_addr,
201 215 local->hw.wiphy->perm_addr,
202 /* 216 ETH_ALEN);
203 * No need to check running since we do not allow 217 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
204 * it to start up with this invalid address. 218
205 */ 219 if (!is_valid_ether_addr(dev->dev_addr)) {
206 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) { 220 if (!local->open_count)
207 memcpy(ndev->dev_addr, 221 drv_stop(local);
208 local->hw.wiphy->perm_addr, 222 return -EADDRNOTAVAIL;
209 ETH_ALEN);
210 memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN);
211 } 223 }
212 } 224 }
213 225
214 /*
215 * Validate the MAC address for this device.
216 */
217 if (!is_valid_ether_addr(dev->dev_addr)) {
218 if (!local->open_count)
219 drv_stop(local);
220 return -EADDRNOTAVAIL;
221 }
222
223 switch (sdata->vif.type) { 226 switch (sdata->vif.type) {
224 case NL80211_IFTYPE_AP_VLAN: 227 case NL80211_IFTYPE_AP_VLAN:
225 /* no need to tell driver */ 228 /* no need to tell driver */
@@ -253,9 +256,11 @@ static int ieee80211_open(struct net_device *dev)
253 netif_carrier_on(dev); 256 netif_carrier_on(dev);
254 break; 257 break;
255 default: 258 default:
256 res = drv_add_interface(local, &sdata->vif); 259 if (coming_up) {
257 if (res) 260 res = drv_add_interface(local, &sdata->vif);
258 goto err_stop; 261 if (res)
262 goto err_stop;
263 }
259 264
260 if (ieee80211_vif_is_mesh(&sdata->vif)) { 265 if (ieee80211_vif_is_mesh(&sdata->vif)) {
261 local->fif_other_bss++; 266 local->fif_other_bss++;
@@ -277,6 +282,8 @@ static int ieee80211_open(struct net_device *dev)
277 netif_carrier_on(dev); 282 netif_carrier_on(dev);
278 } 283 }
279 284
285 set_bit(SDATA_STATE_RUNNING, &sdata->state);
286
280 if (sdata->vif.type == NL80211_IFTYPE_WDS) { 287 if (sdata->vif.type == NL80211_IFTYPE_WDS) {
281 /* Create STA entry for the WDS peer */ 288 /* Create STA entry for the WDS peer */
282 sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr, 289 sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
@@ -307,9 +314,13 @@ static int ieee80211_open(struct net_device *dev)
307 if (sdata->flags & IEEE80211_SDATA_PROMISC) 314 if (sdata->flags & IEEE80211_SDATA_PROMISC)
308 atomic_inc(&local->iff_promiscs); 315 atomic_inc(&local->iff_promiscs);
309 316
317 mutex_lock(&local->mtx);
310 hw_reconf_flags |= __ieee80211_recalc_idle(local); 318 hw_reconf_flags |= __ieee80211_recalc_idle(local);
319 mutex_unlock(&local->mtx);
320
321 if (coming_up)
322 local->open_count++;
311 323
312 local->open_count++;
313 if (hw_reconf_flags) { 324 if (hw_reconf_flags) {
314 ieee80211_hw_config(local, hw_reconf_flags); 325 ieee80211_hw_config(local, hw_reconf_flags);
315 /* 326 /*
@@ -334,22 +345,42 @@ static int ieee80211_open(struct net_device *dev)
334 sdata->bss = NULL; 345 sdata->bss = NULL;
335 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 346 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
336 list_del(&sdata->u.vlan.list); 347 list_del(&sdata->u.vlan.list);
348 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
337 return res; 349 return res;
338} 350}
339 351
340static int ieee80211_stop(struct net_device *dev) 352static int ieee80211_open(struct net_device *dev)
341{ 353{
342 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 354 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
355 int err;
356
357 /* fail early if user set an invalid address */
358 if (!is_zero_ether_addr(dev->dev_addr) &&
359 !is_valid_ether_addr(dev->dev_addr))
360 return -EADDRNOTAVAIL;
361
362 err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type);
363 if (err)
364 return err;
365
366 return ieee80211_do_open(dev, true);
367}
368
369static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
370 bool going_down)
371{
343 struct ieee80211_local *local = sdata->local; 372 struct ieee80211_local *local = sdata->local;
344 unsigned long flags; 373 unsigned long flags;
345 struct sk_buff *skb, *tmp; 374 struct sk_buff *skb, *tmp;
346 u32 hw_reconf_flags = 0; 375 u32 hw_reconf_flags = 0;
347 int i; 376 int i;
348 377
378 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
379
349 /* 380 /*
350 * Stop TX on this interface first. 381 * Stop TX on this interface first.
351 */ 382 */
352 netif_tx_stop_all_queues(dev); 383 netif_tx_stop_all_queues(sdata->dev);
353 384
354 /* 385 /*
355 * Purge work for this interface. 386 * Purge work for this interface.
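The new SDATA_STATE_RUNNING bit decouples mac80211's notion of "interface is up" from netif_running(), which is what lets the runtime type change further down take an interface through do_stop()/do_open() without bouncing the netdev. Presumably ieee80211_sdata_running() reduces to the bit test (an assumption; the helper's body is not part of this hunk):

	static inline bool
	ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
	{
		return test_bit(SDATA_STATE_RUNNING, &sdata->state);
	}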
@@ -366,12 +397,9 @@ static int ieee80211_stop(struct net_device *dev)
366 * (because if we remove a STA after ops->remove_interface() 397 * (because if we remove a STA after ops->remove_interface()
367 * the driver will have removed the vif info already!) 398 * the driver will have removed the vif info already!)
368 * 399 *
369 * We could relax this and only unlink the stations from the 400 * This is relevant only in AP, WDS and mesh modes, since in
370 * hash table and list but keep them on a per-sdata list that 401 * all other modes we've already removed all stations when
371 * will be inserted back again when the interface is brought 402 * disconnecting etc.
372 * up again, but I don't currently see a use case for that,
373 * except with WDS which gets a STA entry created when it is
374 * brought up.
375 */ 403 */
376 sta_info_flush(local, sdata); 404 sta_info_flush(local, sdata);
377 405
@@ -390,11 +418,12 @@ static int ieee80211_stop(struct net_device *dev)
390 if (sdata->vif.type == NL80211_IFTYPE_AP) 418 if (sdata->vif.type == NL80211_IFTYPE_AP)
391 local->fif_pspoll--; 419 local->fif_pspoll--;
392 420
393 netif_addr_lock_bh(dev); 421 netif_addr_lock_bh(sdata->dev);
394 spin_lock_bh(&local->filter_lock); 422 spin_lock_bh(&local->filter_lock);
395 __hw_addr_unsync(&local->mc_list, &dev->mc, dev->addr_len); 423 __hw_addr_unsync(&local->mc_list, &sdata->dev->mc,
424 sdata->dev->addr_len);
396 spin_unlock_bh(&local->filter_lock); 425 spin_unlock_bh(&local->filter_lock);
397 netif_addr_unlock_bh(dev); 426 netif_addr_unlock_bh(sdata->dev);
398 427
399 ieee80211_configure_filter(local); 428 ieee80211_configure_filter(local);
400 429
@@ -406,11 +435,21 @@ static int ieee80211_stop(struct net_device *dev)
406 struct ieee80211_sub_if_data *vlan, *tmpsdata; 435 struct ieee80211_sub_if_data *vlan, *tmpsdata;
407 struct beacon_data *old_beacon = sdata->u.ap.beacon; 436 struct beacon_data *old_beacon = sdata->u.ap.beacon;
408 437
 438 /* sdata_running will return false, so this will disable beaconing */
439 ieee80211_bss_info_change_notify(sdata,
440 BSS_CHANGED_BEACON_ENABLED);
441
409 /* remove beacon */ 442 /* remove beacon */
410 rcu_assign_pointer(sdata->u.ap.beacon, NULL); 443 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
411 synchronize_rcu(); 444 synchronize_rcu();
412 kfree(old_beacon); 445 kfree(old_beacon);
413 446
447 /* free all potentially still buffered bcast frames */
448 while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
449 local->total_ps_buffered--;
450 dev_kfree_skb(skb);
451 }
452
414 /* down all dependent devices, that is VLANs */ 453 /* down all dependent devices, that is VLANs */
415 list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, 454 list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
416 u.vlan.list) 455 u.vlan.list)
@@ -418,7 +457,8 @@ static int ieee80211_stop(struct net_device *dev)
418 WARN_ON(!list_empty(&sdata->u.ap.vlans)); 457 WARN_ON(!list_empty(&sdata->u.ap.vlans));
419 } 458 }
420 459
421 local->open_count--; 460 if (going_down)
461 local->open_count--;
422 462
423 switch (sdata->vif.type) { 463 switch (sdata->vif.type) {
424 case NL80211_IFTYPE_AP_VLAN: 464 case NL80211_IFTYPE_AP_VLAN:
@@ -450,27 +490,6 @@ static int ieee80211_stop(struct net_device *dev)
450 490
451 ieee80211_configure_filter(local); 491 ieee80211_configure_filter(local);
452 break; 492 break;
453 case NL80211_IFTYPE_STATION:
454 del_timer_sync(&sdata->u.mgd.chswitch_timer);
455 del_timer_sync(&sdata->u.mgd.timer);
456 del_timer_sync(&sdata->u.mgd.conn_mon_timer);
457 del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
458 /*
459 * If any of the timers fired while we waited for it, it will
460 * have queued its work. Now the work will be running again
461 * but will not rearm the timer again because it checks
462 * whether the interface is running, which, at this point,
463 * it no longer is.
464 */
465 cancel_work_sync(&sdata->u.mgd.chswitch_work);
466 cancel_work_sync(&sdata->u.mgd.monitor_work);
467 cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work);
468
469 /* fall through */
470 case NL80211_IFTYPE_ADHOC:
471 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
472 del_timer_sync(&sdata->u.ibss.timer);
473 /* fall through */
474 case NL80211_IFTYPE_MESH_POINT: 493 case NL80211_IFTYPE_MESH_POINT:
475 if (ieee80211_vif_is_mesh(&sdata->vif)) { 494 if (ieee80211_vif_is_mesh(&sdata->vif)) {
476 /* other_bss and allmulti are always set on mesh 495 /* other_bss and allmulti are always set on mesh
@@ -498,27 +517,34 @@ static int ieee80211_stop(struct net_device *dev)
498 ieee80211_scan_cancel(local); 517 ieee80211_scan_cancel(local);
499 518
500 /* 519 /*
501 * Disable beaconing for AP and mesh, IBSS can't 520 * Disable beaconing here for mesh only, AP and IBSS
502 * still be joined to a network at this point. 521 * are already taken care of.
503 */ 522 */
504 if (sdata->vif.type == NL80211_IFTYPE_AP || 523 if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
505 sdata->vif.type == NL80211_IFTYPE_MESH_POINT) {
506 ieee80211_bss_info_change_notify(sdata, 524 ieee80211_bss_info_change_notify(sdata,
507 BSS_CHANGED_BEACON_ENABLED); 525 BSS_CHANGED_BEACON_ENABLED);
508 }
509 526
510 /* free all remaining keys, there shouldn't be any */ 527 /*
528 * Free all remaining keys, there shouldn't be any,
 529 * except maybe group keys in AP mode or WDS?
530 */
511 ieee80211_free_keys(sdata); 531 ieee80211_free_keys(sdata);
512 drv_remove_interface(local, &sdata->vif); 532
533 if (going_down)
534 drv_remove_interface(local, &sdata->vif);
513 } 535 }
514 536
515 sdata->bss = NULL; 537 sdata->bss = NULL;
516 538
539 mutex_lock(&local->mtx);
517 hw_reconf_flags |= __ieee80211_recalc_idle(local); 540 hw_reconf_flags |= __ieee80211_recalc_idle(local);
541 mutex_unlock(&local->mtx);
518 542
519 ieee80211_recalc_ps(local, -1); 543 ieee80211_recalc_ps(local, -1);
520 544
521 if (local->open_count == 0) { 545 if (local->open_count == 0) {
546 if (local->ops->napi_poll)
547 napi_disable(&local->napi);
522 ieee80211_clear_tx_pending(local); 548 ieee80211_clear_tx_pending(local);
523 ieee80211_stop_device(local); 549 ieee80211_stop_device(local);
524 550
@@ -541,6 +567,13 @@ static int ieee80211_stop(struct net_device *dev)
541 } 567 }
542 } 568 }
543 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 569 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
570}
571
572static int ieee80211_stop(struct net_device *dev)
573{
574 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
575
576 ieee80211_do_stop(sdata, true);
544 577
545 return 0; 578 return 0;
546} 579}
@@ -585,8 +618,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
585{ 618{
586 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 619 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
587 struct ieee80211_local *local = sdata->local; 620 struct ieee80211_local *local = sdata->local;
588 struct beacon_data *beacon;
589 struct sk_buff *skb;
590 int flushed; 621 int flushed;
591 int i; 622 int i;
592 623
@@ -599,37 +630,8 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
599 __skb_queue_purge(&sdata->fragments[i].skb_list); 630 __skb_queue_purge(&sdata->fragments[i].skb_list);
600 sdata->fragment_next = 0; 631 sdata->fragment_next = 0;
601 632
602 switch (sdata->vif.type) { 633 if (ieee80211_vif_is_mesh(&sdata->vif))
603 case NL80211_IFTYPE_AP: 634 mesh_rmc_free(sdata);
604 beacon = sdata->u.ap.beacon;
605 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
606 synchronize_rcu();
607 kfree(beacon);
608
609 while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
610 local->total_ps_buffered--;
611 dev_kfree_skb(skb);
612 }
613
614 break;
615 case NL80211_IFTYPE_MESH_POINT:
616 if (ieee80211_vif_is_mesh(&sdata->vif))
617 mesh_rmc_free(sdata);
618 break;
619 case NL80211_IFTYPE_ADHOC:
620 if (WARN_ON(sdata->u.ibss.presp))
621 kfree_skb(sdata->u.ibss.presp);
622 break;
623 case NL80211_IFTYPE_STATION:
624 case NL80211_IFTYPE_WDS:
625 case NL80211_IFTYPE_AP_VLAN:
626 case NL80211_IFTYPE_MONITOR:
627 break;
628 case NL80211_IFTYPE_UNSPECIFIED:
629 case __NL80211_IFTYPE_AFTER_LAST:
630 BUG();
631 break;
632 }
633 635
634 flushed = sta_info_flush(local, sdata); 636 flushed = sta_info_flush(local, sdata);
635 WARN_ON(flushed); 637 WARN_ON(flushed);
@@ -844,9 +846,13 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
844 846
845 /* and set some type-dependent values */ 847 /* and set some type-dependent values */
846 sdata->vif.type = type; 848 sdata->vif.type = type;
849 sdata->vif.p2p = false;
847 sdata->dev->netdev_ops = &ieee80211_dataif_ops; 850 sdata->dev->netdev_ops = &ieee80211_dataif_ops;
848 sdata->wdev.iftype = type; 851 sdata->wdev.iftype = type;
849 852
853 sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE);
854 sdata->control_port_no_encrypt = false;
855
850 /* only monitor differs */ 856 /* only monitor differs */
851 sdata->dev->type = ARPHRD_ETHER; 857 sdata->dev->type = ARPHRD_ETHER;
852 858
@@ -854,10 +860,20 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
854 INIT_WORK(&sdata->work, ieee80211_iface_work); 860 INIT_WORK(&sdata->work, ieee80211_iface_work);
855 861
856 switch (type) { 862 switch (type) {
863 case NL80211_IFTYPE_P2P_GO:
864 type = NL80211_IFTYPE_AP;
865 sdata->vif.type = type;
866 sdata->vif.p2p = true;
867 /* fall through */
857 case NL80211_IFTYPE_AP: 868 case NL80211_IFTYPE_AP:
858 skb_queue_head_init(&sdata->u.ap.ps_bc_buf); 869 skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
859 INIT_LIST_HEAD(&sdata->u.ap.vlans); 870 INIT_LIST_HEAD(&sdata->u.ap.vlans);
860 break; 871 break;
872 case NL80211_IFTYPE_P2P_CLIENT:
873 type = NL80211_IFTYPE_STATION;
874 sdata->vif.type = type;
875 sdata->vif.p2p = true;
876 /* fall through */
861 case NL80211_IFTYPE_STATION: 877 case NL80211_IFTYPE_STATION:
862 ieee80211_sta_setup_sdata(sdata); 878 ieee80211_sta_setup_sdata(sdata);
863 break; 879 break;
@@ -878,7 +894,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
878 case NL80211_IFTYPE_AP_VLAN: 894 case NL80211_IFTYPE_AP_VLAN:
879 break; 895 break;
880 case NL80211_IFTYPE_UNSPECIFIED: 896 case NL80211_IFTYPE_UNSPECIFIED:
881 case __NL80211_IFTYPE_AFTER_LAST: 897 case NUM_NL80211_IFTYPES:
882 BUG(); 898 BUG();
883 break; 899 break;
884 } 900 }
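ieee80211_setup_sdata() above maps the two P2P types onto their base types and remembers the difference in sdata->vif.p2p. The ieee80211_vif_type_p2p() helper used by ieee80211_if_change_type() further down presumably inverts that mapping; a sketch of the intended semantics:

	static enum nl80211_iftype
	vif_type_p2p(enum nl80211_iftype type, bool p2p)
	{
		if (p2p) {
			switch (type) {
			case NL80211_IFTYPE_STATION:
				return NL80211_IFTYPE_P2P_CLIENT;
			case NL80211_IFTYPE_AP:
				return NL80211_IFTYPE_P2P_GO;
			default:
				break;	/* p2p is only set for STA/AP */
			}
		}
		return type;
	}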
@@ -886,12 +902,85 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
886 ieee80211_debugfs_add_netdev(sdata); 902 ieee80211_debugfs_add_netdev(sdata);
887} 903}
888 904
905static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
906 enum nl80211_iftype type)
907{
908 struct ieee80211_local *local = sdata->local;
909 int ret, err;
910 enum nl80211_iftype internal_type = type;
911 bool p2p = false;
912
913 ASSERT_RTNL();
914
915 if (!local->ops->change_interface)
916 return -EBUSY;
917
918 switch (sdata->vif.type) {
919 case NL80211_IFTYPE_AP:
920 case NL80211_IFTYPE_STATION:
921 case NL80211_IFTYPE_ADHOC:
922 /*
 923 * We could maybe allow all other
 924 * types here too, but it's unclear
 925 * how that interacts with the
 926 * RX/config path, e.g. for mesh.
927 */
928 break;
929 default:
930 return -EBUSY;
931 }
932
933 switch (type) {
934 case NL80211_IFTYPE_AP:
935 case NL80211_IFTYPE_STATION:
936 case NL80211_IFTYPE_ADHOC:
937 /*
938 * Could probably support everything
939 * but WDS here (WDS do_open can fail
940 * under memory pressure, which this
941 * code isn't prepared to handle).
942 */
943 break;
944 case NL80211_IFTYPE_P2P_CLIENT:
945 p2p = true;
946 internal_type = NL80211_IFTYPE_STATION;
947 break;
948 case NL80211_IFTYPE_P2P_GO:
949 p2p = true;
950 internal_type = NL80211_IFTYPE_AP;
951 break;
952 default:
953 return -EBUSY;
954 }
955
956 ret = ieee80211_check_concurrent_iface(sdata, internal_type);
957 if (ret)
958 return ret;
959
960 ieee80211_do_stop(sdata, false);
961
962 ieee80211_teardown_sdata(sdata->dev);
963
964 ret = drv_change_interface(local, sdata, internal_type, p2p);
965 if (ret)
966 type = sdata->vif.type;
967
968 ieee80211_setup_sdata(sdata, type);
969
970 err = ieee80211_do_open(sdata->dev, false);
971 WARN(err, "type change: do_open returned %d", err);
972
973 return ret;
974}
975
889int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, 976int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
890 enum nl80211_iftype type) 977 enum nl80211_iftype type)
891{ 978{
979 int ret;
980
892 ASSERT_RTNL(); 981 ASSERT_RTNL();
893 982
894 if (type == sdata->vif.type) 983 if (type == ieee80211_vif_type_p2p(&sdata->vif))
895 return 0; 984 return 0;
896 985
897 /* Setting ad-hoc mode on non-IBSS channel is not supported. */ 986 /* Setting ad-hoc mode on non-IBSS channel is not supported. */
@@ -899,18 +988,15 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
899 type == NL80211_IFTYPE_ADHOC) 988 type == NL80211_IFTYPE_ADHOC)
900 return -EOPNOTSUPP; 989 return -EOPNOTSUPP;
901 990
902 /* 991 if (ieee80211_sdata_running(sdata)) {
903 * We could, here, on changes between IBSS/STA/MESH modes, 992 ret = ieee80211_runtime_change_iftype(sdata, type);
904 * invoke an MLME function instead that disassociates etc. 993 if (ret)
905 * and goes into the requested mode. 994 return ret;
906 */ 995 } else {
907 996 /* Purge and reset type-dependent state. */
908 if (ieee80211_sdata_running(sdata)) 997 ieee80211_teardown_sdata(sdata->dev);
909 return -EBUSY; 998 ieee80211_setup_sdata(sdata, type);
910 999 }
911 /* Purge and reset type-dependent state. */
912 ieee80211_teardown_sdata(sdata->dev);
913 ieee80211_setup_sdata(sdata, type);
914 1000
915 /* reset some values that shouldn't be kept across type changes */ 1001 /* reset some values that shouldn't be kept across type changes */
916 sdata->vif.bss_conf.basic_rates = 1002 sdata->vif.bss_conf.basic_rates =
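A condensed view of the sequence ieee80211_runtime_change_iftype() performs, assuming the driver provides ->change_interface (otherwise it bails out with -EBUSY before touching anything):

	ieee80211_do_stop(sdata, false);	/* down, open_count kept */
	ieee80211_teardown_sdata(sdata->dev);	/* drop type-specific state */
	ret = drv_change_interface(local, sdata, internal_type, p2p);
	if (ret)
		type = sdata->vif.type;		/* fall back to old type */
	ieee80211_setup_sdata(sdata, type);	/* re-init for (new) type */
	ieee80211_do_open(sdata->dev, false);	/* up, vif not re-added */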
@@ -1167,8 +1253,7 @@ static u32 ieee80211_idle_off(struct ieee80211_local *local,
1167 return 0; 1253 return 0;
1168 1254
1169#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1255#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1170 printk(KERN_DEBUG "%s: device no longer idle - %s\n", 1256 wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason);
1171 wiphy_name(local->hw.wiphy), reason);
1172#endif 1257#endif
1173 1258
1174 local->hw.conf.flags &= ~IEEE80211_CONF_IDLE; 1259 local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
@@ -1181,8 +1266,7 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
1181 return 0; 1266 return 0;
1182 1267
1183#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1268#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1184 printk(KERN_DEBUG "%s: device now idle\n", 1269 wiphy_debug(local->hw.wiphy, "device now idle\n");
1185 wiphy_name(local->hw.wiphy));
1186#endif 1270#endif
1187 1271
1188 drv_flush(local, false); 1272 drv_flush(local, false);
@@ -1195,28 +1279,61 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
1195{ 1279{
1196 struct ieee80211_sub_if_data *sdata; 1280 struct ieee80211_sub_if_data *sdata;
1197 int count = 0; 1281 int count = 0;
1282 bool working = false, scanning = false;
1283 struct ieee80211_work *wk;
1198 1284
1199 if (!list_empty(&local->work_list)) 1285#ifdef CONFIG_PROVE_LOCKING
1200 return ieee80211_idle_off(local, "working"); 1286 WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
1201 1287 !lockdep_is_held(&local->iflist_mtx));
1202 if (local->scanning) 1288#endif
1203 return ieee80211_idle_off(local, "scanning"); 1289 lockdep_assert_held(&local->mtx);
1204 1290
1205 list_for_each_entry(sdata, &local->interfaces, list) { 1291 list_for_each_entry(sdata, &local->interfaces, list) {
1206 if (!ieee80211_sdata_running(sdata)) 1292 if (!ieee80211_sdata_running(sdata)) {
1293 sdata->vif.bss_conf.idle = true;
1207 continue; 1294 continue;
1295 }
1296
1297 sdata->old_idle = sdata->vif.bss_conf.idle;
1298
1208 /* do not count disabled managed interfaces */ 1299 /* do not count disabled managed interfaces */
1209 if (sdata->vif.type == NL80211_IFTYPE_STATION && 1300 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1210 !sdata->u.mgd.associated) 1301 !sdata->u.mgd.associated) {
1302 sdata->vif.bss_conf.idle = true;
1211 continue; 1303 continue;
1304 }
1212 /* do not count unused IBSS interfaces */ 1305 /* do not count unused IBSS interfaces */
1213 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && 1306 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
1214 !sdata->u.ibss.ssid_len) 1307 !sdata->u.ibss.ssid_len) {
1308 sdata->vif.bss_conf.idle = true;
1215 continue; 1309 continue;
1310 }
1216 /* count everything else */ 1311 /* count everything else */
1217 count++; 1312 count++;
1218 } 1313 }
1219 1314
1315 list_for_each_entry(wk, &local->work_list, list) {
1316 working = true;
1317 wk->sdata->vif.bss_conf.idle = false;
1318 }
1319
1320 if (local->scan_sdata) {
1321 scanning = true;
1322 local->scan_sdata->vif.bss_conf.idle = false;
1323 }
1324
1325 list_for_each_entry(sdata, &local->interfaces, list) {
1326 if (sdata->old_idle == sdata->vif.bss_conf.idle)
1327 continue;
1328 if (!ieee80211_sdata_running(sdata))
1329 continue;
1330 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
1331 }
1332
1333 if (working)
1334 return ieee80211_idle_off(local, "working");
1335 if (scanning)
1336 return ieee80211_idle_off(local, "scanning");
1220 if (!count) 1337 if (!count)
1221 return ieee80211_idle_on(local); 1338 return ieee80211_idle_on(local);
1222 else 1339 else
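The reworked __ieee80211_recalc_idle() above follows a remember-recompute-notify pattern so drivers only see BSS_CHANGED_IDLE on actual transitions. A minimal sketch of that pattern:

	list_for_each_entry(sdata, &local->interfaces, list)
		sdata->old_idle = sdata->vif.bss_conf.idle;

	/* ... recompute sdata->vif.bss_conf.idle for each interface,
	 * then clear it again for any interface that is working or
	 * scanning ... */

	list_for_each_entry(sdata, &local->interfaces, list) {
		if (sdata->old_idle == sdata->vif.bss_conf.idle)
			continue;		/* nothing changed */
		if (!ieee80211_sdata_running(sdata))
			continue;		/* nobody to notify */
		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
	}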
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 1b9d87ed143a..6a63d1abd14d 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -49,7 +49,7 @@ static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
49 49
50static void assert_key_lock(struct ieee80211_local *local) 50static void assert_key_lock(struct ieee80211_local *local)
51{ 51{
52 WARN_ON(!mutex_is_locked(&local->key_mtx)); 52 lockdep_assert_held(&local->key_mtx);
53} 53}
54 54
55static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key) 55static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
@@ -60,7 +60,7 @@ static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
60 return NULL; 60 return NULL;
61} 61}
62 62
63static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) 63static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
64{ 64{
65 struct ieee80211_sub_if_data *sdata; 65 struct ieee80211_sub_if_data *sdata;
66 struct ieee80211_sta *sta; 66 struct ieee80211_sta *sta;
@@ -68,8 +68,10 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
68 68
69 might_sleep(); 69 might_sleep();
70 70
71 if (!key->local->ops->set_key) 71 if (!key->local->ops->set_key) {
72 return; 72 ret = -EOPNOTSUPP;
73 goto out_unsupported;
74 }
73 75
74 assert_key_lock(key->local); 76 assert_key_lock(key->local);
75 77
@@ -87,10 +89,27 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
87 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 89 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
88 90
89 if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP) 91 if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP)
90 printk(KERN_ERR "mac80211-%s: failed to set key " 92 wiphy_err(key->local->hw.wiphy,
91 "(%d, %pM) to hardware (%d)\n", 93 "failed to set key (%d, %pM) to hardware (%d)\n",
92 wiphy_name(key->local->hw.wiphy), 94 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
93 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); 95
96out_unsupported:
97 if (ret) {
98 switch (key->conf.cipher) {
99 case WLAN_CIPHER_SUITE_WEP40:
100 case WLAN_CIPHER_SUITE_WEP104:
101 case WLAN_CIPHER_SUITE_TKIP:
102 case WLAN_CIPHER_SUITE_CCMP:
103 case WLAN_CIPHER_SUITE_AES_CMAC:
104 /* all of these we can do in software */
105 ret = 0;
106 break;
107 default:
108 ret = -EINVAL;
109 }
110 }
111
112 return ret;
94} 113}
95 114
96static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) 115static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
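The out_unsupported classification above changes what a failure from a driver's ->set_key() means: for WEP, TKIP, CCMP and AES-CMAC any error now silently selects mac80211's software crypto, and only ciphers mac80211 cannot do itself propagate an error. A driver can therefore punt a cipher deliberately; a hedged sketch (hypothetical hardware):

	static int drv_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
	{
		/* no TKIP offload in this (hypothetical) hardware:
		 * mac80211 falls back to software encryption */
		if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
			return -EOPNOTSUPP;

		/* ... program the key into hardware otherwise ... */
		return 0;
	}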
@@ -121,10 +140,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
121 sta, &key->conf); 140 sta, &key->conf);
122 141
123 if (ret) 142 if (ret)
124 printk(KERN_ERR "mac80211-%s: failed to remove key " 143 wiphy_err(key->local->hw.wiphy,
125 "(%d, %pM) from hardware (%d)\n", 144 "failed to remove key (%d, %pM) from hardware (%d)\n",
126 wiphy_name(key->local->hw.wiphy), 145 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
127 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
128 146
129 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 147 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
130} 148}
@@ -227,20 +245,18 @@ static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
227 } 245 }
228} 246}
229 247
230struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, 248struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
231 int idx,
232 size_t key_len,
233 const u8 *key_data, 249 const u8 *key_data,
234 size_t seq_len, const u8 *seq) 250 size_t seq_len, const u8 *seq)
235{ 251{
236 struct ieee80211_key *key; 252 struct ieee80211_key *key;
237 int i, j; 253 int i, j, err;
238 254
239 BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS); 255 BUG_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS);
240 256
241 key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL); 257 key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL);
242 if (!key) 258 if (!key)
243 return NULL; 259 return ERR_PTR(-ENOMEM);
244 260
245 /* 261 /*
246 * Default to software encryption; we'll later upload the 262 * Default to software encryption; we'll later upload the
@@ -249,15 +265,16 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
249 key->conf.flags = 0; 265 key->conf.flags = 0;
250 key->flags = 0; 266 key->flags = 0;
251 267
252 key->conf.alg = alg; 268 key->conf.cipher = cipher;
253 key->conf.keyidx = idx; 269 key->conf.keyidx = idx;
254 key->conf.keylen = key_len; 270 key->conf.keylen = key_len;
255 switch (alg) { 271 switch (cipher) {
256 case ALG_WEP: 272 case WLAN_CIPHER_SUITE_WEP40:
273 case WLAN_CIPHER_SUITE_WEP104:
257 key->conf.iv_len = WEP_IV_LEN; 274 key->conf.iv_len = WEP_IV_LEN;
258 key->conf.icv_len = WEP_ICV_LEN; 275 key->conf.icv_len = WEP_ICV_LEN;
259 break; 276 break;
260 case ALG_TKIP: 277 case WLAN_CIPHER_SUITE_TKIP:
261 key->conf.iv_len = TKIP_IV_LEN; 278 key->conf.iv_len = TKIP_IV_LEN;
262 key->conf.icv_len = TKIP_ICV_LEN; 279 key->conf.icv_len = TKIP_ICV_LEN;
263 if (seq) { 280 if (seq) {
@@ -269,7 +286,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
269 } 286 }
270 } 287 }
271 break; 288 break;
272 case ALG_CCMP: 289 case WLAN_CIPHER_SUITE_CCMP:
273 key->conf.iv_len = CCMP_HDR_LEN; 290 key->conf.iv_len = CCMP_HDR_LEN;
274 key->conf.icv_len = CCMP_MIC_LEN; 291 key->conf.icv_len = CCMP_MIC_LEN;
275 if (seq) { 292 if (seq) {
@@ -278,42 +295,38 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
278 key->u.ccmp.rx_pn[i][j] = 295 key->u.ccmp.rx_pn[i][j] =
279 seq[CCMP_PN_LEN - j - 1]; 296 seq[CCMP_PN_LEN - j - 1];
280 } 297 }
281 break;
282 case ALG_AES_CMAC:
283 key->conf.iv_len = 0;
284 key->conf.icv_len = sizeof(struct ieee80211_mmie);
285 if (seq)
286 for (j = 0; j < 6; j++)
287 key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1];
288 break;
289 }
290 memcpy(key->conf.key, key_data, key_len);
291 INIT_LIST_HEAD(&key->list);
292
293 if (alg == ALG_CCMP) {
294 /* 298 /*
295 * Initialize AES key state here as an optimization so that 299 * Initialize AES key state here as an optimization so that
296 * it does not need to be initialized for every packet. 300 * it does not need to be initialized for every packet.
297 */ 301 */
298 key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data); 302 key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt(key_data);
299 if (!key->u.ccmp.tfm) { 303 if (IS_ERR(key->u.ccmp.tfm)) {
304 err = PTR_ERR(key->u.ccmp.tfm);
300 kfree(key); 305 kfree(key);
301 return NULL; 306 key = ERR_PTR(err);
302 } 307 }
303 } 308 break;
304 309 case WLAN_CIPHER_SUITE_AES_CMAC:
305 if (alg == ALG_AES_CMAC) { 310 key->conf.iv_len = 0;
311 key->conf.icv_len = sizeof(struct ieee80211_mmie);
312 if (seq)
313 for (j = 0; j < 6; j++)
314 key->u.aes_cmac.rx_pn[j] = seq[6 - j - 1];
306 /* 315 /*
307 * Initialize AES key state here as an optimization so that 316 * Initialize AES key state here as an optimization so that
308 * it does not need to be initialized for every packet. 317 * it does not need to be initialized for every packet.
309 */ 318 */
310 key->u.aes_cmac.tfm = 319 key->u.aes_cmac.tfm =
311 ieee80211_aes_cmac_key_setup(key_data); 320 ieee80211_aes_cmac_key_setup(key_data);
312 if (!key->u.aes_cmac.tfm) { 321 if (IS_ERR(key->u.aes_cmac.tfm)) {
322 err = PTR_ERR(key->u.aes_cmac.tfm);
313 kfree(key); 323 kfree(key);
314 return NULL; 324 key = ERR_PTR(err);
315 } 325 }
326 break;
316 } 327 }
328 memcpy(key->conf.key, key_data, key_len);
329 INIT_LIST_HEAD(&key->list);
317 330
318 return key; 331 return key;
319} 332}
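Since ieee80211_key_alloc() now reports why it failed (allocation vs. crypto tfm setup) via ERR_PTR(), its callers have to switch from NULL tests to the IS_ERR()/PTR_ERR() pattern, roughly (parameter names illustrative):

	key = ieee80211_key_alloc(params->cipher, key_idx, params->key_len,
				  params->key, params->seq_len, params->seq);
	if (IS_ERR(key))
		return PTR_ERR(key);	/* previously: if (!key) -ENOMEM */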
@@ -326,9 +339,9 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
326 if (key->local) 339 if (key->local)
327 ieee80211_key_disable_hw_accel(key); 340 ieee80211_key_disable_hw_accel(key);
328 341
329 if (key->conf.alg == ALG_CCMP) 342 if (key->conf.cipher == WLAN_CIPHER_SUITE_CCMP)
330 ieee80211_aes_key_free(key->u.ccmp.tfm); 343 ieee80211_aes_key_free(key->u.ccmp.tfm);
331 if (key->conf.alg == ALG_AES_CMAC) 344 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
332 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); 345 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
333 if (key->local) 346 if (key->local)
334 ieee80211_debugfs_key_remove(key); 347 ieee80211_debugfs_key_remove(key);
@@ -336,12 +349,12 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
336 kfree(key); 349 kfree(key);
337} 350}
338 351
339void ieee80211_key_link(struct ieee80211_key *key, 352int ieee80211_key_link(struct ieee80211_key *key,
340 struct ieee80211_sub_if_data *sdata, 353 struct ieee80211_sub_if_data *sdata,
341 struct sta_info *sta) 354 struct sta_info *sta)
342{ 355{
343 struct ieee80211_key *old_key; 356 struct ieee80211_key *old_key;
344 int idx; 357 int idx, ret;
345 358
346 BUG_ON(!sdata); 359 BUG_ON(!sdata);
347 BUG_ON(!key); 360 BUG_ON(!key);
@@ -396,9 +409,11 @@ void ieee80211_key_link(struct ieee80211_key *key,
396 409
397 ieee80211_debugfs_key_add(key); 410 ieee80211_debugfs_key_add(key);
398 411
399 ieee80211_key_enable_hw_accel(key); 412 ret = ieee80211_key_enable_hw_accel(key);
400 413
401 mutex_unlock(&sdata->local->key_mtx); 414 mutex_unlock(&sdata->local->key_mtx);
415
416 return ret;
402} 417}
403 418
404static void __ieee80211_key_free(struct ieee80211_key *key) 419static void __ieee80211_key_free(struct ieee80211_key *key)
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index b665bbb7a471..cb9a4a65cc68 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -123,18 +123,16 @@ struct ieee80211_key {
123 struct ieee80211_key_conf conf; 123 struct ieee80211_key_conf conf;
124}; 124};
125 125
126struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, 126struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
127 int idx,
128 size_t key_len,
129 const u8 *key_data, 127 const u8 *key_data,
130 size_t seq_len, const u8 *seq); 128 size_t seq_len, const u8 *seq);
131/* 129/*
132 * Insert a key into data structures (sdata, sta if necessary) 130 * Insert a key into data structures (sdata, sta if necessary)
133 * to make it used, free old key. 131 * to make it used, free old key.
134 */ 132 */
135void ieee80211_key_link(struct ieee80211_key *key, 133int __must_check ieee80211_key_link(struct ieee80211_key *key,
136 struct ieee80211_sub_if_data *sdata, 134 struct ieee80211_sub_if_data *sdata,
137 struct sta_info *sta); 135 struct sta_info *sta);
138void ieee80211_key_free(struct ieee80211_local *local, 136void ieee80211_key_free(struct ieee80211_local *local,
139 struct ieee80211_key *key); 137 struct ieee80211_key *key);
140void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx); 138void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx);
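With ieee80211_key_link() now returning the hardware-acceleration result and marked __must_check, callers are expected to unwind on failure, along these lines (a sketch, not a quote of the cfg80211 path):

	err = ieee80211_key_link(key, sdata, sta);
	if (err)
		ieee80211_key_free(sdata->local, key);

	return err;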
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index ded5c3843e06..fda97bb0018b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -99,11 +99,13 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
99 int ret = 0; 99 int ret = 0;
100 int power; 100 int power;
101 enum nl80211_channel_type channel_type; 101 enum nl80211_channel_type channel_type;
102 u32 offchannel_flag;
102 103
103 might_sleep(); 104 might_sleep();
104 105
105 scan_chan = local->scan_channel; 106 scan_chan = local->scan_channel;
106 107
108 offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
107 if (scan_chan) { 109 if (scan_chan) {
108 chan = scan_chan; 110 chan = scan_chan;
109 channel_type = NL80211_CHAN_NO_HT; 111 channel_type = NL80211_CHAN_NO_HT;
@@ -117,8 +119,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
117 channel_type = local->_oper_channel_type; 119 channel_type = local->_oper_channel_type;
118 local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL; 120 local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
119 } 121 }
122 offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
120 123
121 if (chan != local->hw.conf.channel || 124 if (offchannel_flag || chan != local->hw.conf.channel ||
122 channel_type != local->hw.conf.channel_type) { 125 channel_type != local->hw.conf.channel_type) {
123 local->hw.conf.channel = chan; 126 local->hw.conf.channel = chan;
124 local->hw.conf.channel_type = channel_type; 127 local->hw.conf.channel_type = channel_type;
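The offchannel_flag handling above is a compact change detector: snapshot the flag before the scan/operating-channel decision, XOR with the flag afterwards, and the result is non-zero exactly when the flag flipped in either direction, forcing a reconfig even when channel and channel_type are unchanged. The same idiom in isolation (names here are illustrative):

	u32 before, changed_off;

	before = conf->flags & IEEE80211_CONF_OFFCHANNEL;
	/* ... code that may set or clear IEEE80211_CONF_OFFCHANNEL ... */
	changed_off = before ^ (conf->flags & IEEE80211_CONF_OFFCHANNEL);

	if (changed_off || chan != conf->channel)
		/* push the new configuration to the driver */;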
@@ -302,7 +305,16 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
302 305
303 trace_api_restart_hw(local); 306 trace_api_restart_hw(local);
304 307
305 /* use this reason, __ieee80211_resume will unblock it */ 308 /* wait for scan work complete */
309 flush_workqueue(local->workqueue);
310
311 WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
312 "%s called with hardware scan in progress\n", __func__);
313
314 if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)))
315 ieee80211_scan_cancel(local);
316
317 /* use this reason, ieee80211_reconfig will unblock it */
306 ieee80211_stop_queues_by_reason(hw, 318 ieee80211_stop_queues_by_reason(hw,
307 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 319 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
308 320
@@ -336,9 +348,6 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
336 struct ieee80211_if_managed *ifmgd; 348 struct ieee80211_if_managed *ifmgd;
337 int c = 0; 349 int c = 0;
338 350
339 if (!netif_running(ndev))
340 return NOTIFY_DONE;
341
342 /* Make sure it's our interface that got changed */ 351 /* Make sure it's our interface that got changed */
343 if (!wdev) 352 if (!wdev)
344 return NOTIFY_DONE; 353 return NOTIFY_DONE;
@@ -349,11 +358,14 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
349 sdata = IEEE80211_DEV_TO_SUB_IF(ndev); 358 sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
350 bss_conf = &sdata->vif.bss_conf; 359 bss_conf = &sdata->vif.bss_conf;
351 360
361 if (!ieee80211_sdata_running(sdata))
362 return NOTIFY_DONE;
363
352 /* ARP filtering is only supported in managed mode */ 364 /* ARP filtering is only supported in managed mode */
353 if (sdata->vif.type != NL80211_IFTYPE_STATION) 365 if (sdata->vif.type != NL80211_IFTYPE_STATION)
354 return NOTIFY_DONE; 366 return NOTIFY_DONE;
355 367
356 idev = sdata->dev->ip_ptr; 368 idev = __in_dev_get_rtnl(sdata->dev);
357 if (!idev) 369 if (!idev)
358 return NOTIFY_DONE; 370 return NOTIFY_DONE;
359 371
@@ -390,6 +402,80 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
390} 402}
391#endif 403#endif
392 404
405static int ieee80211_napi_poll(struct napi_struct *napi, int budget)
406{
407 struct ieee80211_local *local =
408 container_of(napi, struct ieee80211_local, napi);
409
410 return local->ops->napi_poll(&local->hw, budget);
411}
412
413void ieee80211_napi_schedule(struct ieee80211_hw *hw)
414{
415 struct ieee80211_local *local = hw_to_local(hw);
416
417 napi_schedule(&local->napi);
418}
419EXPORT_SYMBOL(ieee80211_napi_schedule);
420
421void ieee80211_napi_complete(struct ieee80211_hw *hw)
422{
423 struct ieee80211_local *local = hw_to_local(hw);
424
425 napi_complete(&local->napi);
426}
427EXPORT_SYMBOL(ieee80211_napi_complete);
428
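The three additions above give drivers a mac80211-owned NAPI context (backed by the dummy netdev initialised in ieee80211_alloc_hw() below). A hypothetical driver would wire it up roughly like this; drv_disable_rx_irq()/drv_rx_up_to()/drv_enable_rx_irq() are made-up stand-ins for driver internals:

	static irqreturn_t drv_isr(int irq, void *data)
	{
		struct ieee80211_hw *hw = data;

		drv_disable_rx_irq(hw);		/* hypothetical */
		ieee80211_napi_schedule(hw);	/* defer RX to softirq */
		return IRQ_HANDLED;
	}

	/* installed as ops->napi_poll, weight from hw->napi_weight */
	static int drv_napi_poll(struct ieee80211_hw *hw, int budget)
	{
		int done = drv_rx_up_to(hw, budget);	/* hypothetical */

		if (done < budget) {
			ieee80211_napi_complete(hw);
			drv_enable_rx_irq(hw);		/* hypothetical */
		}
		return done;
	}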
429/* There isn't a lot of sense in it, but you can transmit anything you like */
430static const struct ieee80211_txrx_stypes
431ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
432 [NL80211_IFTYPE_ADHOC] = {
433 .tx = 0xffff,
434 .rx = BIT(IEEE80211_STYPE_ACTION >> 4),
435 },
436 [NL80211_IFTYPE_STATION] = {
437 .tx = 0xffff,
438 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
439 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
440 },
441 [NL80211_IFTYPE_AP] = {
442 .tx = 0xffff,
443 .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
444 BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
445 BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
446 BIT(IEEE80211_STYPE_DISASSOC >> 4) |
447 BIT(IEEE80211_STYPE_AUTH >> 4) |
448 BIT(IEEE80211_STYPE_DEAUTH >> 4) |
449 BIT(IEEE80211_STYPE_ACTION >> 4),
450 },
451 [NL80211_IFTYPE_AP_VLAN] = {
452 /* copy AP */
453 .tx = 0xffff,
454 .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
455 BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
456 BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
457 BIT(IEEE80211_STYPE_DISASSOC >> 4) |
458 BIT(IEEE80211_STYPE_AUTH >> 4) |
459 BIT(IEEE80211_STYPE_DEAUTH >> 4) |
460 BIT(IEEE80211_STYPE_ACTION >> 4),
461 },
462 [NL80211_IFTYPE_P2P_CLIENT] = {
463 .tx = 0xffff,
464 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
465 BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
466 },
467 [NL80211_IFTYPE_P2P_GO] = {
468 .tx = 0xffff,
469 .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
470 BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
471 BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
472 BIT(IEEE80211_STYPE_DISASSOC >> 4) |
473 BIT(IEEE80211_STYPE_AUTH >> 4) |
474 BIT(IEEE80211_STYPE_DEAUTH >> 4) |
475 BIT(IEEE80211_STYPE_ACTION >> 4),
476 },
477};
478
393struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 479struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
394 const struct ieee80211_ops *ops) 480 const struct ieee80211_ops *ops)
395{ 481{
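The .tx/.rx fields above are 16-bit bitmaps indexed by management frame subtype. IEEE 802.11 keeps the subtype in frame-control bits 4-7, and the IEEE80211_STYPE_* constants are already shifted into that position (e.g. IEEE80211_STYPE_PROBE_REQ is 0x0040), hence the ">> 4" to turn a subtype into a bit index. A sketch of the matching check a registration path could perform:

	static bool mgmt_stype_allowed(const struct ieee80211_txrx_stypes *st,
				       __le16 frame_control)
	{
		u16 stype = le16_to_cpu(frame_control) & IEEE80211_FCTL_STYPE;

		return st->rx & BIT(stype >> 4);	/* bits 4..7 -> 0..15 */
	}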
@@ -419,6 +505,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
419 if (!wiphy) 505 if (!wiphy)
420 return NULL; 506 return NULL;
421 507
508 wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes;
509
422 wiphy->flags |= WIPHY_FLAG_NETNS_OK | 510 wiphy->flags |= WIPHY_FLAG_NETNS_OK |
423 WIPHY_FLAG_4ADDR_AP | 511 WIPHY_FLAG_4ADDR_AP |
424 WIPHY_FLAG_4ADDR_STATION; 512 WIPHY_FLAG_4ADDR_STATION;
@@ -455,7 +543,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
455 __hw_addr_init(&local->mc_list); 543 __hw_addr_init(&local->mc_list);
456 544
457 mutex_init(&local->iflist_mtx); 545 mutex_init(&local->iflist_mtx);
458 mutex_init(&local->scan_mtx); 546 mutex_init(&local->mtx);
459 547
460 mutex_init(&local->key_mtx); 548 mutex_init(&local->key_mtx);
461 spin_lock_init(&local->filter_lock); 549 spin_lock_init(&local->filter_lock);
@@ -494,6 +582,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
494 skb_queue_head_init(&local->skb_queue); 582 skb_queue_head_init(&local->skb_queue);
495 skb_queue_head_init(&local->skb_queue_unreliable); 583 skb_queue_head_init(&local->skb_queue_unreliable);
496 584
585 /* init dummy netdev for use w/ NAPI */
586 init_dummy_netdev(&local->napi_dev);
587
497 return local_to_hw(local); 588 return local_to_hw(local);
498} 589}
499EXPORT_SYMBOL(ieee80211_alloc_hw); 590EXPORT_SYMBOL(ieee80211_alloc_hw);
@@ -506,6 +597,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
506 int channels, max_bitrates; 597 int channels, max_bitrates;
507 bool supp_ht; 598 bool supp_ht;
508 static const u32 cipher_suites[] = { 599 static const u32 cipher_suites[] = {
600 /* keep WEP first, it may be removed below */
509 WLAN_CIPHER_SUITE_WEP40, 601 WLAN_CIPHER_SUITE_WEP40,
510 WLAN_CIPHER_SUITE_WEP104, 602 WLAN_CIPHER_SUITE_WEP104,
511 WLAN_CIPHER_SUITE_TKIP, 603 WLAN_CIPHER_SUITE_TKIP,
@@ -554,6 +646,14 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
554 /* mac80211 always supports monitor */ 646 /* mac80211 always supports monitor */
555 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); 647 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
556 648
649#ifndef CONFIG_MAC80211_MESH
650 /* mesh depends on Kconfig, but drivers should set it if they want */
651 local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
652#endif
653
654 /* mac80211 supports control port protocol changing */
655 local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
656
557 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 657 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
558 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 658 local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
559 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) 659 else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
@@ -589,10 +689,41 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
589 if (local->hw.wiphy->max_scan_ie_len) 689 if (local->hw.wiphy->max_scan_ie_len)
590 local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; 690 local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
591 691
592 local->hw.wiphy->cipher_suites = cipher_suites; 692 /* Set up cipher suites unless driver already did */
593 local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 693 if (!local->hw.wiphy->cipher_suites) {
594 if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE)) 694 local->hw.wiphy->cipher_suites = cipher_suites;
595 local->hw.wiphy->n_cipher_suites--; 695 local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
696 if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE))
697 local->hw.wiphy->n_cipher_suites--;
698 }
699 if (IS_ERR(local->wep_tx_tfm) || IS_ERR(local->wep_rx_tfm)) {
700 if (local->hw.wiphy->cipher_suites == cipher_suites) {
701 local->hw.wiphy->cipher_suites += 2;
702 local->hw.wiphy->n_cipher_suites -= 2;
703 } else {
704 u32 *suites;
705 int r, w = 0;
706
707 /* Filter out WEP */
708
709 suites = kmemdup(
710 local->hw.wiphy->cipher_suites,
711 sizeof(u32) * local->hw.wiphy->n_cipher_suites,
712 GFP_KERNEL);
713 if (!suites)
714 return -ENOMEM;
715 for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
716 u32 suite = local->hw.wiphy->cipher_suites[r];
717 if (suite == WLAN_CIPHER_SUITE_WEP40 ||
718 suite == WLAN_CIPHER_SUITE_WEP104)
719 continue;
720 suites[w++] = suite;
721 }
722 local->hw.wiphy->cipher_suites = suites;
723 local->hw.wiphy->n_cipher_suites = w;
724 local->wiphy_ciphers_allocated = true;
725 }
726 }
596 727
597 result = wiphy_register(local->hw.wiphy); 728 result = wiphy_register(local->hw.wiphy);
598 if (result < 0) 729 if (result < 0)
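When the WEP ciphers must be dropped (the WEP tfm allocation failed), the code above takes one of two routes: if the wiphy still points at mac80211's default table it simply skips the first two entries, relying on the "keep WEP first" ordering noted earlier; a driver-supplied table is duplicated and compacted, with the copy freed again via wiphy_ciphers_allocated on unregister/free. An illustrative helper equivalent to the compaction loop:

	/* not in the patch -- mirrors the kmemdup loop above */
	static int filter_out_wep(u32 *suites, int n)
	{
		int r, w = 0;

		for (r = 0; r < n; r++) {
			if (suites[r] == WLAN_CIPHER_SUITE_WEP40 ||
			    suites[r] == WLAN_CIPHER_SUITE_WEP104)
				continue;
			suites[w++] = suites[r];
		}
		return w;	/* new n_cipher_suites */
	}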
@@ -641,16 +772,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
641 772
642 result = ieee80211_wep_init(local); 773 result = ieee80211_wep_init(local);
643 if (result < 0) 774 if (result < 0)
644 printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", 775 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
645 wiphy_name(local->hw.wiphy), result); 776 result);
646 777
647 rtnl_lock(); 778 rtnl_lock();
648 779
649 result = ieee80211_init_rate_ctrl_alg(local, 780 result = ieee80211_init_rate_ctrl_alg(local,
650 hw->rate_control_algorithm); 781 hw->rate_control_algorithm);
651 if (result < 0) { 782 if (result < 0) {
652 printk(KERN_DEBUG "%s: Failed to initialize rate control " 783 wiphy_debug(local->hw.wiphy,
653 "algorithm\n", wiphy_name(local->hw.wiphy)); 784 "Failed to initialize rate control algorithm\n");
654 goto fail_rate; 785 goto fail_rate;
655 } 786 }
656 787
@@ -659,8 +790,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
659 result = ieee80211_if_add(local, "wlan%d", NULL, 790 result = ieee80211_if_add(local, "wlan%d", NULL,
660 NL80211_IFTYPE_STATION, NULL); 791 NL80211_IFTYPE_STATION, NULL);
661 if (result) 792 if (result)
662 printk(KERN_WARNING "%s: Failed to add default virtual iface\n", 793 wiphy_warn(local->hw.wiphy,
663 wiphy_name(local->hw.wiphy)); 794 "Failed to add default virtual iface\n");
664 } 795 }
665 796
666 rtnl_unlock(); 797 rtnl_unlock();
@@ -683,6 +814,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
683 goto fail_ifa; 814 goto fail_ifa;
684#endif 815#endif
685 816
817 netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll,
818 local->hw.napi_weight);
819
686 return 0; 820 return 0;
687 821
688#ifdef CONFIG_INET 822#ifdef CONFIG_INET
@@ -703,6 +837,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
703 fail_workqueue: 837 fail_workqueue:
704 wiphy_unregister(local->hw.wiphy); 838 wiphy_unregister(local->hw.wiphy);
705 fail_wiphy_register: 839 fail_wiphy_register:
840 if (local->wiphy_ciphers_allocated)
841 kfree(local->hw.wiphy->cipher_suites);
706 kfree(local->int_scan_req); 842 kfree(local->int_scan_req);
707 return result; 843 return result;
708} 844}
@@ -738,6 +874,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
738 */ 874 */
739 del_timer_sync(&local->work_timer); 875 del_timer_sync(&local->work_timer);
740 876
877 cancel_work_sync(&local->restart_work);
741 cancel_work_sync(&local->reconfig_filter); 878 cancel_work_sync(&local->reconfig_filter);
742 879
743 ieee80211_clear_tx_pending(local); 880 ieee80211_clear_tx_pending(local);
@@ -746,8 +883,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
746 883
747 if (skb_queue_len(&local->skb_queue) || 884 if (skb_queue_len(&local->skb_queue) ||
748 skb_queue_len(&local->skb_queue_unreliable)) 885 skb_queue_len(&local->skb_queue_unreliable))
749 printk(KERN_WARNING "%s: skb_queue not empty\n", 886 wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
750 wiphy_name(local->hw.wiphy));
751 skb_queue_purge(&local->skb_queue); 887 skb_queue_purge(&local->skb_queue);
752 skb_queue_purge(&local->skb_queue_unreliable); 888 skb_queue_purge(&local->skb_queue_unreliable);
753 889
@@ -764,7 +900,10 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
764 struct ieee80211_local *local = hw_to_local(hw); 900 struct ieee80211_local *local = hw_to_local(hw);
765 901
766 mutex_destroy(&local->iflist_mtx); 902 mutex_destroy(&local->iflist_mtx);
767 mutex_destroy(&local->scan_mtx); 903 mutex_destroy(&local->mtx);
904
905 if (local->wiphy_ciphers_allocated)
906 kfree(local->hw.wiphy->cipher_suites);
768 907
769 wiphy_free(local->hw.wiphy); 908 wiphy_free(local->hw.wiphy);
770} 909}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b6c163ac22da..8b733cf6f3ea 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -54,6 +54,12 @@
54 */ 54 */
55#define IEEE80211_SIGNAL_AVE_WEIGHT 3 55#define IEEE80211_SIGNAL_AVE_WEIGHT 3
56 56
57/*
 58 * How many Beacon frames need to have contributed to the averaged
 59 * signal strength before we start indicating signal change events.
60 */
61#define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
62
57#define TMR_RUNNING_TIMER 0 63#define TMR_RUNNING_TIMER 0
58#define TMR_RUNNING_CHANSW 1 64#define TMR_RUNNING_CHANSW 1
59 65
@@ -86,7 +92,7 @@ enum rx_mgmt_action {
86/* utils */ 92/* utils */
87static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd) 93static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd)
88{ 94{
89 WARN_ON(!mutex_is_locked(&ifmgd->mtx)); 95 lockdep_assert_held(&ifmgd->mtx);
90} 96}
91 97
92/* 98/*
@@ -109,7 +115,7 @@ static void run_again(struct ieee80211_if_managed *ifmgd,
109 mod_timer(&ifmgd->timer, timeout); 115 mod_timer(&ifmgd->timer, timeout);
110} 116}
111 117
112static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata) 118void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
113{ 119{
114 if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER) 120 if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER)
115 return; 121 return;
@@ -118,6 +124,19 @@ static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata)
118 round_jiffies_up(jiffies + IEEE80211_BEACON_LOSS_TIME)); 124 round_jiffies_up(jiffies + IEEE80211_BEACON_LOSS_TIME));
119} 125}
120 126
127void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
128{
129 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
130
131 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
132 return;
133
134 mod_timer(&sdata->u.mgd.conn_mon_timer,
135 round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
136
137 ifmgd->probe_send_count = 0;
138}
139
121static int ecw2cw(int ecw) 140static int ecw2cw(int ecw)
122{ 141{
123 return (1 << ecw) - 1; 142 return (1 << ecw) - 1;
@@ -778,16 +797,17 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
778 params.uapsd = uapsd; 797 params.uapsd = uapsd;
779 798
780#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 799#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
781 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d " 800 wiphy_debug(local->hw.wiphy,
782 "cWmin=%d cWmax=%d txop=%d uapsd=%d\n", 801 "WMM queue=%d aci=%d acm=%d aifs=%d "
783 wiphy_name(local->hw.wiphy), queue, aci, acm, 802 "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
784 params.aifs, params.cw_min, params.cw_max, params.txop, 803 queue, aci, acm,
785 params.uapsd); 804 params.aifs, params.cw_min, params.cw_max,
805 params.txop, params.uapsd);
786#endif 806#endif
787 if (drv_conf_tx(local, queue, &params)) 807 if (drv_conf_tx(local, queue, &params))
788 printk(KERN_DEBUG "%s: failed to set TX queue " 808 wiphy_debug(local->hw.wiphy,
789 "parameters for queue %d\n", 809 "failed to set TX queue parameters for queue %d\n",
790 wiphy_name(local->hw.wiphy), queue); 810 queue);
791 } 811 }
792 812
793 /* enable WMM or activate new settings */ 813 /* enable WMM or activate new settings */
@@ -990,6 +1010,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
990 1010
991 if (remove_sta) 1011 if (remove_sta)
992 sta_info_destroy_addr(sdata, bssid); 1012 sta_info_destroy_addr(sdata, bssid);
1013
1014 del_timer_sync(&sdata->u.mgd.conn_mon_timer);
1015 del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
1016 del_timer_sync(&sdata->u.mgd.timer);
1017 del_timer_sync(&sdata->u.mgd.chswitch_timer);
993} 1018}
994 1019
995void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 1020void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1006,21 +1031,26 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1006 if (is_multicast_ether_addr(hdr->addr1)) 1031 if (is_multicast_ether_addr(hdr->addr1))
1007 return; 1032 return;
1008 1033
1009 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) 1034 ieee80211_sta_reset_conn_monitor(sdata);
1010 return;
1011
1012 mod_timer(&sdata->u.mgd.conn_mon_timer,
1013 round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
1014} 1035}
1015 1036
1016static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) 1037static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1017{ 1038{
1018 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1039 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1019 const u8 *ssid; 1040 const u8 *ssid;
1041 u8 *dst = ifmgd->associated->bssid;
1042 u8 unicast_limit = max(1, IEEE80211_MAX_PROBE_TRIES - 3);
1043
1044 /*
1045 * Send broadcast probe requests for the last three
1046 * attempts once unicast ones go unanswered, since some
1047 * buggy APs only respond to broadcast probe requests.
1048 */
1049 if (ifmgd->probe_send_count >= unicast_limit)
1050 dst = NULL;
1020 1051
1021 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1052 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1022 ieee80211_send_probe_req(sdata, ifmgd->associated->bssid, 1053 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0);
1023 ssid + 2, ssid[1], NULL, 0);
1024 1054
1025 ifmgd->probe_send_count++; 1055 ifmgd->probe_send_count++;
1026 ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT; 1056 ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT;
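The destination logic added above degrades from unicast to broadcast probing. Assuming IEEE80211_MAX_PROBE_TRIES is 5 (its mlme.c value at the time), unicast_limit evaluates to 2, giving:

	u8 unicast_limit = max(1, IEEE80211_MAX_PROBE_TRIES - 3); /* = 2 */
	const u8 *dst = ifmgd->associated->bssid;

	/* attempts 0 and 1 probe the AP directly; from attempt 2 on,
	 * fall back to broadcast for APs that ignore unicast probes */
	if (ifmgd->probe_send_count >= unicast_limit)
		dst = NULL;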
@@ -1103,8 +1133,11 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
1103 printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid); 1133 printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid);
1104 1134
1105 ieee80211_set_disassoc(sdata, true); 1135 ieee80211_set_disassoc(sdata, true);
1106 ieee80211_recalc_idle(local);
1107 mutex_unlock(&ifmgd->mtx); 1136 mutex_unlock(&ifmgd->mtx);
1137
1138 mutex_lock(&local->mtx);
1139 ieee80211_recalc_idle(local);
1140 mutex_unlock(&local->mtx);
1108 /* 1141 /*
1109 * must be outside lock due to cfg80211, 1142 * must be outside lock due to cfg80211,
1110 * but that's not a problem. 1143 * but that's not a problem.
@@ -1173,7 +1206,9 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1173 sdata->name, bssid, reason_code); 1206 sdata->name, bssid, reason_code);
1174 1207
1175 ieee80211_set_disassoc(sdata, true); 1208 ieee80211_set_disassoc(sdata, true);
1209 mutex_lock(&sdata->local->mtx);
1176 ieee80211_recalc_idle(sdata->local); 1210 ieee80211_recalc_idle(sdata->local);
1211 mutex_unlock(&sdata->local->mtx);
1177 1212
1178 return RX_MGMT_CFG80211_DEAUTH; 1213 return RX_MGMT_CFG80211_DEAUTH;
1179} 1214}
@@ -1203,7 +1238,9 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1203 sdata->name, mgmt->sa, reason_code); 1238 sdata->name, mgmt->sa, reason_code);
1204 1239
1205 ieee80211_set_disassoc(sdata, true); 1240 ieee80211_set_disassoc(sdata, true);
1241 mutex_lock(&sdata->local->mtx);
1206 ieee80211_recalc_idle(sdata->local); 1242 ieee80211_recalc_idle(sdata->local);
1243 mutex_unlock(&sdata->local->mtx);
1207 return RX_MGMT_CFG80211_DISASSOC; 1244 return RX_MGMT_CFG80211_DISASSOC;
1208} 1245}
1209 1246
@@ -1362,7 +1399,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1362 * Also start the timer that will detect beacon loss. 1399 * Also start the timer that will detect beacon loss.
1363 */ 1400 */
1364 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); 1401 ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
1365 mod_beacon_timer(sdata); 1402 ieee80211_sta_reset_beacon_monitor(sdata);
1366 1403
1367 return true; 1404 return true;
1368} 1405}
@@ -1465,7 +1502,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1465 * we have or will be receiving any beacons or data, so let's 1502 * we have or will be receiving any beacons or data, so let's
1466 * schedule the timers again, just in case. 1503 * schedule the timers again, just in case.
1467 */ 1504 */
1468 mod_beacon_timer(sdata); 1505 ieee80211_sta_reset_beacon_monitor(sdata);
1469 1506
1470 mod_timer(&ifmgd->conn_mon_timer, 1507 mod_timer(&ifmgd->conn_mon_timer,
1471 round_jiffies_up(jiffies + 1508 round_jiffies_up(jiffies +
@@ -1540,15 +1577,18 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1540 ifmgd->last_beacon_signal = rx_status->signal; 1577 ifmgd->last_beacon_signal = rx_status->signal;
1541 if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) { 1578 if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
1542 ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE; 1579 ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
1543 ifmgd->ave_beacon_signal = rx_status->signal; 1580 ifmgd->ave_beacon_signal = rx_status->signal * 16;
1544 ifmgd->last_cqm_event_signal = 0; 1581 ifmgd->last_cqm_event_signal = 0;
1582 ifmgd->count_beacon_signal = 1;
1545 } else { 1583 } else {
1546 ifmgd->ave_beacon_signal = 1584 ifmgd->ave_beacon_signal =
1547 (IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 + 1585 (IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
1548 (16 - IEEE80211_SIGNAL_AVE_WEIGHT) * 1586 (16 - IEEE80211_SIGNAL_AVE_WEIGHT) *
1549 ifmgd->ave_beacon_signal) / 16; 1587 ifmgd->ave_beacon_signal) / 16;
1588 ifmgd->count_beacon_signal++;
1550 } 1589 }
1551 if (bss_conf->cqm_rssi_thold && 1590 if (bss_conf->cqm_rssi_thold &&
1591 ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
1552 !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) { 1592 !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
1553 int sig = ifmgd->ave_beacon_signal / 16; 1593 int sig = ifmgd->ave_beacon_signal / 16;
1554 int last_event = ifmgd->last_cqm_event_signal; 1594 int last_event = ifmgd->last_cqm_event_signal;
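The average kept above is a fixed-point exponential moving average: signals are stored scaled by 16 so the 3/16 weighting loses no precision to integer division. A tiny illustration of the same computation:

	/* ave holds signal * 16 across beacons; weight is 3/16 */
	static int sig_ema(int ave, int new_sig)
	{
		return (IEEE80211_SIGNAL_AVE_WEIGHT * new_sig * 16 +
			(16 - IEEE80211_SIGNAL_AVE_WEIGHT) * ave) / 16;
	}

	/* e.g. ave = -60 dBm * 16 = -960; after a -52 dBm beacon:
	 * (3 * -52 * 16 + 13 * -960) / 16 = -936, i.e. about -58 dBm */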
@@ -1588,7 +1628,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1588 * Push the beacon loss detection into the future since 1628 * Push the beacon loss detection into the future since
1589 * we are processing a beacon from the AP just now. 1629 * we are processing a beacon from the AP just now.
1590 */ 1630 */
1591 mod_beacon_timer(sdata); 1631 ieee80211_sta_reset_beacon_monitor(sdata);
1592 1632
1593 ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); 1633 ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
1594 ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable, 1634 ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable,
@@ -1751,7 +1791,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1751 struct ieee80211_local *local = sdata->local; 1791 struct ieee80211_local *local = sdata->local;
1752 struct ieee80211_work *wk; 1792 struct ieee80211_work *wk;
1753 1793
1754 mutex_lock(&local->work_mtx); 1794 mutex_lock(&local->mtx);
1755 list_for_each_entry(wk, &local->work_list, list) { 1795 list_for_each_entry(wk, &local->work_list, list) {
1756 if (wk->sdata != sdata) 1796 if (wk->sdata != sdata)
1757 continue; 1797 continue;
@@ -1783,7 +1823,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1783 free_work(wk); 1823 free_work(wk);
1784 break; 1824 break;
1785 } 1825 }
1786 mutex_unlock(&local->work_mtx); 1826 mutex_unlock(&local->mtx);
1787 1827
1788 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 1828 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
1789 } 1829 }
@@ -1840,8 +1880,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
1840 " after %dms, disconnecting.\n", 1880 " after %dms, disconnecting.\n",
1841 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ); 1881 bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
1842 ieee80211_set_disassoc(sdata, true); 1882 ieee80211_set_disassoc(sdata, true);
1843 ieee80211_recalc_idle(local);
1844 mutex_unlock(&ifmgd->mtx); 1883 mutex_unlock(&ifmgd->mtx);
1884 mutex_lock(&local->mtx);
1885 ieee80211_recalc_idle(local);
1886 mutex_unlock(&local->mtx);
1845 /* 1887 /*
1846 * must be outside lock due to cfg80211, 1888 * must be outside lock due to cfg80211,
1847 * but that's not a problem. 1889 * but that's not a problem.
@@ -1917,6 +1959,8 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
1917 * time -- the code here is properly synchronised. 1959 * time -- the code here is properly synchronised.
1918 */ 1960 */
1919 1961
1962 cancel_work_sync(&ifmgd->request_smps_work);
1963
1920 cancel_work_sync(&ifmgd->beacon_connection_loss_work); 1964 cancel_work_sync(&ifmgd->beacon_connection_loss_work);
1921 if (del_timer_sync(&ifmgd->timer)) 1965 if (del_timer_sync(&ifmgd->timer))
1922 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); 1966 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -1952,6 +1996,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
1952 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); 1996 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
1953 INIT_WORK(&ifmgd->beacon_connection_loss_work, 1997 INIT_WORK(&ifmgd->beacon_connection_loss_work,
1954 ieee80211_beacon_connection_loss_work); 1998 ieee80211_beacon_connection_loss_work);
1999 INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_work);
1955 setup_timer(&ifmgd->timer, ieee80211_sta_timer, 2000 setup_timer(&ifmgd->timer, ieee80211_sta_timer,
1956 (unsigned long) sdata); 2001 (unsigned long) sdata);
1957 setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 2002 setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
@@ -2249,6 +2294,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2249 else 2294 else
2250 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT; 2295 ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT;
2251 2296
2297 sdata->control_port_protocol = req->crypto.control_port_ethertype;
2298 sdata->control_port_no_encrypt = req->crypto.control_port_no_encrypt;
2299
2252 ieee80211_add_work(wk); 2300 ieee80211_add_work(wk);
2253 return 0; 2301 return 0;
2254} 2302}
@@ -2275,7 +2323,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2275 2323
2276 mutex_unlock(&ifmgd->mtx); 2324 mutex_unlock(&ifmgd->mtx);
2277 2325
2278 mutex_lock(&local->work_mtx); 2326 mutex_lock(&local->mtx);
2279 list_for_each_entry(wk, &local->work_list, list) { 2327 list_for_each_entry(wk, &local->work_list, list) {
2280 if (wk->sdata != sdata) 2328 if (wk->sdata != sdata)
2281 continue; 2329 continue;
@@ -2294,7 +2342,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2294 free_work(wk); 2342 free_work(wk);
2295 break; 2343 break;
2296 } 2344 }
2297 mutex_unlock(&local->work_mtx); 2345 mutex_unlock(&local->mtx);
2298 2346
2299 /* 2347 /*
2300 * If somebody requests authentication and we haven't 2348 * If somebody requests authentication and we haven't
@@ -2319,7 +2367,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2319 if (assoc_bss) 2367 if (assoc_bss)
2320 sta_info_destroy_addr(sdata, bssid); 2368 sta_info_destroy_addr(sdata, bssid);
2321 2369
2370 mutex_lock(&sdata->local->mtx);
2322 ieee80211_recalc_idle(sdata->local); 2371 ieee80211_recalc_idle(sdata->local);
2372 mutex_unlock(&sdata->local->mtx);
2323 2373
2324 return 0; 2374 return 0;
2325} 2375}
@@ -2357,7 +2407,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2357 cookie, !req->local_state_change); 2407 cookie, !req->local_state_change);
2358 sta_info_destroy_addr(sdata, bssid); 2408 sta_info_destroy_addr(sdata, bssid);
2359 2409
2410 mutex_lock(&sdata->local->mtx);
2360 ieee80211_recalc_idle(sdata->local); 2411 ieee80211_recalc_idle(sdata->local);
2412 mutex_unlock(&sdata->local->mtx);
2361 2413
2362 return 0; 2414 return 0;
2363} 2415}
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index c36b1911987a..4b564091e51d 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -22,12 +22,16 @@
22static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata) 22static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
23{ 23{
24 struct ieee80211_local *local = sdata->local; 24 struct ieee80211_local *local = sdata->local;
25 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
25 26
26 local->offchannel_ps_enabled = false; 27 local->offchannel_ps_enabled = false;
27 28
28 /* FIXME: what to do when local->pspolling is true? */ 29 /* FIXME: what to do when local->pspolling is true? */
29 30
30 del_timer_sync(&local->dynamic_ps_timer); 31 del_timer_sync(&local->dynamic_ps_timer);
32 del_timer_sync(&ifmgd->bcn_mon_timer);
33 del_timer_sync(&ifmgd->conn_mon_timer);
34
31 cancel_work_sync(&local->dynamic_ps_enable_work); 35 cancel_work_sync(&local->dynamic_ps_enable_work);
32 36
33 if (local->hw.conf.flags & IEEE80211_CONF_PS) { 37 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
@@ -85,6 +89,9 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
85 mod_timer(&local->dynamic_ps_timer, jiffies + 89 mod_timer(&local->dynamic_ps_timer, jiffies +
86 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 90 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
87 } 91 }
92
93 ieee80211_sta_reset_beacon_monitor(sdata);
94 ieee80211_sta_reset_conn_monitor(sdata);
88} 95}
89 96
90void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local) 97void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
@@ -112,8 +119,10 @@ void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
112 * used from user space controlled off-channel operations. 119 * used from user space controlled off-channel operations.
113 */ 120 */
114 if (sdata->vif.type != NL80211_IFTYPE_STATION && 121 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
115 sdata->vif.type != NL80211_IFTYPE_MONITOR) 122 sdata->vif.type != NL80211_IFTYPE_MONITOR) {
123 set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
116 netif_tx_stop_all_queues(sdata->dev); 124 netif_tx_stop_all_queues(sdata->dev);
125 }
117 } 126 }
118 mutex_unlock(&local->iflist_mtx); 127 mutex_unlock(&local->iflist_mtx);
119} 128}
@@ -131,6 +140,7 @@ void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
131 continue; 140 continue;
132 141
133 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 142 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
143 set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
134 netif_tx_stop_all_queues(sdata->dev); 144 netif_tx_stop_all_queues(sdata->dev);
135 if (sdata->u.mgd.associated) 145 if (sdata->u.mgd.associated)
136 ieee80211_offchannel_ps_enable(sdata); 146 ieee80211_offchannel_ps_enable(sdata);
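
Setting SDATA_STATE_OFFCHANNEL before stopping the queues (and clearing it before waking them, as the next hunk does) gives the TX path a way to tell "stopped because we left the operating channel" apart from other stop reasons. A rough standalone model of the pattern, using C11 atomics in place of the kernel's set_bit/clear_bit; all names and bit values here are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define STATE_OFFCHANNEL 0UL   /* bit index; illustrative, not the kernel's */

    static atomic_ulong sdata_state;

    static void offchannel_stop(void)
    {
            atomic_fetch_or(&sdata_state, 1UL << STATE_OFFCHANNEL);
            /* netif_tx_stop_all_queues() would follow here */
    }

    static void offchannel_return(void)
    {
            atomic_fetch_and(&sdata_state, ~(1UL << STATE_OFFCHANNEL));
            /* netif_tx_wake_all_queues() would follow here */
    }

    static bool tx_allowed(void)
    {
            return !(atomic_load(&sdata_state) & (1UL << STATE_OFFCHANNEL));
    }

    int main(void)
    {
            offchannel_stop();
            printf("tx allowed: %d\n", tx_allowed());
            offchannel_return();
            printf("tx allowed: %d\n", tx_allowed());
            return 0;
    }
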
@@ -155,8 +165,20 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
155 ieee80211_offchannel_ps_disable(sdata); 165 ieee80211_offchannel_ps_disable(sdata);
156 } 166 }
157 167
158 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 168 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
169 clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
170 /*
171 * This may wake up queues even though the driver
172 * currently has them stopped. This is not very
173 * likely, since the driver won't have gotten any
174 * (or hardly any) new packets while we weren't
175 * on the right channel, and even if it happens
176 * it will at most lead to queueing up one more
177 * packet per queue in mac80211 rather than on
178 * the interface qdisc.
179 */
159 netif_tx_wake_all_queues(sdata->dev); 180 netif_tx_wake_all_queues(sdata->dev);
181 }
160 182
161 /* re-enable beaconing */ 183 /* re-enable beaconing */
162 if (enable_beaconing && 184 if (enable_beaconing &&
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index d287fde0431d..ce671dfd238c 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -12,7 +12,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
12 struct ieee80211_sub_if_data *sdata; 12 struct ieee80211_sub_if_data *sdata;
13 struct sta_info *sta; 13 struct sta_info *sta;
14 14
15 ieee80211_scan_cancel(local); 15 if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)))
16 ieee80211_scan_cancel(local);
16 17
17 ieee80211_stop_queues_by_reason(hw, 18 ieee80211_stop_queues_by_reason(hw,
18 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 19 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index be04d46110fe..b0cc385bf989 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -207,7 +207,7 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
207 207
208 fc = hdr->frame_control; 208 fc = hdr->frame_control;
209 209
210 return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc)); 210 return (info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc);
211} 211}
212 212
213static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx) 213static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
@@ -368,8 +368,8 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
368 368
369 ref = rate_control_alloc(name, local); 369 ref = rate_control_alloc(name, local);
370 if (!ref) { 370 if (!ref) {
371 printk(KERN_WARNING "%s: Failed to select rate control " 371 wiphy_warn(local->hw.wiphy,
372 "algorithm\n", wiphy_name(local->hw.wiphy)); 372 "Failed to select rate control algorithm\n");
373 return -ENOENT; 373 return -ENOENT;
374 } 374 }
375 375
@@ -380,9 +380,8 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
380 sta_info_flush(local, NULL); 380 sta_info_flush(local, NULL);
381 } 381 }
382 382
383 printk(KERN_DEBUG "%s: Selected rate control " 383 wiphy_debug(local->hw.wiphy, "Selected rate control algorithm '%s'\n",
384 "algorithm '%s'\n", wiphy_name(local->hw.wiphy), 384 ref->ops->name);
385 ref->ops->name);
386 385
387 return 0; 386 return 0;
388} 387}
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index 47438b4a9af5..135f36fd4d5d 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -162,7 +162,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
162 file_info->next_entry = (file_info->next_entry + 1) % 162 file_info->next_entry = (file_info->next_entry + 1) %
163 RC_PID_EVENT_RING_SIZE; 163 RC_PID_EVENT_RING_SIZE;
164 164
165 /* Print information about the event. Note that userpace needs to 165 /* Print information about the event. Note that userspace needs to
166 * provide large enough buffers. */ 166 * provide large enough buffers. */
167 length = length < RC_PID_PRINT_BUF_SIZE ? 167 length = length < RC_PID_PRINT_BUF_SIZE ?
168 length : RC_PID_PRINT_BUF_SIZE; 168 length : RC_PID_PRINT_BUF_SIZE;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fa0f37e4afe4..c0368152b721 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -538,20 +538,12 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
538 int index, 538 int index,
539 struct sk_buff_head *frames) 539 struct sk_buff_head *frames)
540{ 540{
541 struct ieee80211_supported_band *sband;
542 struct ieee80211_rate *rate = NULL;
543 struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; 541 struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
544 struct ieee80211_rx_status *status;
545 542
546 if (!skb) 543 if (!skb)
547 goto no_frame; 544 goto no_frame;
548 545
549 status = IEEE80211_SKB_RXCB(skb); 546 /* release the frame from the reorder ring buffer */
550
551 /* release the reordered frames to stack */
552 sband = hw->wiphy->bands[status->band];
553 if (!(status->flag & RX_FLAG_HT))
554 rate = &sband->bitrates[status->rate_idx];
555 tid_agg_rx->stored_mpdu_num--; 547 tid_agg_rx->stored_mpdu_num--;
556 tid_agg_rx->reorder_buf[index] = NULL; 548 tid_agg_rx->reorder_buf[index] = NULL;
557 __skb_queue_tail(frames, skb); 549 __skb_queue_tail(frames, skb);
@@ -580,9 +572,78 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
580 * frames that have not yet been received are assumed to be lost and the skb 572 * frames that have not yet been received are assumed to be lost and the skb
581 * can be released for processing. This may also release other skb's from the 573 * can be released for processing. This may also release other skb's from the
582 * reorder buffer if there are no additional gaps between the frames. 574 * reorder buffer if there are no additional gaps between the frames.
575 *
576 * Callers must hold tid_agg_rx->reorder_lock.
583 */ 577 */
584#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 578#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
585 579
580static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
581 struct tid_ampdu_rx *tid_agg_rx,
582 struct sk_buff_head *frames)
583{
584 int index, j;
585
586 /* release the buffer until next missing frame */
587 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
588 tid_agg_rx->buf_size;
589 if (!tid_agg_rx->reorder_buf[index] &&
590 tid_agg_rx->stored_mpdu_num > 1) {
591 /*
592 * No buffers ready to be released, but check whether any
593 * frames in the reorder buffer have timed out.
594 */
595 int skipped = 1;
596 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
597 j = (j + 1) % tid_agg_rx->buf_size) {
598 if (!tid_agg_rx->reorder_buf[j]) {
599 skipped++;
600 continue;
601 }
602 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
603 HT_RX_REORDER_BUF_TIMEOUT))
604 goto set_release_timer;
605
606#ifdef CONFIG_MAC80211_HT_DEBUG
607 if (net_ratelimit())
608 wiphy_debug(hw->wiphy,
609 "release an RX reorder frame due to timeout on earlier frames\n");
610#endif
611 ieee80211_release_reorder_frame(hw, tid_agg_rx,
612 j, frames);
613
614 /*
615 * Increment the head seq# also for the skipped slots.
616 */
617 tid_agg_rx->head_seq_num =
618 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
619 skipped = 0;
620 }
621 } else while (tid_agg_rx->reorder_buf[index]) {
622 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
623 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
624 tid_agg_rx->buf_size;
625 }
626
627 if (tid_agg_rx->stored_mpdu_num) {
628 j = index = seq_sub(tid_agg_rx->head_seq_num,
629 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
630
631 for (; j != (index - 1) % tid_agg_rx->buf_size;
632 j = (j + 1) % tid_agg_rx->buf_size) {
633 if (tid_agg_rx->reorder_buf[j])
634 break;
635 }
636
637 set_release_timer:
638
639 mod_timer(&tid_agg_rx->reorder_timer,
640 tid_agg_rx->reorder_time[j] +
641 HT_RX_REORDER_BUF_TIMEOUT);
642 } else {
643 del_timer(&tid_agg_rx->reorder_timer);
644 }
645}
646
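
The release loop above rests on modular sequence arithmetic: a frame's slot in the ring is (head_seq_num - ssn) mod buf_size, and head_seq_num advances modulo the 12-bit 802.11 sequence space, so the subtraction has to mask rather than compare directly. The helpers, sketched standalone with made-up demo values:

    #include <stdio.h>

    #define SEQ_MASK 0xfffu   /* 802.11 sequence numbers are 12 bits wide */

    static unsigned int seq_sub(unsigned int a, unsigned int b)
    {
            return (a - b) & SEQ_MASK;
    }

    static unsigned int seq_inc(unsigned int a)
    {
            return (a + 1) & SEQ_MASK;
    }

    int main(void)
    {
            unsigned int ssn = 4090, head = 5, buf_size = 64;  /* demo values */

            /* wrap-around: head 5 is 11 frames past ssn 4090 */
            printf("reorder index = %u\n", seq_sub(head, ssn) % buf_size);
            printf("next head     = %u\n", seq_inc(head));
            return 0;
    }
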
586/* 647/*
587 * As this function belongs to the RX path it must be under 648 * As this function belongs to the RX path it must be under
588 * rcu_read_lock protection. It returns false if the frame 649 * rcu_read_lock protection. It returns false if the frame
@@ -598,14 +659,16 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
598 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; 659 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
599 u16 head_seq_num, buf_size; 660 u16 head_seq_num, buf_size;
600 int index; 661 int index;
662 bool ret = true;
601 663
602 buf_size = tid_agg_rx->buf_size; 664 buf_size = tid_agg_rx->buf_size;
603 head_seq_num = tid_agg_rx->head_seq_num; 665 head_seq_num = tid_agg_rx->head_seq_num;
604 666
667 spin_lock(&tid_agg_rx->reorder_lock);
605 /* frame with out of date sequence number */ 668 /* frame with out of date sequence number */
606 if (seq_less(mpdu_seq_num, head_seq_num)) { 669 if (seq_less(mpdu_seq_num, head_seq_num)) {
607 dev_kfree_skb(skb); 670 dev_kfree_skb(skb);
608 return true; 671 goto out;
609 } 672 }
610 673
611 /* 674 /*
@@ -626,7 +689,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
626 /* check if we already stored this frame */ 689 /* check if we already stored this frame */
627 if (tid_agg_rx->reorder_buf[index]) { 690 if (tid_agg_rx->reorder_buf[index]) {
628 dev_kfree_skb(skb); 691 dev_kfree_skb(skb);
629 return true; 692 goto out;
630 } 693 }
631 694
632 /* 695 /*
@@ -636,58 +699,19 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
636 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 699 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
637 tid_agg_rx->stored_mpdu_num == 0) { 700 tid_agg_rx->stored_mpdu_num == 0) {
638 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 701 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
639 return false; 702 ret = false;
703 goto out;
640 } 704 }
641 705
642 /* put the frame in the reordering buffer */ 706 /* put the frame in the reordering buffer */
643 tid_agg_rx->reorder_buf[index] = skb; 707 tid_agg_rx->reorder_buf[index] = skb;
644 tid_agg_rx->reorder_time[index] = jiffies; 708 tid_agg_rx->reorder_time[index] = jiffies;
645 tid_agg_rx->stored_mpdu_num++; 709 tid_agg_rx->stored_mpdu_num++;
646 /* release the buffer until next missing frame */ 710 ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
647 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
648 tid_agg_rx->buf_size;
649 if (!tid_agg_rx->reorder_buf[index] &&
650 tid_agg_rx->stored_mpdu_num > 1) {
651 /*
652 * No buffers ready to be released, but check whether any
653 * frames in the reorder buffer have timed out.
654 */
655 int j;
656 int skipped = 1;
657 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
658 j = (j + 1) % tid_agg_rx->buf_size) {
659 if (!tid_agg_rx->reorder_buf[j]) {
660 skipped++;
661 continue;
662 }
663 if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
664 HT_RX_REORDER_BUF_TIMEOUT))
665 break;
666
667#ifdef CONFIG_MAC80211_HT_DEBUG
668 if (net_ratelimit())
669 printk(KERN_DEBUG "%s: release an RX reorder "
670 "frame due to timeout on earlier "
671 "frames\n",
672 wiphy_name(hw->wiphy));
673#endif
674 ieee80211_release_reorder_frame(hw, tid_agg_rx,
675 j, frames);
676
677 /*
678 * Increment the head seq# also for the skipped slots.
679 */
680 tid_agg_rx->head_seq_num =
681 (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
682 skipped = 0;
683 }
684 } else while (tid_agg_rx->reorder_buf[index]) {
685 ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
686 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
687 tid_agg_rx->buf_size;
688 }
689 711
690 return true; 712 out:
713 spin_unlock(&tid_agg_rx->reorder_lock);
714 return ret;
691} 715}
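
Note the shape of the rewrite: the function now takes reorder_lock once on entry and funnels every early exit through a single out: label, so lock and unlock stay strictly paired no matter which branch fires. The same pattern in portable C, with a pthread mutex standing in for the spinlock and the checks reduced to stubs:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t reorder_lock = PTHREAD_MUTEX_INITIALIZER;

    /* illustrative stand-in for the stale-sequence-number test */
    static bool frame_is_stale(int seq, int head)
    {
            return seq < head;
    }

    static bool manage_reorder_buf(int seq, int head)
    {
            bool ret = true;

            pthread_mutex_lock(&reorder_lock);

            if (frame_is_stale(seq, head))
                    goto out;   /* drop, but still release the lock */

            /* ... store the frame, release in-order frames ... */
            ret = false;        /* caller should process it directly */
    out:
            pthread_mutex_unlock(&reorder_lock);
            return ret;
    }

    int main(void)
    {
            printf("stale handled: %d\n", manage_reorder_buf(3, 7));
            printf("fresh handled: %d\n", manage_reorder_buf(9, 7));
            return 0;
    }
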
692 716
693/* 717/*
@@ -873,6 +897,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
873 897
874 if (!is_multicast_ether_addr(hdr->addr1) && stakey) { 898 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
875 rx->key = stakey; 899 rx->key = stakey;
900 if ((status->flag & RX_FLAG_DECRYPTED) &&
901 (status->flag & RX_FLAG_IV_STRIPPED))
902 return RX_CONTINUE;
876 /* Skip decryption if the frame is not protected. */ 903 /* Skip decryption if the frame is not protected. */
877 if (!ieee80211_has_protected(fc)) 904 if (!ieee80211_has_protected(fc))
878 return RX_CONTINUE; 905 return RX_CONTINUE;
@@ -935,7 +962,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
935 * pairwise or station-to-station keys, but for WEP we allow 962 * pairwise or station-to-station keys, but for WEP we allow
936 * using a key index as well. 963 * using a key index as well.
937 */ 964 */
938 if (rx->key && rx->key->conf.alg != ALG_WEP && 965 if (rx->key && rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
966 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
939 !is_multicast_ether_addr(hdr->addr1)) 967 !is_multicast_ether_addr(hdr->addr1))
940 rx->key = NULL; 968 rx->key = NULL;
941 } 969 }
@@ -951,8 +979,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
951 return RX_DROP_UNUSABLE; 979 return RX_DROP_UNUSABLE;
952 /* the hdr variable is invalid now! */ 980 /* the hdr variable is invalid now! */
953 981
954 switch (rx->key->conf.alg) { 982 switch (rx->key->conf.cipher) {
955 case ALG_WEP: 983 case WLAN_CIPHER_SUITE_WEP40:
984 case WLAN_CIPHER_SUITE_WEP104:
956 /* Check for weak IVs if possible */ 985 /* Check for weak IVs if possible */
957 if (rx->sta && ieee80211_is_data(fc) && 986 if (rx->sta && ieee80211_is_data(fc) &&
958 (!(status->flag & RX_FLAG_IV_STRIPPED) || 987 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
@@ -962,15 +991,21 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
962 991
963 result = ieee80211_crypto_wep_decrypt(rx); 992 result = ieee80211_crypto_wep_decrypt(rx);
964 break; 993 break;
965 case ALG_TKIP: 994 case WLAN_CIPHER_SUITE_TKIP:
966 result = ieee80211_crypto_tkip_decrypt(rx); 995 result = ieee80211_crypto_tkip_decrypt(rx);
967 break; 996 break;
968 case ALG_CCMP: 997 case WLAN_CIPHER_SUITE_CCMP:
969 result = ieee80211_crypto_ccmp_decrypt(rx); 998 result = ieee80211_crypto_ccmp_decrypt(rx);
970 break; 999 break;
971 case ALG_AES_CMAC: 1000 case WLAN_CIPHER_SUITE_AES_CMAC:
972 result = ieee80211_crypto_aes_cmac_decrypt(rx); 1001 result = ieee80211_crypto_aes_cmac_decrypt(rx);
973 break; 1002 break;
1003 default:
1004 /*
1005 * We can reach here only with HW-only algorithms
1006 * but why didn't it decrypt the frame?!
1007 */
1008 return RX_DROP_UNUSABLE;
974 } 1009 }
975 1010
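
The decrypt dispatch now switches on the full 32-bit cipher suite selector instead of the old internal ALG_* enum, which makes WEP-40 and WEP-104 distinct case labels sharing one path and gives unknown (hardware-only) suites an explicit default that drops the frame. A sketch of the selector layout and dispatch; the numeric values mirror the standard's 00-0F-AC:type encoding:

    #include <stdint.h>
    #include <stdio.h>

    /* 802.11 cipher suite selector: OUI 00-0F-AC in the high bytes, suite
     * type in the low byte -- matching the WLAN_CIPHER_SUITE_* values */
    #define SUITE(type) ((uint32_t)(0x000FAC00 | (type)))

    #define CIPHER_WEP40  SUITE(1)
    #define CIPHER_TKIP   SUITE(2)
    #define CIPHER_CCMP   SUITE(4)
    #define CIPHER_WEP104 SUITE(5)
    #define CIPHER_CMAC   SUITE(6)

    static const char *decrypt_path(uint32_t cipher)
    {
            switch (cipher) {
            case CIPHER_WEP40:
            case CIPHER_WEP104:
                    return "wep";      /* both key lengths share one path */
            case CIPHER_TKIP:
                    return "tkip";
            case CIPHER_CCMP:
                    return "ccmp";
            case CIPHER_CMAC:
                    return "aes_cmac";
            default:
                    return "drop";     /* hw-only suite the hw failed to handle */
            }
    }

    int main(void)
    {
            printf("%s\n", decrypt_path(CIPHER_WEP104));
            printf("%s\n", decrypt_path(SUITE(0x99)));
            return 0;
    }
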
976 /* either the frame has been decrypted or will be dropped */ 1011 /* either the frame has been decrypted or will be dropped */
@@ -1265,7 +1300,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1265 /* This is the first fragment of a new frame. */ 1300 /* This is the first fragment of a new frame. */
1266 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 1301 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1267 rx->queue, &(rx->skb)); 1302 rx->queue, &(rx->skb));
1268 if (rx->key && rx->key->conf.alg == ALG_CCMP && 1303 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1269 ieee80211_has_protected(fc)) { 1304 ieee80211_has_protected(fc)) {
1270 int queue = ieee80211_is_mgmt(fc) ? 1305 int queue = ieee80211_is_mgmt(fc) ?
1271 NUM_RX_DATA_QUEUES : rx->queue; 1306 NUM_RX_DATA_QUEUES : rx->queue;
@@ -1294,7 +1329,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1294 int i; 1329 int i;
1295 u8 pn[CCMP_PN_LEN], *rpn; 1330 u8 pn[CCMP_PN_LEN], *rpn;
1296 int queue; 1331 int queue;
1297 if (!rx->key || rx->key->conf.alg != ALG_CCMP) 1332 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
1298 return RX_DROP_UNUSABLE; 1333 return RX_DROP_UNUSABLE;
1299 memcpy(pn, entry->last_pn, CCMP_PN_LEN); 1334 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
1300 for (i = CCMP_PN_LEN - 1; i >= 0; i--) { 1335 for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
@@ -1492,7 +1527,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1492 * Allow EAPOL frames to us/the PAE group address regardless 1527 * Allow EAPOL frames to us/the PAE group address regardless
1493 * of whether the frame was encrypted or not. 1528 * of whether the frame was encrypted or not.
1494 */ 1529 */
1495 if (ehdr->h_proto == htons(ETH_P_PAE) && 1530 if (ehdr->h_proto == rx->sdata->control_port_protocol &&
1496 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 || 1531 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
1497 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) 1532 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
1498 return true; 1533 return true;
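
With the ethertype configurable, the RX path compares the frame's h_proto against whatever userspace registered at association time rather than hard-coding ETH_P_PAE; both sides are big-endian, so the comparison needs no byte swapping. In isolation, under those assumptions:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_PAE 0x888E   /* IEEE 802.1X EAPOL, the usual default */

    struct port_cfg {
            uint16_t control_port_protocol;  /* stored big-endian, like __be16 */
    };

    static bool is_control_port_frame(const struct port_cfg *cfg,
                                      uint16_t h_proto_be)
    {
            return h_proto_be == cfg->control_port_protocol;
    }

    int main(void)
    {
            struct port_cfg cfg = { .control_port_protocol = htons(ETH_P_PAE) };

            printf("%d\n", is_control_port_frame(&cfg, htons(ETH_P_PAE))); /* 1 */
            printf("%d\n", is_control_port_frame(&cfg, htons(0x0800)));    /* 0 */
            return 0;
    }
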
@@ -1909,13 +1944,36 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
1909} 1944}
1910 1945
1911static ieee80211_rx_result debug_noinline 1946static ieee80211_rx_result debug_noinline
1947ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
1948{
1949 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1950
1951 /*
1952 * From here on, look only at management frames.
1953 * Data and control frames are already handled,
1954 * and unknown (reserved) frames are useless.
1955 */
1956 if (rx->skb->len < 24)
1957 return RX_DROP_MONITOR;
1958
1959 if (!ieee80211_is_mgmt(mgmt->frame_control))
1960 return RX_DROP_MONITOR;
1961
1962 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1963 return RX_DROP_MONITOR;
1964
1965 if (ieee80211_drop_unencrypted_mgmt(rx))
1966 return RX_DROP_UNUSABLE;
1967
1968 return RX_CONTINUE;
1969}
1970
1971static ieee80211_rx_result debug_noinline
1912ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 1972ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1913{ 1973{
1914 struct ieee80211_local *local = rx->local; 1974 struct ieee80211_local *local = rx->local;
1915 struct ieee80211_sub_if_data *sdata = rx->sdata; 1975 struct ieee80211_sub_if_data *sdata = rx->sdata;
1916 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 1976 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1917 struct sk_buff *nskb;
1918 struct ieee80211_rx_status *status;
1919 int len = rx->skb->len; 1977 int len = rx->skb->len;
1920 1978
1921 if (!ieee80211_is_action(mgmt->frame_control)) 1979 if (!ieee80211_is_action(mgmt->frame_control))
@@ -1931,9 +1989,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1931 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1989 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1932 return RX_DROP_UNUSABLE; 1990 return RX_DROP_UNUSABLE;
1933 1991
1934 if (ieee80211_drop_unencrypted_mgmt(rx))
1935 return RX_DROP_UNUSABLE;
1936
1937 switch (mgmt->u.action.category) { 1992 switch (mgmt->u.action.category) {
1938 case WLAN_CATEGORY_BACK: 1993 case WLAN_CATEGORY_BACK:
1939 /* 1994 /*
@@ -2024,17 +2079,36 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2024 goto queue; 2079 goto queue;
2025 } 2080 }
2026 2081
2082 return RX_CONTINUE;
2083
2027 invalid: 2084 invalid:
2028 /* 2085 rx->flags |= IEEE80211_MALFORMED_ACTION_FRM;
2029 * For AP mode, hostapd is responsible for handling any action 2086 /* will return in the next handlers */
2030 * frames that we didn't handle, including returning unknown 2087 return RX_CONTINUE;
2031 * ones. For all other modes we will return them to the sender, 2088
2032 * setting the 0x80 bit in the action category, as required by 2089 handled:
2033 * 802.11-2007 7.3.1.11. 2090 if (rx->sta)
2034 */ 2091 rx->sta->rx_packets++;
2035 if (sdata->vif.type == NL80211_IFTYPE_AP || 2092 dev_kfree_skb(rx->skb);
2036 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 2093 return RX_QUEUED;
2037 return RX_DROP_MONITOR; 2094
2095 queue:
2096 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2097 skb_queue_tail(&sdata->skb_queue, rx->skb);
2098 ieee80211_queue_work(&local->hw, &sdata->work);
2099 if (rx->sta)
2100 rx->sta->rx_packets++;
2101 return RX_QUEUED;
2102}
2103
2104static ieee80211_rx_result debug_noinline
2105ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
2106{
2107 struct ieee80211_rx_status *status;
2108
2109 /* skip known-bad action frames and return them in the next handler */
2110 if (rx->flags & IEEE80211_MALFORMED_ACTION_FRM)
2111 return RX_CONTINUE;
2038 2112
2039 /* 2113 /*
2040 * Getting here means the kernel doesn't know how to handle 2114 * Getting here means the kernel doesn't know how to handle
@@ -2044,10 +2118,44 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2044 */ 2118 */
2045 status = IEEE80211_SKB_RXCB(rx->skb); 2119 status = IEEE80211_SKB_RXCB(rx->skb);
2046 2120
2047 if (cfg80211_rx_action(rx->sdata->dev, status->freq, 2121 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq,
2048 rx->skb->data, rx->skb->len, 2122 rx->skb->data, rx->skb->len,
2049 GFP_ATOMIC)) 2123 GFP_ATOMIC)) {
2050 goto handled; 2124 if (rx->sta)
2125 rx->sta->rx_packets++;
2126 dev_kfree_skb(rx->skb);
2127 return RX_QUEUED;
2128 }
2129
2130
2131 return RX_CONTINUE;
2132}
2133
2134static ieee80211_rx_result debug_noinline
2135ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
2136{
2137 struct ieee80211_local *local = rx->local;
2138 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
2139 struct sk_buff *nskb;
2140 struct ieee80211_sub_if_data *sdata = rx->sdata;
2141
2142 if (!ieee80211_is_action(mgmt->frame_control))
2143 return RX_CONTINUE;
2144
2145 /*
2146 * For AP mode, hostapd is responsible for handling any action
2147 * frames that we didn't handle, including returning unknown
2148 * ones. For all other modes we will return them to the sender,
2149 * setting the 0x80 bit in the action category, as required by
2150 * 802.11-2007 7.3.1.11.
2151 * Newer versions of hostapd shall also use the management frame
2152 * registration mechanisms, but older ones still use cooked
2153 * monitor interfaces so push all frames there.
2154 */
2155 if (!(rx->flags & IEEE80211_MALFORMED_ACTION_FRM) &&
2156 (sdata->vif.type == NL80211_IFTYPE_AP ||
2157 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
2158 return RX_DROP_MONITOR;
2051 2159
2052 /* do not return rejected action frames */ 2160 /* do not return rejected action frames */
2053 if (mgmt->u.action.category & 0x80) 2161 if (mgmt->u.action.category & 0x80)
@@ -2066,20 +2174,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2066 2174
2067 ieee80211_tx_skb(rx->sdata, nskb); 2175 ieee80211_tx_skb(rx->sdata, nskb);
2068 } 2176 }
2069
2070 handled:
2071 if (rx->sta)
2072 rx->sta->rx_packets++;
2073 dev_kfree_skb(rx->skb); 2177 dev_kfree_skb(rx->skb);
2074 return RX_QUEUED; 2178 return RX_QUEUED;
2075
2076 queue:
2077 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2078 skb_queue_tail(&sdata->skb_queue, rx->skb);
2079 ieee80211_queue_work(&local->hw, &sdata->work);
2080 if (rx->sta)
2081 rx->sta->rx_packets++;
2082 return RX_QUEUED;
2083} 2179}
2084 2180
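
The returned-frame rule, kept from the old code: per 802.11-2007 7.3.1.11 an unhandled action frame is sent back with bit 0x80 set in its category, and a frame that already carries that bit is itself a rejection and must not be bounced again, or two peers could ping-pong rejections forever. The category handling in isolation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* returns true if a return frame should be generated, flipping the
     * category bit in place as 802.11-2007 7.3.1.11 requires */
    static bool make_action_return(uint8_t *category)
    {
            if (*category & 0x80)
                    return false;  /* already a rejected frame, do not bounce */
            *category |= 0x80;
            return true;
    }

    int main(void)
    {
            uint8_t cat = 0x7f;    /* an unknown (reserved) category */

            if (make_action_return(&cat))
                    printf("send return frame, category 0x%02x\n", cat);
            if (!make_action_return(&cat))
                    printf("0x%02x not bounced again\n", cat);
            return 0;
    }
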
2085static ieee80211_rx_result debug_noinline 2181static ieee80211_rx_result debug_noinline
@@ -2090,15 +2186,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2090 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2186 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2091 __le16 stype; 2187 __le16 stype;
2092 2188
2093 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
2094 return RX_DROP_MONITOR;
2095
2096 if (rx->skb->len < 24)
2097 return RX_DROP_MONITOR;
2098
2099 if (ieee80211_drop_unencrypted_mgmt(rx))
2100 return RX_DROP_UNUSABLE;
2101
2102 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb); 2189 rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
2103 if (rxs != RX_CONTINUE) 2190 if (rxs != RX_CONTINUE)
2104 return rxs; 2191 return rxs;
@@ -2267,19 +2354,46 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2267 dev_kfree_skb(skb); 2354 dev_kfree_skb(skb);
2268} 2355}
2269 2356
2357static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
2358 ieee80211_rx_result res)
2359{
2360 switch (res) {
2361 case RX_DROP_MONITOR:
2362 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2363 if (rx->sta)
2364 rx->sta->rx_dropped++;
2365 /* fall through */
2366 case RX_CONTINUE: {
2367 struct ieee80211_rate *rate = NULL;
2368 struct ieee80211_supported_band *sband;
2369 struct ieee80211_rx_status *status;
2370
2371 status = IEEE80211_SKB_RXCB((rx->skb));
2372
2373 sband = rx->local->hw.wiphy->bands[status->band];
2374 if (!(status->flag & RX_FLAG_HT))
2375 rate = &sband->bitrates[status->rate_idx];
2376
2377 ieee80211_rx_cooked_monitor(rx, rate);
2378 break;
2379 }
2380 case RX_DROP_UNUSABLE:
2381 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
2382 if (rx->sta)
2383 rx->sta->rx_dropped++;
2384 dev_kfree_skb(rx->skb);
2385 break;
2386 case RX_QUEUED:
2387 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
2388 break;
2389 }
2390}
2270 2391
2271static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, 2392static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
2272 struct ieee80211_rx_data *rx, 2393 struct sk_buff_head *frames)
2273 struct sk_buff *skb,
2274 struct ieee80211_rate *rate)
2275{ 2394{
2276 struct sk_buff_head reorder_release;
2277 ieee80211_rx_result res = RX_DROP_MONITOR; 2395 ieee80211_rx_result res = RX_DROP_MONITOR;
2278 2396 struct sk_buff *skb;
2279 __skb_queue_head_init(&reorder_release);
2280
2281 rx->skb = skb;
2282 rx->sdata = sdata;
2283 2397
2284#define CALL_RXH(rxh) \ 2398#define CALL_RXH(rxh) \
2285 do { \ 2399 do { \
@@ -2288,17 +2402,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2288 goto rxh_next; \ 2402 goto rxh_next; \
2289 } while (0); 2403 } while (0);
2290 2404
2291 /* 2405 while ((skb = __skb_dequeue(frames))) {
2292 * NB: the rxh_next label works even if we jump
2293 * to it from here because then the list will
2294 * be empty, which is a trivial check
2295 */
2296 CALL_RXH(ieee80211_rx_h_passive_scan)
2297 CALL_RXH(ieee80211_rx_h_check)
2298
2299 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
2300
2301 while ((skb = __skb_dequeue(&reorder_release))) {
2302 /* 2406 /*
2303 * all the other fields are valid across frames 2407 * all the other fields are valid across frames
2304 * that belong to an aMPDU since they are on the 2408 * that belong to an aMPDU since they are on the
@@ -2316,42 +2420,95 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2316 CALL_RXH(ieee80211_rx_h_remove_qos_control) 2420 CALL_RXH(ieee80211_rx_h_remove_qos_control)
2317 CALL_RXH(ieee80211_rx_h_amsdu) 2421 CALL_RXH(ieee80211_rx_h_amsdu)
2318#ifdef CONFIG_MAC80211_MESH 2422#ifdef CONFIG_MAC80211_MESH
2319 if (ieee80211_vif_is_mesh(&sdata->vif)) 2423 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
2320 CALL_RXH(ieee80211_rx_h_mesh_fwding); 2424 CALL_RXH(ieee80211_rx_h_mesh_fwding);
2321#endif 2425#endif
2322 CALL_RXH(ieee80211_rx_h_data) 2426 CALL_RXH(ieee80211_rx_h_data)
2323 2427
2324 /* special treatment -- needs the queue */ 2428 /* special treatment -- needs the queue */
2325 res = ieee80211_rx_h_ctrl(rx, &reorder_release); 2429 res = ieee80211_rx_h_ctrl(rx, frames);
2326 if (res != RX_CONTINUE) 2430 if (res != RX_CONTINUE)
2327 goto rxh_next; 2431 goto rxh_next;
2328 2432
2433 CALL_RXH(ieee80211_rx_h_mgmt_check)
2329 CALL_RXH(ieee80211_rx_h_action) 2434 CALL_RXH(ieee80211_rx_h_action)
2435 CALL_RXH(ieee80211_rx_h_userspace_mgmt)
2436 CALL_RXH(ieee80211_rx_h_action_return)
2330 CALL_RXH(ieee80211_rx_h_mgmt) 2437 CALL_RXH(ieee80211_rx_h_mgmt)
2331 2438
2439 rxh_next:
2440 ieee80211_rx_handlers_result(rx, res);
2441
2332#undef CALL_RXH 2442#undef CALL_RXH
2443 }
2444}
2445
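
The CALL_RXH macro turns the handler list into an early-exit pipeline: every handler gets the same rx context, RX_CONTINUE passes the frame to the next stage, and any other result jumps to rxh_next where ieee80211_rx_handlers_result() files the outcome. The control flow, modeled standalone with two stub handlers:

    #include <stdio.h>

    enum rx_result { RX_CONTINUE, RX_QUEUED, RX_DROP_UNUSABLE, RX_DROP_MONITOR };

    struct rx_data { int len; };

    #define CALL_RXH(rxh)                   \
            do {                            \
                    res = rxh(rx);          \
                    if (res != RX_CONTINUE) \
                            goto rxh_next;  \
            } while (0)

    static enum rx_result h_check(struct rx_data *rx)
    {
            return rx->len < 24 ? RX_DROP_MONITOR : RX_CONTINUE;
    }

    static enum rx_result h_consume(struct rx_data *rx)
    {
            (void)rx;
            return RX_QUEUED;  /* this stage takes ownership of the frame */
    }

    static void run_handlers(struct rx_data *rx)
    {
            enum rx_result res = RX_DROP_MONITOR;

            CALL_RXH(h_check);
            CALL_RXH(h_consume);
    rxh_next:
            printf("result: %d\n", res);
    }

    int main(void)
    {
            struct rx_data rx = { .len = 40 };
            run_handlers(&rx);
            return 0;
    }
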
2446static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
2447 struct ieee80211_rx_data *rx,
2448 struct sk_buff *skb)
2449{
2450 struct sk_buff_head reorder_release;
2451 ieee80211_rx_result res = RX_DROP_MONITOR;
2452
2453 __skb_queue_head_init(&reorder_release);
2454
2455 rx->skb = skb;
2456 rx->sdata = sdata;
2457
2458#define CALL_RXH(rxh) \
2459 do { \
2460 res = rxh(rx); \
2461 if (res != RX_CONTINUE) \
2462 goto rxh_next; \
2463 } while (0);
2464
2465 CALL_RXH(ieee80211_rx_h_passive_scan)
2466 CALL_RXH(ieee80211_rx_h_check)
2467
2468 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
2469
2470 ieee80211_rx_handlers(rx, &reorder_release);
2471 return;
2333 2472
2334 rxh_next: 2473 rxh_next:
2335 switch (res) { 2474 ieee80211_rx_handlers_result(rx, res);
2336 case RX_DROP_MONITOR: 2475
2337 I802_DEBUG_INC(sdata->local->rx_handlers_drop); 2476#undef CALL_RXH
2338 if (rx->sta) 2477}
2339 rx->sta->rx_dropped++; 2478
2340 /* fall through */ 2479/*
2341 case RX_CONTINUE: 2480 * This function makes calls into the RX path. Therefore the
2342 ieee80211_rx_cooked_monitor(rx, rate); 2481 * caller must hold the sta_info->lock and everything has to
2343 break; 2482 * be under rcu_read_lock protection as well.
2344 case RX_DROP_UNUSABLE: 2483 */
2345 I802_DEBUG_INC(sdata->local->rx_handlers_drop); 2484void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2346 if (rx->sta) 2485{
2347 rx->sta->rx_dropped++; 2486 struct sk_buff_head frames;
2348 dev_kfree_skb(rx->skb); 2487 struct ieee80211_rx_data rx = { };
2349 break; 2488 struct tid_ampdu_rx *tid_agg_rx;
2350 case RX_QUEUED: 2489
2351 I802_DEBUG_INC(sdata->local->rx_handlers_queued); 2490 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
2352 break; 2491 if (!tid_agg_rx)
2353 } 2492 return;
2354 } 2493
2494 __skb_queue_head_init(&frames);
2495
2496 /* construct rx struct */
2497 rx.sta = sta;
2498 rx.sdata = sta->sdata;
2499 rx.local = sta->local;
2500 rx.queue = tid;
2501 rx.flags |= IEEE80211_RX_RA_MATCH;
2502
2503 if (unlikely(test_bit(SCAN_HW_SCANNING, &sta->local->scanning) ||
2504 test_bit(SCAN_OFF_CHANNEL, &sta->local->scanning)))
2505 rx.flags |= IEEE80211_RX_IN_SCAN;
2506
2507 spin_lock(&tid_agg_rx->reorder_lock);
2508 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx, &frames);
2509 spin_unlock(&tid_agg_rx->reorder_lock);
2510
2511 ieee80211_rx_handlers(&rx, &frames);
2355} 2512}
2356 2513
2357/* main receive path */ 2514/* main receive path */
@@ -2431,9 +2588,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2431 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) 2588 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2432 return 0; 2589 return 0;
2433 break; 2590 break;
2434 case NL80211_IFTYPE_MONITOR: 2591 default:
2435 case NL80211_IFTYPE_UNSPECIFIED:
2436 case __NL80211_IFTYPE_AFTER_LAST:
2437 /* should never get here */ 2592 /* should never get here */
2438 WARN_ON(1); 2593 WARN_ON(1);
2439 break; 2594 break;
@@ -2447,8 +2602,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
2447 * be called with rcu_read_lock protection. 2602 * be called with rcu_read_lock protection.
2448 */ 2603 */
2449static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 2604static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2450 struct sk_buff *skb, 2605 struct sk_buff *skb)
2451 struct ieee80211_rate *rate)
2452{ 2606{
2453 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2607 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2454 struct ieee80211_local *local = hw_to_local(hw); 2608 struct ieee80211_local *local = hw_to_local(hw);
@@ -2550,13 +2704,12 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2550 skb_new = skb_copy(skb, GFP_ATOMIC); 2704 skb_new = skb_copy(skb, GFP_ATOMIC);
2551 if (!skb_new) { 2705 if (!skb_new) {
2552 if (net_ratelimit()) 2706 if (net_ratelimit())
2553 printk(KERN_DEBUG "%s: failed to copy " 2707 wiphy_debug(local->hw.wiphy,
2554 "multicast frame for %s\n", 2708 "failed to copy multicast frame for %s\n",
2555 wiphy_name(local->hw.wiphy), 2709 prev->name);
2556 prev->name);
2557 goto next; 2710 goto next;
2558 } 2711 }
2559 ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate); 2712 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
2560next: 2713next:
2561 prev = sdata; 2714 prev = sdata;
2562 } 2715 }
@@ -2572,7 +2725,7 @@ next:
2572 } 2725 }
2573 } 2726 }
2574 if (prev) 2727 if (prev)
2575 ieee80211_invoke_rx_handlers(prev, &rx, skb, rate); 2728 ieee80211_invoke_rx_handlers(prev, &rx, skb);
2576 else 2729 else
2577 dev_kfree_skb(skb); 2730 dev_kfree_skb(skb);
2578} 2731}
@@ -2615,28 +2768,37 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2615 if (WARN_ON(!local->started)) 2768 if (WARN_ON(!local->started))
2616 goto drop; 2769 goto drop;
2617 2770
2618 if (status->flag & RX_FLAG_HT) { 2771 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
2619 /* 2772 /*
2620 * rate_idx is MCS index, which can be [0-76] as documented on: 2773 * Validate the rate, unless a PLCP error means that
2621 * 2774 * we probably can't have a valid rate here anyway.
2622 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2623 *
2624 * Anything else would be some sort of driver or hardware error.
2625 * The driver should catch hardware errors.
2626 */ 2775 */
2627 if (WARN((status->rate_idx < 0 || 2776
2628 status->rate_idx > 76), 2777 if (status->flag & RX_FLAG_HT) {
2629 "Rate marked as an HT rate but passed " 2778 /*
2630 "status->rate_idx is not " 2779 * rate_idx is MCS index, which can be [0-76]
2631 "an MCS index [0-76]: %d (0x%02x)\n", 2780 * as documented on:
2632 status->rate_idx, 2781 *
2633 status->rate_idx)) 2782 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
2634 goto drop; 2783 *
2635 } else { 2784 * Anything else would be some sort of driver or
2636 if (WARN_ON(status->rate_idx < 0 || 2785 * hardware error. The driver should catch hardware
2637 status->rate_idx >= sband->n_bitrates)) 2786 * errors.
2638 goto drop; 2787 */
2639 rate = &sband->bitrates[status->rate_idx]; 2788 if (WARN((status->rate_idx < 0 ||
2789 status->rate_idx > 76),
2790 "Rate marked as an HT rate but passed "
2791 "status->rate_idx is not "
2792 "an MCS index [0-76]: %d (0x%02x)\n",
2793 status->rate_idx,
2794 status->rate_idx))
2795 goto drop;
2796 } else {
2797 if (WARN_ON(status->rate_idx < 0 ||
2798 status->rate_idx >= sband->n_bitrates))
2799 goto drop;
2800 rate = &sband->bitrates[status->rate_idx];
2801 }
2640 } 2802 }
2641 2803
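
The reshuffled validation skips the rate check entirely when the PLCP CRC failed, since no meaningful rate can be expected then; otherwise an HT frame must carry an MCS index in [0, 76] and a legacy frame an index into the band's bitrate table. Condensed, with illustrative flag bits:

    #include <stdbool.h>

    #define RX_FLAG_FAILED_PLCP_CRC (1 << 0)  /* bit values are illustrative */
    #define RX_FLAG_HT              (1 << 1)

    static bool rate_idx_valid(unsigned int flags, int rate_idx, int n_bitrates)
    {
            if (flags & RX_FLAG_FAILED_PLCP_CRC)
                    return true;  /* no valid rate expected, skip the check */
            if (flags & RX_FLAG_HT)
                    return rate_idx >= 0 && rate_idx <= 76;  /* MCS 0..76 */
            return rate_idx >= 0 && rate_idx < n_bitrates;
    }

    int main(void)
    {
            return rate_idx_valid(RX_FLAG_HT, 7, 0) ? 0 : 1;
    }
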
2642 /* 2804 /*
@@ -2658,7 +2820,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
2658 return; 2820 return;
2659 } 2821 }
2660 2822
2661 __ieee80211_rx_handle_packet(hw, skb, rate); 2823 __ieee80211_rx_handle_packet(hw, skb);
2662 2824
2663 rcu_read_unlock(); 2825 rcu_read_unlock();
2664 2826
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 872d7b6ef6b3..d60389ba9b95 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -248,14 +248,12 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
248 return true; 248 return true;
249} 249}
250 250
251void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 251static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
252{ 252{
253 struct ieee80211_local *local = hw_to_local(hw); 253 struct ieee80211_local *local = hw_to_local(hw);
254 bool was_hw_scan; 254 bool was_hw_scan;
255 255
256 trace_api_scan_completed(local, aborted); 256 mutex_lock(&local->mtx);
257
258 mutex_lock(&local->scan_mtx);
259 257
260 /* 258 /*
261 * It's ok to abort a not-yet-running scan (that 259 * It's ok to abort a not-yet-running scan (that
@@ -267,7 +265,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
267 aborted = true; 265 aborted = true;
268 266
269 if (WARN_ON(!local->scan_req)) { 267 if (WARN_ON(!local->scan_req)) {
270 mutex_unlock(&local->scan_mtx); 268 mutex_unlock(&local->mtx);
271 return; 269 return;
272 } 270 }
273 271
@@ -275,7 +273,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
275 if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) { 273 if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) {
276 ieee80211_queue_delayed_work(&local->hw, 274 ieee80211_queue_delayed_work(&local->hw,
277 &local->scan_work, 0); 275 &local->scan_work, 0);
278 mutex_unlock(&local->scan_mtx); 276 mutex_unlock(&local->mtx);
279 return; 277 return;
280 } 278 }
281 279
@@ -291,7 +289,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
291 local->scan_channel = NULL; 289 local->scan_channel = NULL;
292 290
293 /* we only have to protect scan_req and hw/sw scan */ 291 /* we only have to protect scan_req and hw/sw scan */
294 mutex_unlock(&local->scan_mtx); 292 mutex_unlock(&local->mtx);
295 293
296 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 294 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
297 if (was_hw_scan) 295 if (was_hw_scan)
@@ -304,12 +302,26 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
304 ieee80211_offchannel_return(local, true); 302 ieee80211_offchannel_return(local, true);
305 303
306 done: 304 done:
305 mutex_lock(&local->mtx);
307 ieee80211_recalc_idle(local); 306 ieee80211_recalc_idle(local);
307 mutex_unlock(&local->mtx);
308 ieee80211_mlme_notify_scan_completed(local); 308 ieee80211_mlme_notify_scan_completed(local);
309 ieee80211_ibss_notify_scan_completed(local); 309 ieee80211_ibss_notify_scan_completed(local);
310 ieee80211_mesh_notify_scan_completed(local); 310 ieee80211_mesh_notify_scan_completed(local);
311 ieee80211_queue_work(&local->hw, &local->work_work); 311 ieee80211_queue_work(&local->hw, &local->work_work);
312} 312}
313
314void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
315{
316 struct ieee80211_local *local = hw_to_local(hw);
317
318 trace_api_scan_completed(local, aborted);
319
320 set_bit(SCAN_COMPLETED, &local->scanning);
321 if (aborted)
322 set_bit(SCAN_ABORTED, &local->scanning);
323 ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
324}
313EXPORT_SYMBOL(ieee80211_scan_completed); 325EXPORT_SYMBOL(ieee80211_scan_completed);
314 326
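
The split matters because drivers may call ieee80211_scan_completed() from contexts where taking local->mtx is not allowed: the exported function now only records the outcome in two flag bits and kicks the scan work, and __ieee80211_scan_completed() does the real teardown later under the mutex. The deferral pattern, modeled with C11 atomics in place of the kernel's set_bit/test_and_clear_bit:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define SCAN_COMPLETED 0
    #define SCAN_ABORTED   1

    static atomic_ulong scanning;

    /* safe from any context: record the outcome, (conceptually) queue work */
    static void scan_completed(bool aborted)
    {
            atomic_fetch_or(&scanning, 1UL << SCAN_COMPLETED);
            if (aborted)
                    atomic_fetch_or(&scanning, 1UL << SCAN_ABORTED);
            /* ieee80211_queue_delayed_work(...) would go here */
    }

    /* runs later in process context, where taking mutexes is fine */
    static void scan_work(void)
    {
            unsigned long old = atomic_fetch_and(&scanning,
                            ~((1UL << SCAN_COMPLETED) | (1UL << SCAN_ABORTED)));

            if (old & (1UL << SCAN_COMPLETED))
                    printf("scan done, aborted=%d\n",
                           (old & (1UL << SCAN_ABORTED)) != 0);
    }

    int main(void)
    {
            scan_completed(true);
            scan_work();
            return 0;
    }
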
315static int ieee80211_start_sw_scan(struct ieee80211_local *local) 327static int ieee80211_start_sw_scan(struct ieee80211_local *local)
@@ -447,7 +459,7 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
447 459
448 /* if no more bands/channels left, complete scan and advance to the idle state */ 460 /* if no more bands/channels left, complete scan and advance to the idle state */
449 if (local->scan_channel_idx >= local->scan_req->n_channels) { 461 if (local->scan_channel_idx >= local->scan_req->n_channels) {
450 ieee80211_scan_completed(&local->hw, false); 462 __ieee80211_scan_completed(&local->hw, false);
451 return 1; 463 return 1;
452 } 464 }
453 465
@@ -639,17 +651,25 @@ void ieee80211_scan_work(struct work_struct *work)
639 struct ieee80211_sub_if_data *sdata = local->scan_sdata; 651 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
640 unsigned long next_delay = 0; 652 unsigned long next_delay = 0;
641 653
642 mutex_lock(&local->scan_mtx); 654 if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) {
655 bool aborted;
656
657 aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
658 __ieee80211_scan_completed(&local->hw, aborted);
659 return;
660 }
661
662 mutex_lock(&local->mtx);
643 if (!sdata || !local->scan_req) { 663 if (!sdata || !local->scan_req) {
644 mutex_unlock(&local->scan_mtx); 664 mutex_unlock(&local->mtx);
645 return; 665 return;
646 } 666 }
647 667
648 if (local->hw_scan_req) { 668 if (local->hw_scan_req) {
649 int rc = drv_hw_scan(local, sdata, local->hw_scan_req); 669 int rc = drv_hw_scan(local, sdata, local->hw_scan_req);
650 mutex_unlock(&local->scan_mtx); 670 mutex_unlock(&local->mtx);
651 if (rc) 671 if (rc)
652 ieee80211_scan_completed(&local->hw, true); 672 __ieee80211_scan_completed(&local->hw, true);
653 return; 673 return;
654 } 674 }
655 675
@@ -661,20 +681,20 @@ void ieee80211_scan_work(struct work_struct *work)
661 local->scan_sdata = NULL; 681 local->scan_sdata = NULL;
662 682
663 rc = __ieee80211_start_scan(sdata, req); 683 rc = __ieee80211_start_scan(sdata, req);
664 mutex_unlock(&local->scan_mtx); 684 mutex_unlock(&local->mtx);
665 685
666 if (rc) 686 if (rc)
667 ieee80211_scan_completed(&local->hw, true); 687 __ieee80211_scan_completed(&local->hw, true);
668 return; 688 return;
669 } 689 }
670 690
671 mutex_unlock(&local->scan_mtx); 691 mutex_unlock(&local->mtx);
672 692
673 /* 693 /*
674 * Avoid re-scheduling when the sdata is going away. 694 * Avoid re-scheduling when the sdata is going away.
675 */ 695 */
676 if (!ieee80211_sdata_running(sdata)) { 696 if (!ieee80211_sdata_running(sdata)) {
677 ieee80211_scan_completed(&local->hw, true); 697 __ieee80211_scan_completed(&local->hw, true);
678 return; 698 return;
679 } 699 }
680 700
@@ -711,9 +731,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
711{ 731{
712 int res; 732 int res;
713 733
714 mutex_lock(&sdata->local->scan_mtx); 734 mutex_lock(&sdata->local->mtx);
715 res = __ieee80211_start_scan(sdata, req); 735 res = __ieee80211_start_scan(sdata, req);
716 mutex_unlock(&sdata->local->scan_mtx); 736 mutex_unlock(&sdata->local->mtx);
717 737
718 return res; 738 return res;
719} 739}
@@ -726,7 +746,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
726 int ret = -EBUSY; 746 int ret = -EBUSY;
727 enum ieee80211_band band; 747 enum ieee80211_band band;
728 748
729 mutex_lock(&local->scan_mtx); 749 mutex_lock(&local->mtx);
730 750
731 /* busy scanning */ 751 /* busy scanning */
732 if (local->scan_req) 752 if (local->scan_req)
@@ -761,7 +781,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
761 781
762 ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req); 782 ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req);
763 unlock: 783 unlock:
764 mutex_unlock(&local->scan_mtx); 784 mutex_unlock(&local->mtx);
765 return ret; 785 return ret;
766} 786}
767 787
@@ -775,11 +795,11 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
775 * Only call this function when a scan can't be 795 * Only call this function when a scan can't be
776 * queued -- mostly at suspend under RTNL. 796 * queued -- mostly at suspend under RTNL.
777 */ 797 */
778 mutex_lock(&local->scan_mtx); 798 mutex_lock(&local->mtx);
779 abortscan = test_bit(SCAN_SW_SCANNING, &local->scanning) || 799 abortscan = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
780 (!local->scanning && local->scan_req); 800 (!local->scanning && local->scan_req);
781 mutex_unlock(&local->scan_mtx); 801 mutex_unlock(&local->mtx);
782 802
783 if (abortscan) 803 if (abortscan)
784 ieee80211_scan_completed(&local->hw, true); 804 __ieee80211_scan_completed(&local->hw, true);
785} 805}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 6d86f0c1ad04..44e10a9de0a7 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -125,7 +125,7 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
125 lockdep_is_held(&local->sta_mtx)); 125 lockdep_is_held(&local->sta_mtx));
126 while (sta) { 126 while (sta) {
127 if ((sta->sdata == sdata || 127 if ((sta->sdata == sdata ||
128 sta->sdata->bss == sdata->bss) && 128 (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
129 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 129 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
130 break; 130 break;
131 sta = rcu_dereference_check(sta->hnext, 131 sta = rcu_dereference_check(sta->hnext,
@@ -174,8 +174,7 @@ static void __sta_info_free(struct ieee80211_local *local,
174 } 174 }
175 175
176#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 176#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
177 printk(KERN_DEBUG "%s: Destroyed STA %pM\n", 177 wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr);
178 wiphy_name(local->hw.wiphy), sta->sta.addr);
179#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 178#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
180 179
181 kfree(sta); 180 kfree(sta);
@@ -262,8 +261,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
262 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX); 261 sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
263 262
264#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 263#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
265 printk(KERN_DEBUG "%s: Allocated STA %pM\n", 264 wiphy_debug(local->hw.wiphy, "Allocated STA %pM\n", sta->sta.addr);
266 wiphy_name(local->hw.wiphy), sta->sta.addr);
267#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 265#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
268 266
269#ifdef CONFIG_MAC80211_MESH 267#ifdef CONFIG_MAC80211_MESH
@@ -282,7 +280,7 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
282 unsigned long flags; 280 unsigned long flags;
283 int err = 0; 281 int err = 0;
284 282
285 WARN_ON(!mutex_is_locked(&local->sta_mtx)); 283 lockdep_assert_held(&local->sta_mtx);
286 284
287 /* notify driver */ 285 /* notify driver */
288 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 286 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -300,8 +298,9 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
300 sta->uploaded = true; 298 sta->uploaded = true;
301#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 299#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
302 if (async) 300 if (async)
303 printk(KERN_DEBUG "%s: Finished adding IBSS STA %pM\n", 301 wiphy_debug(local->hw.wiphy,
304 wiphy_name(local->hw.wiphy), sta->sta.addr); 302 "Finished adding IBSS STA %pM\n",
303 sta->sta.addr);
305#endif 304#endif
306 } 305 }
307 306
@@ -411,8 +410,8 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
411 spin_unlock_irqrestore(&local->sta_lock, flags); 410 spin_unlock_irqrestore(&local->sta_lock, flags);
412 411
413#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 412#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
414 printk(KERN_DEBUG "%s: Added IBSS STA %pM\n", 413 wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
415 wiphy_name(local->hw.wiphy), sta->sta.addr); 414 sta->sta.addr);
416#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 415#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
417 416
418 ieee80211_queue_work(&local->hw, &local->sta_finish_work); 417 ieee80211_queue_work(&local->hw, &local->sta_finish_work);
@@ -459,8 +458,7 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
459 } 458 }
460 459
461#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 460#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
462 printk(KERN_DEBUG "%s: Inserted STA %pM\n", 461 wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
463 wiphy_name(local->hw.wiphy), sta->sta.addr);
464#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 462#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
465 463
466 /* move reference to rcu-protected */ 464 /* move reference to rcu-protected */
@@ -690,8 +688,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
690#endif 688#endif
691 689
692#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 690#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
693 printk(KERN_DEBUG "%s: Removed STA %pM\n", 691 wiphy_debug(local->hw.wiphy, "Removed STA %pM\n", sta->sta.addr);
694 wiphy_name(local->hw.wiphy), sta->sta.addr);
695#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 692#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
696 cancel_work_sync(&sta->drv_unblock_wk); 693 cancel_work_sync(&sta->drv_unblock_wk);
697 694
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 54262e72376d..810c5ce98316 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -103,6 +103,7 @@ struct tid_ampdu_tx {
  * @reorder_buf: buffer to reorder incoming aggregated MPDUs
  * @reorder_time: jiffies when skb was added
  * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
+ * @reorder_timer: releases expired frames from the reorder buffer.
  * @head_seq_num: head sequence number in reordering buffer.
  * @stored_mpdu_num: number of MPDUs in reordering buffer
  * @ssn: Starting Sequence Number expected to be aggregated.
@@ -110,20 +111,25 @@ struct tid_ampdu_tx {
  * @timeout: reset timer value (in TUs).
  * @dialog_token: dialog token for aggregation session
  * @rcu_head: RCU head used for freeing this struct
+ * @reorder_lock: serializes access to reorder buffer, see below.
  *
  * This structure is protected by RCU and the per-station
  * spinlock. Assignments to the array holding it must hold
- * the spinlock, only the RX path can access it under RCU
- * lock-free. The RX path, since it is single-threaded,
- * can even modify the structure without locking since the
- * only other modifications to it are done when the struct
- * can not yet or no longer be found by the RX path.
+ * the spinlock.
+ *
+ * The @reorder_lock is used to protect the variables and
+ * arrays such as @reorder_buf, @reorder_time, @head_seq_num,
+ * @stored_mpdu_num and @reorder_time from being corrupted by
+ * concurrent access of the RX path and the expired frame
+ * release timer.
  */
 struct tid_ampdu_rx {
 	struct rcu_head rcu_head;
+	spinlock_t reorder_lock;
 	struct sk_buff **reorder_buf;
 	unsigned long *reorder_time;
 	struct timer_list session_timer;
+	struct timer_list reorder_timer;
 	u16 head_seq_num;
 	u16 stored_mpdu_num;
 	u16 ssn;
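
Note: the sta_info.h hunk above trades the old "the single-threaded RX path may touch the reorder state lock-free" rule for a dedicated reorder_lock, because the new reorder_timer can now fire concurrently with the RX path. A minimal userspace analogue of that pattern follows; pthreads stand in for the kernel spinlock and timer, and all names are illustrative, not mac80211 API.

/* Illustrative userspace sketch, not mac80211 code: a consumer (the
 * "RX path") and a timer thread (the "release timer") both touch the
 * reorder state, so every access takes reorder_lock. */
#include <pthread.h>
#include <stdio.h>

#define BUF_SLOTS 8

static pthread_mutex_t reorder_lock = PTHREAD_MUTEX_INITIALIZER;
static int reorder_buf[BUF_SLOTS];	/* stand-in for skb pointers */
static unsigned int head_seq_num;
static unsigned int stored_mpdu_num;

static void rx_path_store(int frame, unsigned int seq)
{
	pthread_mutex_lock(&reorder_lock);
	reorder_buf[seq % BUF_SLOTS] = frame;
	stored_mpdu_num++;
	pthread_mutex_unlock(&reorder_lock);
}

static void *release_timer(void *arg)
{
	/* Without the lock, this flush could observe a half-updated
	 * head_seq_num/stored_mpdu_num pair from rx_path_store(). */
	pthread_mutex_lock(&reorder_lock);
	while (stored_mpdu_num && reorder_buf[head_seq_num % BUF_SLOTS]) {
		printf("release frame %d\n",
		       reorder_buf[head_seq_num % BUF_SLOTS]);
		reorder_buf[head_seq_num % BUF_SLOTS] = 0;
		head_seq_num++;
		stored_mpdu_num--;
	}
	pthread_mutex_unlock(&reorder_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	rx_path_store(42, 0);
	rx_path_store(43, 1);
	pthread_create(&t, NULL, release_timer, NULL);
	pthread_join(t, NULL);
	return 0;
}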
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 10caec5ea8fa..571b32bfc54c 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -114,11 +114,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 	if (net_ratelimit())
-		printk(KERN_DEBUG "%s: dropped TX filtered frame, "
-		       "queue_len=%d PS=%d @%lu\n",
-		       wiphy_name(local->hw.wiphy),
-		       skb_queue_len(&sta->tx_filtered),
-		       !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
+		wiphy_debug(local->hw.wiphy,
+			    "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
+			    skb_queue_len(&sta->tx_filtered),
+			    !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
 #endif
 	dev_kfree_skb(skb);
 }
@@ -296,7 +295,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 	}
 
 	if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX)
-		cfg80211_action_tx_status(
+		cfg80211_mgmt_tx_status(
 			skb->dev, (unsigned long) skb, skb->data, skb->len,
 			!!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
 
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c54db966926b..e1733dcb58a7 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -351,8 +351,8 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
 
 	local->total_ps_buffered = total;
 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
-	printk(KERN_DEBUG "%s: PS buffers full - purged %d frames\n",
-	       wiphy_name(local->hw.wiphy), purged);
+	wiphy_debug(local->hw.wiphy, "PS buffers full - purged %d frames\n",
+		    purged);
 #endif
 }
 
@@ -509,6 +509,18 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 }
 
 static ieee80211_tx_result debug_noinline
+ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+
+	if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol &&
+		     tx->sdata->control_port_no_encrypt))
+		info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+
+	return TX_CONTINUE;
+}
+
+static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 {
 	struct ieee80211_key *key = NULL;
@@ -527,7 +539,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 	else if ((key = rcu_dereference(tx->sdata->default_key)))
 		tx->key = key;
 	else if (tx->sdata->drop_unencrypted &&
-		 (tx->skb->protocol != cpu_to_be16(ETH_P_PAE)) &&
+		 (tx->skb->protocol != tx->sdata->control_port_protocol) &&
 		 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
 		 (!ieee80211_is_robust_mgmt_frame(hdr) ||
 		  (ieee80211_is_action(hdr->frame_control) &&
@@ -543,15 +555,16 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 		tx->key->tx_rx_count++;
 		/* TODO: add threshold stuff again */
 
-		switch (tx->key->conf.alg) {
-		case ALG_WEP:
+		switch (tx->key->conf.cipher) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
 			if (ieee80211_is_auth(hdr->frame_control))
 				break;
-		case ALG_TKIP:
+		case WLAN_CIPHER_SUITE_TKIP:
 			if (!ieee80211_is_data_present(hdr->frame_control))
 				tx->key = NULL;
 			break;
-		case ALG_CCMP:
+		case WLAN_CIPHER_SUITE_CCMP:
 			if (!ieee80211_is_data_present(hdr->frame_control) &&
 			    !ieee80211_use_mfp(hdr->frame_control, tx->sta,
 					       tx->skb))
@@ -561,7 +574,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 				      IEEE80211_KEY_FLAG_SW_MGMT) &&
 				ieee80211_is_mgmt(hdr->frame_control);
 			break;
-		case ALG_AES_CMAC:
+		case WLAN_CIPHER_SUITE_AES_CMAC:
 			if (!ieee80211_is_mgmt(hdr->frame_control))
 				tx->key = NULL;
 			break;
@@ -946,22 +959,31 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
 {
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+
 	if (!tx->key)
 		return TX_CONTINUE;
 
-	switch (tx->key->conf.alg) {
-	case ALG_WEP:
+	switch (tx->key->conf.cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
 		return ieee80211_crypto_wep_encrypt(tx);
-	case ALG_TKIP:
+	case WLAN_CIPHER_SUITE_TKIP:
 		return ieee80211_crypto_tkip_encrypt(tx);
-	case ALG_CCMP:
+	case WLAN_CIPHER_SUITE_CCMP:
 		return ieee80211_crypto_ccmp_encrypt(tx);
-	case ALG_AES_CMAC:
+	case WLAN_CIPHER_SUITE_AES_CMAC:
 		return ieee80211_crypto_aes_cmac_encrypt(tx);
+	default:
+		/* handle hw-only algorithm */
+		if (info->control.hw_key) {
+			ieee80211_tx_set_protected(tx);
+			return TX_CONTINUE;
+		}
+		break;
+
 	}
 
-	/* not reached */
-	WARN_ON(1);
 	return TX_DROP;
 }
 
@@ -1339,6 +1361,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 	CALL_TXH(ieee80211_tx_h_dynamic_ps);
 	CALL_TXH(ieee80211_tx_h_check_assoc);
 	CALL_TXH(ieee80211_tx_h_ps_buf);
+	CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
 	CALL_TXH(ieee80211_tx_h_select_key);
 	if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
 		CALL_TXH(ieee80211_tx_h_rate_ctrl);
@@ -1511,8 +1534,8 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
 	I802_DEBUG_INC(local->tx_expand_skb_head);
 
 	if (pskb_expand_head(skb, head_need, tail_need, GFP_ATOMIC)) {
-		printk(KERN_DEBUG "%s: failed to reallocate TX buffer\n",
-		       wiphy_name(local->hw.wiphy));
+		wiphy_debug(local->hw.wiphy,
+			    "failed to reallocate TX buffer\n");
 		return -ENOMEM;
 	}
 
@@ -1586,6 +1609,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
 		return;
 	}
 
+	hdr = (struct ieee80211_hdr *) skb->data;
 	info->control.vif = &sdata->vif;
 
 	if (ieee80211_vif_is_mesh(&sdata->vif) &&
@@ -1699,7 +1723,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 	u16 ethertype, hdrlen, meshhdrlen = 0;
 	__le16 fc;
 	struct ieee80211_hdr hdr;
-	struct ieee80211s_hdr mesh_hdr;
+	struct ieee80211s_hdr mesh_hdr __maybe_unused;
 	const u8 *encaps_data;
 	int encaps_len, skip_header_bytes;
 	int nh_pos, h_pos;
@@ -1816,7 +1840,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 #endif
 	case NL80211_IFTYPE_STATION:
 		memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
-		if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
+		if (sdata->u.mgd.use_4addr &&
+		    cpu_to_be16(ethertype) != sdata->control_port_protocol) {
 			fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
 			/* RA TA DA SA */
 			memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
@@ -1869,7 +1894,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 	if (!ieee80211_vif_is_mesh(&sdata->vif) &&
 	    unlikely(!is_multicast_ether_addr(hdr.addr1) &&
 		      !(sta_flags & WLAN_STA_AUTHORIZED) &&
-		      !(ethertype == ETH_P_PAE &&
+		      !(cpu_to_be16(ethertype) == sdata->control_port_protocol &&
 			compare_ether_addr(sdata->vif.addr,
 					   skb->data + ETH_ALEN) == 0))) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -2068,8 +2093,7 @@ void ieee80211_tx_pending(unsigned long data)
 
 		if (skb_queue_empty(&local->pending[i]))
 			list_for_each_entry_rcu(sdata, &local->interfaces, list)
-				netif_tx_wake_queue(
-					netdev_get_tx_queue(sdata->dev, i));
+				netif_wake_subqueue(sdata->dev, i);
 	}
 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
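
Note: the tx.c changes above generalize the hard-coded ETH_P_PAE (EAPOL) checks into a per-interface control_port_protocol, and a new TX handler marks control-port frames "do not encrypt" when so configured. A hedged userspace sketch of just that decision; the constants and struct are illustrative stand-ins, not the mac80211 types.

/* Sketch of the control-port check added above, in plain C. */
#include <stdint.h>
#include <stdio.h>

#define ETH_P_PAE 0x888E		/* EAPOL, the usual control port */
#define TX_FLAG_DONT_ENCRYPT 0x1

struct vif_cfg {
	uint16_t control_port_protocol;	/* configured per interface */
	int control_port_no_encrypt;	/* e.g. requested by a supplicant */
};

static unsigned tx_h_check_control_port(const struct vif_cfg *cfg,
					uint16_t skb_protocol)
{
	unsigned flags = 0;

	/* Frames of the configured control-port ethertype may have to
	 * go out unencrypted, since keys are not in place yet. */
	if (skb_protocol == cfg->control_port_protocol &&
	    cfg->control_port_no_encrypt)
		flags |= TX_FLAG_DONT_ENCRYPT;
	return flags;
}

int main(void)
{
	struct vif_cfg cfg = { ETH_P_PAE, 1 };

	printf("EAPOL flags: %#x\n", tx_h_check_control_port(&cfg, ETH_P_PAE));
	printf("IPv4 flags:  %#x\n", tx_h_check_control_port(&cfg, 0x0800));
	return 0;
}

Keeping the ethertype configurable (instead of hard-coding EAPOL) is what lets the same path serve other authentication protocols.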
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 748387d45bc0..737f4267c335 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -283,8 +283,11 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
 
 	if (skb_queue_empty(&local->pending[queue])) {
 		rcu_read_lock();
-		list_for_each_entry_rcu(sdata, &local->interfaces, list)
-			netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
+		list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+			if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
+				continue;
+			netif_wake_subqueue(sdata->dev, queue);
+		}
 		rcu_read_unlock();
 	} else
 		tasklet_schedule(&local->tx_pending_tasklet);
@@ -323,7 +326,7 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdata, &local->interfaces, list)
-		netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue));
+		netif_stop_subqueue(sdata->dev, queue);
 	rcu_read_unlock();
 }
 
@@ -471,16 +474,10 @@ void ieee80211_iterate_active_interfaces(
 
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		switch (sdata->vif.type) {
-		case __NL80211_IFTYPE_AFTER_LAST:
-		case NL80211_IFTYPE_UNSPECIFIED:
 		case NL80211_IFTYPE_MONITOR:
 		case NL80211_IFTYPE_AP_VLAN:
 			continue;
-		case NL80211_IFTYPE_AP:
-		case NL80211_IFTYPE_STATION:
-		case NL80211_IFTYPE_ADHOC:
-		case NL80211_IFTYPE_WDS:
-		case NL80211_IFTYPE_MESH_POINT:
+		default:
 			break;
 		}
 		if (ieee80211_sdata_running(sdata))
@@ -505,16 +502,10 @@ void ieee80211_iterate_active_interfaces_atomic(
 
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 		switch (sdata->vif.type) {
-		case __NL80211_IFTYPE_AFTER_LAST:
-		case NL80211_IFTYPE_UNSPECIFIED:
 		case NL80211_IFTYPE_MONITOR:
 		case NL80211_IFTYPE_AP_VLAN:
 			continue;
-		case NL80211_IFTYPE_AP:
-		case NL80211_IFTYPE_STATION:
-		case NL80211_IFTYPE_ADHOC:
-		case NL80211_IFTYPE_WDS:
-		case NL80211_IFTYPE_MESH_POINT:
+		default:
 			break;
 		}
 		if (ieee80211_sdata_running(sdata))
@@ -1189,7 +1180,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 			/* ignore virtual */
 			break;
 		case NL80211_IFTYPE_UNSPECIFIED:
-		case __NL80211_IFTYPE_AFTER_LAST:
+		case NUM_NL80211_IFTYPES:
+		case NL80211_IFTYPE_P2P_CLIENT:
+		case NL80211_IFTYPE_P2P_GO:
 			WARN_ON(1);
 			break;
 		}
@@ -1293,9 +1286,9 @@ void ieee80211_recalc_smps(struct ieee80211_local *local,
 	int count = 0;
 
 	if (forsdata)
-		WARN_ON(!mutex_is_locked(&forsdata->u.mgd.mtx));
+		lockdep_assert_held(&forsdata->u.mgd.mtx);
 
-	WARN_ON(!mutex_is_locked(&local->iflist_mtx));
+	lockdep_assert_held(&local->iflist_mtx);
 
 	/*
 	 * This function could be improved to handle multiple
@@ -1308,7 +1301,7 @@ void ieee80211_recalc_smps(struct ieee80211_local *local,
 	 */
 
 	list_for_each_entry(sdata, &local->interfaces, list) {
-		if (!netif_running(sdata->dev))
+		if (!ieee80211_sdata_running(sdata))
 			continue;
 		if (sdata->vif.type != NL80211_IFTYPE_STATION)
 			goto set;
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 9ebc8d8a1f5b..f27484c22b9f 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -240,7 +240,7 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local,
 
 	keyidx = skb->data[hdrlen + 3] >> 6;
 
-	if (!key || keyidx != key->conf.keyidx || key->conf.alg != ALG_WEP)
+	if (!key || keyidx != key->conf.keyidx)
 		return -1;
 
 	klen = 3 + key->conf.keylen;
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 81d4ad64184a..ae344d1ba056 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -43,7 +43,7 @@ enum work_action {
 /* utils */
 static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
 {
-	WARN_ON(!mutex_is_locked(&local->work_mtx));
+	lockdep_assert_held(&local->mtx);
 }
 
 /*
@@ -757,7 +757,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
 	mgmt = (struct ieee80211_mgmt *) skb->data;
 	fc = le16_to_cpu(mgmt->frame_control);
 
-	mutex_lock(&local->work_mtx);
+	mutex_lock(&local->mtx);
 
 	list_for_each_entry(wk, &local->work_list, list) {
 		const u8 *bssid = NULL;
@@ -833,7 +833,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
 		WARN(1, "unexpected: %d", rma);
 	}
 
-	mutex_unlock(&local->work_mtx);
+	mutex_unlock(&local->mtx);
 
 	if (rma != WORK_ACT_DONE)
 		goto out;
@@ -845,9 +845,9 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
 	case WORK_DONE_REQUEUE:
 		synchronize_rcu();
 		wk->started = false; /* restart */
-		mutex_lock(&local->work_mtx);
+		mutex_lock(&local->mtx);
 		list_add_tail(&wk->list, &local->work_list);
-		mutex_unlock(&local->work_mtx);
+		mutex_unlock(&local->mtx);
 	}
 
  out:
@@ -888,9 +888,9 @@ static void ieee80211_work_work(struct work_struct *work)
 	while ((skb = skb_dequeue(&local->work_skb_queue)))
 		ieee80211_work_rx_queued_mgmt(local, skb);
 
-	ieee80211_recalc_idle(local);
+	mutex_lock(&local->mtx);
 
-	mutex_lock(&local->work_mtx);
+	ieee80211_recalc_idle(local);
 
 	list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
 		bool started = wk->started;
@@ -995,20 +995,16 @@ static void ieee80211_work_work(struct work_struct *work)
 		run_again(local, jiffies + HZ/2);
 	}
 
-	mutex_lock(&local->scan_mtx);
-
 	if (list_empty(&local->work_list) && local->scan_req &&
 	    !local->scanning)
 		ieee80211_queue_delayed_work(&local->hw,
 					     &local->scan_work,
 					     round_jiffies_relative(0));
 
-	mutex_unlock(&local->scan_mtx);
-
-	mutex_unlock(&local->work_mtx);
-
 	ieee80211_recalc_idle(local);
 
+	mutex_unlock(&local->mtx);
+
 	list_for_each_entry_safe(wk, tmp, &free_work, list) {
 		wk->done(wk, NULL);
 		list_del(&wk->list);
@@ -1035,16 +1031,15 @@ void ieee80211_add_work(struct ieee80211_work *wk)
 	wk->started = false;
 
 	local = wk->sdata->local;
-	mutex_lock(&local->work_mtx);
+	mutex_lock(&local->mtx);
 	list_add_tail(&wk->list, &local->work_list);
-	mutex_unlock(&local->work_mtx);
+	mutex_unlock(&local->mtx);
 
 	ieee80211_queue_work(&local->hw, &local->work_work);
 }
 
 void ieee80211_work_init(struct ieee80211_local *local)
 {
-	mutex_init(&local->work_mtx);
 	INIT_LIST_HEAD(&local->work_list);
 	setup_timer(&local->work_timer, ieee80211_work_timer,
 		    (unsigned long)local);
@@ -1057,7 +1052,7 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_work *wk;
 
-	mutex_lock(&local->work_mtx);
+	mutex_lock(&local->mtx);
 	list_for_each_entry(wk, &local->work_list, list) {
 		if (wk->sdata != sdata)
 			continue;
@@ -1065,19 +1060,19 @@ void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
 		wk->started = true;
 		wk->timeout = jiffies;
 	}
-	mutex_unlock(&local->work_mtx);
+	mutex_unlock(&local->mtx);
 
 	/* run cleanups etc. */
 	ieee80211_work_work(&local->work_work);
 
-	mutex_lock(&local->work_mtx);
+	mutex_lock(&local->mtx);
 	list_for_each_entry(wk, &local->work_list, list) {
 		if (wk->sdata != sdata)
 			continue;
 		WARN_ON(1);
 		break;
 	}
-	mutex_unlock(&local->work_mtx);
+	mutex_unlock(&local->mtx);
 }
 
 ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -1163,7 +1158,7 @@ int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_work *wk, *tmp;
 	bool found = false;
 
-	mutex_lock(&local->work_mtx);
+	mutex_lock(&local->mtx);
 	list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
 		if ((unsigned long) wk == cookie) {
 			wk->timeout = jiffies;
@@ -1171,7 +1166,7 @@ int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
 			break;
 		}
 	}
-	mutex_unlock(&local->work_mtx);
+	mutex_unlock(&local->mtx);
 
 	if (!found)
 		return -ENOENT;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 8d59d27d887e..43882b36da55 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -36,8 +36,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
 	int tail;
 
 	hdr = (struct ieee80211_hdr *)skb->data;
-	if (!tx->key || tx->key->conf.alg != ALG_TKIP || skb->len < 24 ||
-	    !ieee80211_is_data_present(hdr->frame_control))
+	if (!tx->key || tx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
+	    skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control))
 		return TX_CONTINUE;
 
 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -94,7 +94,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 	if (status->flag & RX_FLAG_MMIC_STRIPPED)
 		return RX_CONTINUE;
 
-	if (!rx->key || rx->key->conf.alg != ALG_TKIP ||
+	if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
 	    !ieee80211_has_protected(hdr->frame_control) ||
 	    !ieee80211_is_data_present(hdr->frame_control))
 		return RX_CONTINUE;
@@ -221,19 +221,13 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
 	if (!rx->sta || skb->len - hdrlen < 12)
 		return RX_DROP_UNUSABLE;
 
-	if (status->flag & RX_FLAG_DECRYPTED) {
-		if (status->flag & RX_FLAG_IV_STRIPPED) {
-			/*
-			 * Hardware took care of all processing, including
-			 * replay protection, and stripped the ICV/IV so
-			 * we cannot do any checks here.
-			 */
-			return RX_CONTINUE;
-		}
-
-		/* let TKIP code verify IV, but skip decryption */
+	/*
+	 * Let TKIP code verify IV, but skip decryption.
+	 * In the case where hardware checks the IV as well,
+	 * we don't even get here, see ieee80211_rx_h_decrypt()
+	 */
+	if (status->flag & RX_FLAG_DECRYPTED)
 		hwaccel = 1;
-	}
 
 	res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
 					  key, skb->data + hdrlen,
@@ -447,10 +441,6 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
 	if (!rx->sta || data_len < 0)
 		return RX_DROP_UNUSABLE;
 
-	if ((status->flag & RX_FLAG_DECRYPTED) &&
-	    (status->flag & RX_FLAG_IV_STRIPPED))
-		return RX_CONTINUE;
-
 	ccmp_hdr2pn(pn, skb->data + hdrlen);
 
 	queue = ieee80211_is_mgmt(hdr->frame_control) ?
@@ -564,10 +554,6 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
 	if (!ieee80211_is_mgmt(hdr->frame_control))
 		return RX_CONTINUE;
 
-	if ((status->flag & RX_FLAG_DECRYPTED) &&
-	    (status->flag & RX_FLAG_IV_STRIPPED))
-		return RX_CONTINUE;
-
 	if (skb->len < 24 + sizeof(*mmie))
 		return RX_DROP_UNUSABLE;
 
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4c2f89df5cce..0c043b6ce65e 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -40,6 +40,7 @@
 #include <net/udp.h>
 #include <net/icmp.h>                   /* for icmp_send */
 #include <net/route.h>
+#include <net/ip6_checksum.h>
 
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
@@ -637,10 +638,12 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
 	}
 
 	/* And finally the ICMP checksum */
-	icmph->icmp6_cksum = 0;
-	/* TODO IPv6: is this correct for ICMPv6? */
-	ip_vs_checksum_complete(skb, icmp_offset);
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
+					      skb->len - icmp_offset,
+					      IPPROTO_ICMPV6, 0);
+	skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
+	skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
+	skb->ip_summed = CHECKSUM_PARTIAL;
 
 	if (inout)
 		IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
@@ -1381,8 +1384,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
 	if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
 	    cp->protocol == IPPROTO_SCTP) {
 		if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
-			(atomic_read(&cp->in_pkts) %
-			 sysctl_ip_vs_sync_threshold[1]
+			(pkts % sysctl_ip_vs_sync_threshold[1]
 			 == sysctl_ip_vs_sync_threshold[0])) ||
 			(cp->old_state != cp->state &&
 			 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
@@ -1393,7 +1395,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
 		}
 	}
 
-	if (af == AF_INET &&
+	/* Keep this block last: TCP and others with pp->num_states <= 1 */
+	else if (af == AF_INET &&
 	    (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
 	    (((cp->protocol != IPPROTO_TCP ||
 	       cp->state == IP_VS_TCP_S_ESTABLISHED) &&
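
Note: the ip_vs_core.c hunk above replaces the bogus IPv4-style "complete" checksum with the ICMPv6 pseudo-header sum (csum_ipv6_magic) and lets CHECKSUM_PARTIAL finish the fold over the payload. A standalone sketch of the same pseudo-header arithmetic in userspace C follows; it is a simplified illustration of the RFC 2460 rule, not the kernel helper itself.

/* ICMPv6 checksum over the IPv6 pseudo-header (src, dst, upper-layer
 * length, next header = 58) plus the ICMPv6 message, folded to 16 bits. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint32_t sum_words(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	return sum;
}

static uint16_t icmpv6_checksum(const uint8_t saddr[16],
				const uint8_t daddr[16],
				const uint8_t *payload, uint32_t len)
{
	uint32_t sum = 0;

	sum = sum_words(saddr, 16, sum);
	sum = sum_words(daddr, 16, sum);
	sum += len;		/* upper-layer packet length */
	sum += 58;		/* next header: ICMPv6 */
	sum = sum_words(payload, len, sum);
	return csum_fold(sum);
}

int main(void)
{
	uint8_t src[16] = { 0xfe, 0x80, [15] = 1 };
	uint8_t dst[16] = { 0xfe, 0x80, [15] = 2 };
	uint8_t echo[8] = { 128, 0, 0, 0, 0, 1, 0, 1 }; /* cksum field zeroed */

	printf("cksum = 0x%04x\n",
	       (unsigned)icmpv6_checksum(src, dst, echo, sizeof(echo)));
	return 0;
}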
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 0f0c079c422a..ca8ec8c4f311 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -61,7 +61,7 @@ static DEFINE_RWLOCK(__ip_vs_svc_lock);
 static DEFINE_RWLOCK(__ip_vs_rs_lock);
 
 /* lock for state and timeout tables */
-static DEFINE_RWLOCK(__ip_vs_securetcp_lock);
+static DEFINE_SPINLOCK(ip_vs_securetcp_lock);
 
 /* lock for drop entry handling */
 static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
@@ -204,7 +204,7 @@ static void update_defense_level(void)
 	spin_unlock(&__ip_vs_droppacket_lock);
 
 	/* secure_tcp */
-	write_lock(&__ip_vs_securetcp_lock);
+	spin_lock(&ip_vs_securetcp_lock);
 	switch (sysctl_ip_vs_secure_tcp) {
 	case 0:
 		if (old_secure_tcp >= 2)
@@ -238,7 +238,7 @@ static void update_defense_level(void)
 	old_secure_tcp = sysctl_ip_vs_secure_tcp;
 	if (to_change >= 0)
 		ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
-	write_unlock(&__ip_vs_securetcp_lock);
+	spin_unlock(&ip_vs_securetcp_lock);
 
 	local_bh_enable();
 }
@@ -843,7 +843,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 		return -EINVAL;
 	}
 
-	dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
+	dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL);
 	if (dest == NULL) {
 		pr_err("%s(): no memory.\n", __func__);
 		return -ENOMEM;
@@ -1177,7 +1177,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
 	}
 #endif
 
-	svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
+	svc = kzalloc(sizeof(struct ip_vs_service), GFP_KERNEL);
 	if (svc == NULL) {
 		IP_VS_DBG(1, "%s(): no memory\n", __func__);
 		ret = -ENOMEM;
@@ -2155,7 +2155,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 	if (cmd != IP_VS_SO_SET_ADD
 	    && (svc == NULL || svc->protocol != usvc.protocol)) {
 		ret = -ESRCH;
-		goto out_unlock;
+		goto out_drop_service;
 	}
 
 	switch (cmd) {
@@ -2189,6 +2189,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 		ret = -EINVAL;
 	}
 
+out_drop_service:
 	if (svc)
 		ip_vs_service_put(svc);
 
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index bbc1ac795952..727e45b66953 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -35,7 +35,7 @@
 static LIST_HEAD(ip_vs_schedulers);
 
 /* lock for service table */
-static DEFINE_RWLOCK(__ip_vs_sched_lock);
+static DEFINE_SPINLOCK(ip_vs_sched_lock);
 
 
 /*
@@ -108,7 +108,7 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
 
 	IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name);
 
-	read_lock_bh(&__ip_vs_sched_lock);
+	spin_lock_bh(&ip_vs_sched_lock);
 
 	list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
 		/*
@@ -122,14 +122,14 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
 		}
 		if (strcmp(sched_name, sched->name)==0) {
 			/* HIT */
-			read_unlock_bh(&__ip_vs_sched_lock);
+			spin_unlock_bh(&ip_vs_sched_lock);
 			return sched;
 		}
 		if (sched->module)
 			module_put(sched->module);
 	}
 
-	read_unlock_bh(&__ip_vs_sched_lock);
+	spin_unlock_bh(&ip_vs_sched_lock);
 	return NULL;
 }
 
@@ -184,10 +184,10 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 	/* increase the module use count */
 	ip_vs_use_count_inc();
 
-	write_lock_bh(&__ip_vs_sched_lock);
+	spin_lock_bh(&ip_vs_sched_lock);
 
 	if (!list_empty(&scheduler->n_list)) {
-		write_unlock_bh(&__ip_vs_sched_lock);
+		spin_unlock_bh(&ip_vs_sched_lock);
 		ip_vs_use_count_dec();
 		pr_err("%s(): [%s] scheduler already linked\n",
 		       __func__, scheduler->name);
@@ -200,7 +200,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 	 */
 	list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
 		if (strcmp(scheduler->name, sched->name) == 0) {
-			write_unlock_bh(&__ip_vs_sched_lock);
+			spin_unlock_bh(&ip_vs_sched_lock);
 			ip_vs_use_count_dec();
 			pr_err("%s(): [%s] scheduler already existed "
 			       "in the system\n", __func__, scheduler->name);
@@ -211,7 +211,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 	 * Add it into the d-linked scheduler list
 	 */
 	list_add(&scheduler->n_list, &ip_vs_schedulers);
-	write_unlock_bh(&__ip_vs_sched_lock);
+	spin_unlock_bh(&ip_vs_sched_lock);
 
 	pr_info("[%s] scheduler registered.\n", scheduler->name);
 
@@ -229,9 +229,9 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 		return -EINVAL;
 	}
 
-	write_lock_bh(&__ip_vs_sched_lock);
+	spin_lock_bh(&ip_vs_sched_lock);
 	if (list_empty(&scheduler->n_list)) {
-		write_unlock_bh(&__ip_vs_sched_lock);
+		spin_unlock_bh(&ip_vs_sched_lock);
 		pr_err("%s(): [%s] scheduler is not in the list. failed\n",
 		       __func__, scheduler->name);
 		return -EINVAL;
@@ -241,7 +241,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
 	 * Remove it from the d-linked scheduler list
 	 */
 	list_del(&scheduler->n_list);
-	write_unlock_bh(&__ip_vs_sched_lock);
+	spin_unlock_bh(&ip_vs_sched_lock);
 
 	/* decrease the module use count */
 	ip_vs_use_count_dec();
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index b46a8390896d..9228ee0dc11a 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -448,6 +448,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
 {
 	__be16 _ports[2], *ports;
 	u8 nexthdr;
+	int poff;
 
 	memset(dst, 0, sizeof(*dst));
 
@@ -492,19 +493,13 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
 		return 0;
 	}
 
-	switch (nexthdr) {
-	case IPPROTO_TCP:
-	case IPPROTO_UDP:
-	case IPPROTO_UDPLITE:
-	case IPPROTO_SCTP:
-	case IPPROTO_DCCP:
-		ports = skb_header_pointer(skb, protoff, sizeof(_ports),
+	poff = proto_ports_offset(nexthdr);
+	if (poff >= 0) {
+		ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports),
 					   &_ports);
-		break;
-	default:
+	} else {
 		_ports[0] = _ports[1] = 0;
 		ports = _ports;
-		break;
 	}
 	if (!ports)
 		return -1;
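
Note: the xt_hashlimit hunk above replaces an open-coded protocol switch with proto_ports_offset(), which answers "where inside the transport header does the 4-byte port pair (or SPI) live, or -1 if the protocol has none". An approximate userspace rendering of that helper follows; the values mirror the kernel helper of that era as best I recall, so treat it as illustrative rather than authoritative.

/* Rough stand-in for the kernel's proto_ports_offset(). */
#include <netinet/in.h>
#include <stdio.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE 136
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP 33
#endif
#ifndef IPPROTO_SCTP
#define IPPROTO_SCTP 132
#endif

static int proto_ports_offset(int proto)
{
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:	/* SPI takes the place of the ports */
		return 0;
	case IPPROTO_AH:	/* SPI sits after the 4-byte AH preamble */
		return 4;
	default:
		return -1;	/* no port concept, e.g. ICMP */
	}
}

int main(void)
{
	printf("TCP: %d, AH: %d, ICMP: %d\n",
	       proto_ports_offset(IPPROTO_TCP),
	       proto_ports_offset(IPPROTO_AH),
	       proto_ports_offset(IPPROTO_ICMP));
	return 0;
}

Centralizing the mapping means hashlimit (and any other caller) picks up new port-carrying protocols in one place instead of duplicating the switch.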
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9a17f28b1253..3616f27b9d46 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -488,7 +488,7 @@ retry:
 	skb->dev = dev;
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
-	err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
 	if (err < 0)
 		goto out_unlock;
 
@@ -1209,7 +1209,7 @@ static int packet_snd(struct socket *sock,
 	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
 	if (err)
 		goto out_free;
-	err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
 	if (err < 0)
 		goto out_free;
 
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 73aee7f2fcdc..fd95beb72f5d 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -251,6 +251,16 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
 	else if (phonet_address_lookup(net, daddr) == 0) {
 		dev = phonet_device_get(net);
 		skb->pkt_type = PACKET_LOOPBACK;
+	} else if (pn_sockaddr_get_object(target) == 0) {
+		/* Resource routing (small race until phonet_rcv()) */
+		struct sock *sk = pn_find_sock_by_res(net,
+							target->spn_resource);
+		if (sk)	{
+			sock_put(sk);
+			dev = phonet_device_get(net);
+			skb->pkt_type = PACKET_LOOPBACK;
+		} else
+			dev = phonet_route_output(net, daddr);
 	} else
 		dev = phonet_route_output(net, daddr);
 
@@ -383,6 +393,13 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
 		goto out;
 	}
 
+	/* resource routing */
+	if (pn_sockaddr_get_object(&sa) == 0) {
+		struct sock *sk = pn_find_sock_by_res(net, sa.spn_resource);
+		if (sk)
+			return sk_receive_skb(sk, skb, 0);
+	}
+
 	/* check if we are the destination */
 	if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) {
 		/* Phonet packet input */
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 1bd38db4fe1e..2f032381bd45 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -52,6 +52,19 @@ static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		answ = skb ? skb->len : 0;
 		release_sock(sk);
 		return put_user(answ, (int __user *)arg);
+
+	case SIOCPNADDRESOURCE:
+	case SIOCPNDELRESOURCE: {
+			u32 res;
+			if (get_user(res, (u32 __user *)arg))
+				return -EFAULT;
+			if (res >= 256)
+				return -EINVAL;
+			if (cmd == SIOCPNADDRESOURCE)
+				return pn_sock_bind_res(sk, res);
+			else
+				return pn_sock_unbind_res(sk, res);
+		}
 	}
 
 	return -ENOIOCTLCMD;
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b2a3ae6cad78..d0e7eb24c8b9 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -620,6 +620,28 @@ drop:
 	return err;
 }
 
+static int pipe_do_remove(struct sock *sk)
+{
+	struct pep_sock *pn = pep_sk(sk);
+	struct pnpipehdr *ph;
+	struct sk_buff *skb;
+
+	skb = alloc_skb(MAX_PNPIPE_HEADER, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_reserve(skb, MAX_PNPIPE_HEADER);
+	__skb_push(skb, sizeof(*ph));
+	skb_reset_transport_header(skb);
+	ph = pnp_hdr(skb);
+	ph->utid = 0;
+	ph->message_id = PNS_PIPE_REMOVE_REQ;
+	ph->pipe_handle = pn->pipe_handle;
+	ph->data[0] = PAD;
+
+	return pn_skb_send(sk, skb, &pipe_srv);
+}
+
 /* associated socket ceases to exist */
 static void pep_sock_close(struct sock *sk, long timeout)
 {
@@ -638,7 +660,10 @@ static void pep_sock_close(struct sock *sk, long timeout)
 		sk_for_each_safe(sknode, p, n, &pn->ackq)
 			sk_del_node_init(sknode);
 		sk->sk_state = TCP_CLOSE;
-	}
+	} else if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
+		/* Forcefully remove dangling Phonet pipe */
+		pipe_do_remove(sk);
+
 	ifindex = pn->ifindex;
 	pn->ifindex = 0;
 	release_sock(sk);
@@ -834,6 +859,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
 {
 	struct pep_sock *pn = pep_sk(sk);
 	struct pnpipehdr *ph;
+	int err;
 
 	if (pn_flow_safe(pn->tx_fc) &&
 	    !atomic_add_unless(&pn->tx_credits, -1, 0)) {
@@ -852,7 +878,10 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
 	ph->message_id = PNS_PIPE_DATA;
 	ph->pipe_handle = pn->pipe_handle;
 
-	return pn_skb_send(sk, skb, &pipe_srv);
+	err = pn_skb_send(sk, skb, &pipe_srv);
+	if (err && pn_flow_safe(pn->tx_fc))
+		atomic_inc(&pn->tx_credits);
+	return err;
 }
 
 static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
@@ -872,7 +901,7 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
 	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
 				  flags & MSG_DONTWAIT, &err);
 	if (!skb)
-		return -ENOBUFS;
+		return err;
 
 	skb_reserve(skb, MAX_PHONET_HEADER + 3);
 	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
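
Note: pipe_skb_send() above now takes one flow-control credit with atomic_add_unless() and returns it when pn_skb_send() fails, so a failed transmit no longer leaks a credit. A minimal userspace sketch of that take-or-fail plus rollback pattern, with C11 atomics standing in for the kernel's atomic ops (names are illustrative, not the Phonet API):

/* Take one credit or fail; roll the credit back on send error. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int tx_credits = 1;

/* Decrement unless the counter is already 0; returns 1 on success,
 * mirroring atomic_add_unless(&v, -1, 0). */
static int credit_take(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0)
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return 1;
	return 0;
}

static int send_frame(int fail)
{
	if (!credit_take(&tx_credits))
		return -1;	/* would block: no credits left */

	if (fail) {
		/* send failed: give the credit back, as the patch does */
		atomic_fetch_add(&tx_credits, 1);
		return -2;
	}
	return 0;		/* credit is consumed by a real send */
}

int main(void)
{
	printf("failing send: %d, credits now %d\n",
	       send_frame(1), atomic_load(&tx_credits));
	printf("good send:    %d, credits now %d\n",
	       send_frame(0), atomic_load(&tx_credits));
	return 0;
}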
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index b18e48fae975..947038ddd04c 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -292,8 +292,7 @@ static void phonet_route_autodel(struct net_device *dev)
 	if (bitmap_empty(deleted, 64))
 		return; /* short-circuit RCU */
 	synchronize_rcu();
-	for (i = find_first_bit(deleted, 64); i < 64;
-			i = find_next_bit(deleted, 64, i + 1)) {
+	for_each_set_bit(i, deleted, 64) {
 		rtm_phonet_notify(RTM_DELROUTE, dev, i);
 		dev_put(dev);
 	}
@@ -374,6 +373,7 @@ int __init phonet_device_init(void)
 	if (err)
 		return err;
 
+	proc_net_fops_create(&init_net, "pnresource", 0, &pn_res_seq_fops);
 	register_netdevice_notifier(&phonet_device_notifier);
 	err = phonet_netlink_register();
 	if (err)
@@ -386,6 +386,7 @@ void phonet_device_exit(void)
 	rtnl_unregister_all(PF_PHONET);
 	unregister_netdevice_notifier(&phonet_device_notifier);
 	unregister_pernet_device(&phonet_net_ops);
+	proc_net_remove(&init_net, "pnresource");
 }
 
 int phonet_route_add(struct net_device *dev, u8 daddr)
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 6e9848bf0370..aca8fba099e9 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -158,6 +158,7 @@ void pn_sock_unhash(struct sock *sk)
 	spin_lock_bh(&pnsocks.lock);
 	sk_del_node_init(sk);
 	spin_unlock_bh(&pnsocks.lock);
+	pn_sock_unbind_all_res(sk);
 }
 EXPORT_SYMBOL(pn_sock_unhash);
 
@@ -281,7 +282,9 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
 	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
 		return POLLHUP;
 
-	if (sk->sk_state == TCP_ESTABLISHED && atomic_read(&pn->tx_credits))
+	if (sk->sk_state == TCP_ESTABLISHED &&
+	    atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
+	    atomic_read(&pn->tx_credits))
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 
 	return mask;
@@ -563,3 +566,188 @@ const struct file_operations pn_sock_seq_fops = {
 	.release = seq_release_net,
 };
 #endif
+
+static struct  {
+	struct sock *sk[256];
+} pnres;
+
+/*
+ * Find and hold socket based on resource.
+ */
+struct sock *pn_find_sock_by_res(struct net *net, u8 res)
+{
+	struct sock *sk;
+
+	if (!net_eq(net, &init_net))
+		return NULL;
+
+	rcu_read_lock();
+	sk = rcu_dereference(pnres.sk[res]);
+	if (sk)
+		sock_hold(sk);
+	rcu_read_unlock();
+	return sk;
+}
+
+static DEFINE_MUTEX(resource_mutex);
+
+int pn_sock_bind_res(struct sock *sk, u8 res)
+{
+	int ret = -EADDRINUSE;
+
+	if (!net_eq(sock_net(sk), &init_net))
+		return -ENOIOCTLCMD;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (pn_socket_autobind(sk->sk_socket))
+		return -EAGAIN;
+
+	mutex_lock(&resource_mutex);
+	if (pnres.sk[res] == NULL) {
+		sock_hold(sk);
+		rcu_assign_pointer(pnres.sk[res], sk);
+		ret = 0;
+	}
+	mutex_unlock(&resource_mutex);
+	return ret;
+}
+
+int pn_sock_unbind_res(struct sock *sk, u8 res)
+{
+	int ret = -ENOENT;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	mutex_lock(&resource_mutex);
+	if (pnres.sk[res] == sk) {
+		rcu_assign_pointer(pnres.sk[res], NULL);
+		ret = 0;
+	}
+	mutex_unlock(&resource_mutex);
+
+	if (ret == 0) {
+		synchronize_rcu();
+		sock_put(sk);
+	}
+	return ret;
+}
+
+void pn_sock_unbind_all_res(struct sock *sk)
+{
+	unsigned res, match = 0;
+
+	mutex_lock(&resource_mutex);
+	for (res = 0; res < 256; res++) {
+		if (pnres.sk[res] == sk) {
+			rcu_assign_pointer(pnres.sk[res], NULL);
+			match++;
+		}
+	}
+	mutex_unlock(&resource_mutex);
+
+	if (match == 0)
+		return;
+	synchronize_rcu();
+	while (match > 0) {
+		sock_put(sk);
+		match--;
+	}
+}
+
+#ifdef CONFIG_PROC_FS
+static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
+{
+	struct net *net = seq_file_net(seq);
+	unsigned i;
+
+	if (!net_eq(net, &init_net))
+		return NULL;
+
+	for (i = 0; i < 256; i++) {
+		if (pnres.sk[i] == NULL)
+			continue;
+		if (!pos)
+			return pnres.sk + i;
+		pos--;
+	}
+	return NULL;
+}
+
+static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
+{
+	struct net *net = seq_file_net(seq);
+	unsigned i;
+
+	BUG_ON(!net_eq(net, &init_net));
+
+	for (i = (sk - pnres.sk) + 1; i < 256; i++)
+		if (pnres.sk[i])
+			return pnres.sk + i;
+	return NULL;
+}
+
+static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
+	__acquires(resource_mutex)
+{
+	mutex_lock(&resource_mutex);
+	return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct sock **sk;
+
+	if (v == SEQ_START_TOKEN)
+		sk = pn_res_get_idx(seq, 0);
+	else
+		sk = pn_res_get_next(seq, v);
+	(*pos)++;
+	return sk;
+}
+
+static void pn_res_seq_stop(struct seq_file *seq, void *v)
+	__releases(resource_mutex)
+{
+	mutex_unlock(&resource_mutex);
+}
+
+static int pn_res_seq_show(struct seq_file *seq, void *v)
+{
+	int len;
+
+	if (v == SEQ_START_TOKEN)
+		seq_printf(seq, "%s%n", "rs   uid inode", &len);
+	else {
+		struct sock **psk = v;
+		struct sock *sk = *psk;
+
+		seq_printf(seq, "%02X %5d %lu%n",
+			   (int) (psk - pnres.sk), sock_i_uid(sk),
+			   sock_i_ino(sk), &len);
+	}
+	seq_printf(seq, "%*s\n", 63 - len, "");
+	return 0;
+}
+
+static const struct seq_operations pn_res_seq_ops = {
+	.start = pn_res_seq_start,
+	.next = pn_res_seq_next,
+	.stop = pn_res_seq_stop,
+	.show = pn_res_seq_show,
+};
+
+static int pn_res_open(struct inode *inode, struct file *file)
+{
+	return seq_open_net(inode, file, &pn_res_seq_ops,
+			    sizeof(struct seq_net_private));
+}
+
+const struct file_operations pn_res_seq_fops = {
+	.owner = THIS_MODULE,
+	.open = pn_res_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release_net,
+};
+#endif
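
Note: the new pnres table above pairs a writer-side mutex with rcu_assign_pointer()/rcu_dereference(): lookups run lockless under rcu_read_lock() and take a reference before returning, while unbind clears the slot and waits in synchronize_rcu() before dropping the table's reference. The sketch below reproduces that choreography in userspace, assuming liburcu (the userspace RCU library, linked with -lurcu) provides the rcu_* calls named here; the refcount is a plain int for brevity, where real concurrent code would need an atomic.

/* Userspace sketch of the bind/lookup/unbind pattern, not Phonet code. */
#include <urcu.h>		/* liburcu: rcu_read_lock() and friends */
#include <pthread.h>
#include <stdio.h>

struct sock { int refs; };

static struct sock *res_table[256];
static pthread_mutex_t resource_mutex = PTHREAD_MUTEX_INITIALIZER;

static int bind_res(struct sock *sk, unsigned res)
{
	int ret = -1;

	pthread_mutex_lock(&resource_mutex);
	if (!res_table[res]) {
		sk->refs++;			/* the table's reference */
		rcu_assign_pointer(res_table[res], sk);
		ret = 0;
	}
	pthread_mutex_unlock(&resource_mutex);
	return ret;
}

static struct sock *find_sock_by_res(unsigned res)
{
	struct sock *sk;

	rcu_read_lock();
	sk = rcu_dereference(res_table[res]);
	if (sk)
		sk->refs++;			/* the caller's reference */
	rcu_read_unlock();
	return sk;
}

static void unbind_res(struct sock *sk, unsigned res)
{
	pthread_mutex_lock(&resource_mutex);
	if (res_table[res] == sk)
		rcu_assign_pointer(res_table[res], NULL);
	pthread_mutex_unlock(&resource_mutex);

	/* wait out lockless readers before dropping the table's ref */
	synchronize_rcu();
	sk->refs--;
}

int main(void)
{
	struct sock s = { .refs = 1 };

	rcu_register_thread();
	bind_res(&s, 7);
	printf("lookup: %p refs=%d\n", (void *)find_sock_by_res(7), s.refs);
	unbind_res(&s, 7);
	rcu_unregister_thread();
	return 0;
}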
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index aebfecbdb841..bb6ad81b671d 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -39,7 +39,15 @@
 #include <net/sock.h>
 
 #include "rds.h"
-#include "rdma.h"
+
+char *rds_str_array(char **array, size_t elements, size_t index)
+{
+	if ((index < elements) && array[index])
+		return array[index];
+	else
+		return "unknown";
+}
+EXPORT_SYMBOL(rds_str_array);
 
 /* this is just used for stats gathering :/ */
 static DEFINE_SPINLOCK(rds_sock_lock);
@@ -62,7 +70,7 @@ static int rds_release(struct socket *sock)
 	struct rds_sock *rs;
 	unsigned long flags;
 
-	if (sk == NULL)
+	if (!sk)
 		goto out;
 
 	rs = rds_sk_to_rs(sk);
@@ -73,7 +81,15 @@ static int rds_release(struct socket *sock)
 	 * with the socket. */
 	rds_clear_recv_queue(rs);
 	rds_cong_remove_socket(rs);
+
+	/*
+	 * the binding lookup hash uses rcu, we need to
+	 * make sure we sychronize_rcu before we free our
+	 * entry
+	 */
 	rds_remove_bound(rs);
+	synchronize_rcu();
+
 	rds_send_drop_to(rs, NULL);
 	rds_rdma_drop_keys(rs);
 	rds_notify_queue_get(rs, NULL);
@@ -83,6 +99,8 @@ static int rds_release(struct socket *sock)
 	rds_sock_count--;
 	spin_unlock_irqrestore(&rds_sock_lock, flags);
 
+	rds_trans_put(rs->rs_transport);
+
 	sock->sk = NULL;
 	sock_put(sk);
 out:
@@ -514,7 +532,7 @@ out:
 	spin_unlock_irqrestore(&rds_sock_lock, flags);
 }
 
-static void __exit rds_exit(void)
+static void rds_exit(void)
 {
 	sock_unregister(rds_family_ops.family);
 	proto_unregister(&rds_proto);
@@ -529,7 +547,7 @@ static void __exit rds_exit(void)
 }
 module_exit(rds_exit);
 
-static int __init rds_init(void)
+static int rds_init(void)
 {
 	int ret;
 
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 5d95fc007f1a..2f6b3fcc79f8 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -34,45 +34,52 @@
34#include <net/sock.h> 34#include <net/sock.h>
35#include <linux/in.h> 35#include <linux/in.h>
36#include <linux/if_arp.h> 36#include <linux/if_arp.h>
37#include <linux/jhash.h>
37#include "rds.h" 38#include "rds.h"
38 39
39/* 40#define BIND_HASH_SIZE 1024
40 * XXX this probably still needs more work.. no INADDR_ANY, and rbtrees aren't 41static struct hlist_head bind_hash_table[BIND_HASH_SIZE];
41 * particularly zippy.
42 *
43 * This is now called for every incoming frame so we arguably care much more
44 * about it than we used to.
45 */
46static DEFINE_SPINLOCK(rds_bind_lock); 42static DEFINE_SPINLOCK(rds_bind_lock);
47static struct rb_root rds_bind_tree = RB_ROOT;
48 43
49static struct rds_sock *rds_bind_tree_walk(__be32 addr, __be16 port, 44static struct hlist_head *hash_to_bucket(__be32 addr, __be16 port)
50 struct rds_sock *insert) 45{
46 return bind_hash_table + (jhash_2words((u32)addr, (u32)port, 0) &
47 (BIND_HASH_SIZE - 1));
48}
49
50static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
51 struct rds_sock *insert)
51{ 52{
52 struct rb_node **p = &rds_bind_tree.rb_node;
53 struct rb_node *parent = NULL;
54 struct rds_sock *rs; 53 struct rds_sock *rs;
54 struct hlist_node *node;
55 struct hlist_head *head = hash_to_bucket(addr, port);
55 u64 cmp; 56 u64 cmp;
56 u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port); 57 u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
57 58
58 while (*p) { 59 rcu_read_lock();
59 parent = *p; 60 hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) {
60 rs = rb_entry(parent, struct rds_sock, rs_bound_node);
61
62 cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) | 61 cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
63 be16_to_cpu(rs->rs_bound_port); 62 be16_to_cpu(rs->rs_bound_port);
64 63
65 if (needle < cmp) 64 if (cmp == needle) {
66 p = &(*p)->rb_left; 65 rcu_read_unlock();
67 else if (needle > cmp)
68 p = &(*p)->rb_right;
69 else
70 return rs; 66 return rs;
67 }
71 } 68 }
69 rcu_read_unlock();
72 70
73 if (insert) { 71 if (insert) {
74 rb_link_node(&insert->rs_bound_node, parent, p); 72 /*
75 rb_insert_color(&insert->rs_bound_node, &rds_bind_tree); 73 * make sure our addr and port are set before
74 * we are added to the list, other people
75 * in rcu will find us as soon as the
76 * hlist_add_head_rcu is done
77 */
78 insert->rs_bound_addr = addr;
79 insert->rs_bound_port = port;
80 rds_sock_addref(insert);
81
82 hlist_add_head_rcu(&insert->rs_bound_node, head);
76 } 83 }
77 return NULL; 84 return NULL;
78} 85}
@@ -86,15 +93,13 @@ static struct rds_sock *rds_bind_tree_walk(__be32 addr, __be16 port,
86struct rds_sock *rds_find_bound(__be32 addr, __be16 port) 93struct rds_sock *rds_find_bound(__be32 addr, __be16 port)
87{ 94{
88 struct rds_sock *rs; 95 struct rds_sock *rs;
89 unsigned long flags;
90 96
91 spin_lock_irqsave(&rds_bind_lock, flags); 97 rs = rds_bind_lookup(addr, port, NULL);
92 rs = rds_bind_tree_walk(addr, port, NULL); 98
93 if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) 99 if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
94 rds_sock_addref(rs); 100 rds_sock_addref(rs);
95 else 101 else
96 rs = NULL; 102 rs = NULL;
97 spin_unlock_irqrestore(&rds_bind_lock, flags);
98 103
99 rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr, 104 rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
100 ntohs(port)); 105 ntohs(port));
@@ -121,22 +126,15 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
121 do { 126 do {
122 if (rover == 0) 127 if (rover == 0)
123 rover++; 128 rover++;
124 if (rds_bind_tree_walk(addr, cpu_to_be16(rover), rs) == NULL) {
125 *port = cpu_to_be16(rover);
129 if (!rds_bind_lookup(addr, cpu_to_be16(rover), rs)) {
130 *port = rs->rs_bound_port;
126 ret = 0; 131 ret = 0;
132 rdsdebug("rs %p binding to %pI4:%d\n",
133 rs, &addr, (int)ntohs(*port));
127 break; 134 break;
128 } 135 }
129 } while (rover++ != last); 136 } while (rover++ != last);
130 137
131 if (ret == 0) {
132 rs->rs_bound_addr = addr;
133 rs->rs_bound_port = *port;
134 rds_sock_addref(rs);
135
136 rdsdebug("rs %p binding to %pI4:%d\n",
137 rs, &addr, (int)ntohs(*port));
138 }
139
140 spin_unlock_irqrestore(&rds_bind_lock, flags); 138 spin_unlock_irqrestore(&rds_bind_lock, flags);
141 139
142 return ret; 140 return ret;
@@ -153,7 +151,7 @@ void rds_remove_bound(struct rds_sock *rs)
153 rs, &rs->rs_bound_addr, 151 rs, &rs->rs_bound_addr,
154 ntohs(rs->rs_bound_port)); 152 ntohs(rs->rs_bound_port));
155 153
156 rb_erase(&rs->rs_bound_node, &rds_bind_tree); 154 hlist_del_init_rcu(&rs->rs_bound_node);
157 rds_sock_put(rs); 155 rds_sock_put(rs);
158 rs->rs_bound_addr = 0; 156 rs->rs_bound_addr = 0;
159 } 157 }
@@ -184,7 +182,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
184 goto out; 182 goto out;
185 183
186 trans = rds_trans_get_preferred(sin->sin_addr.s_addr); 184 trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
187 if (trans == NULL) {
185 if (!trans) {
188 ret = -EADDRNOTAVAIL; 186 ret = -EADDRNOTAVAIL;
189 rds_remove_bound(rs); 187 rds_remove_bound(rs);
190 if (printk_ratelimit()) 188 if (printk_ratelimit())
@@ -198,5 +196,9 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
198 196
199out: 197out:
200 release_sock(sk); 198 release_sock(sk);
199
200 /* we might have called rds_remove_bound on error */
201 if (ret)
202 synchronize_rcu();
201 return ret; 203 return ret;
202} 204}
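The synchronize_rcu() added on the error path exists because a failed bind may already have published the socket in the hash and then called rds_remove_bound(); before the caller can tear the socket down, lock-free readers that might still hold a pointer must drain. Continuing the hypothetical sketch above, the writer-side removal looks roughly like:

	static void my_remove(struct my_sock *ms)
	{
		spin_lock(&my_lock);
		hlist_del_init_rcu(&ms->node);	/* new readers can't find ms */
		spin_unlock(&my_lock);

		synchronize_rcu();	/* readers that already saw ms finish */
		/* only now is it safe to free ms or rebind it */
	}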
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 0871a29f0780..75ea686f27d5 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -141,7 +141,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
141 unsigned long flags; 141 unsigned long flags;
142 142
143 map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL); 143 map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
144 if (map == NULL)
144 if (!map)
145 return NULL; 145 return NULL;
146 146
147 map->m_addr = addr; 147 map->m_addr = addr;
@@ -159,7 +159,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
159 ret = rds_cong_tree_walk(addr, map); 159 ret = rds_cong_tree_walk(addr, map);
160 spin_unlock_irqrestore(&rds_cong_lock, flags); 160 spin_unlock_irqrestore(&rds_cong_lock, flags);
161 161
162 if (ret == NULL) {
162 if (!ret) {
163 ret = map; 163 ret = map;
164 map = NULL; 164 map = NULL;
165 } 165 }
@@ -205,7 +205,7 @@ int rds_cong_get_maps(struct rds_connection *conn)
205 conn->c_lcong = rds_cong_from_addr(conn->c_laddr); 205 conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
206 conn->c_fcong = rds_cong_from_addr(conn->c_faddr); 206 conn->c_fcong = rds_cong_from_addr(conn->c_faddr);
207 207
208 if (conn->c_lcong == NULL || conn->c_fcong == NULL)
208 if (!(conn->c_lcong && conn->c_fcong))
209 return -ENOMEM; 209 return -ENOMEM;
210 210
211 return 0; 211 return 0;
@@ -221,7 +221,7 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
221 list_for_each_entry(conn, &map->m_conn_list, c_map_item) { 221 list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
222 if (!test_and_set_bit(0, &conn->c_map_queued)) { 222 if (!test_and_set_bit(0, &conn->c_map_queued)) {
223 rds_stats_inc(s_cong_update_queued); 223 rds_stats_inc(s_cong_update_queued);
224 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
224 rds_send_xmit(conn);
225 } 225 }
226 } 226 }
227 227
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 7619b671ca28..870992e08cae 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -37,7 +37,6 @@
37 37
38#include "rds.h" 38#include "rds.h"
39#include "loop.h" 39#include "loop.h"
40#include "rdma.h"
41 40
42#define RDS_CONNECTION_HASH_BITS 12 41#define RDS_CONNECTION_HASH_BITS 12
43#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS) 42#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
@@ -63,18 +62,7 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
63 var |= RDS_INFO_CONNECTION_FLAG_##suffix; \ 62 var |= RDS_INFO_CONNECTION_FLAG_##suffix; \
64} while (0) 63} while (0)
65 64
66static inline int rds_conn_is_sending(struct rds_connection *conn)
67{
68 int ret = 0;
69
70 if (!mutex_trylock(&conn->c_send_lock))
71 ret = 1;
72 else
73 mutex_unlock(&conn->c_send_lock);
74
75 return ret;
76}
77
65/* rcu read lock must be held or the connection spinlock */
78static struct rds_connection *rds_conn_lookup(struct hlist_head *head, 66static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
79 __be32 laddr, __be32 faddr, 67 __be32 laddr, __be32 faddr,
80 struct rds_transport *trans) 68 struct rds_transport *trans)
@@ -82,7 +70,7 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
82 struct rds_connection *conn, *ret = NULL; 70 struct rds_connection *conn, *ret = NULL;
83 struct hlist_node *pos; 71 struct hlist_node *pos;
84 72
85 hlist_for_each_entry(conn, pos, head, c_hash_node) {
73 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
86 if (conn->c_faddr == faddr && conn->c_laddr == laddr && 74 if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
87 conn->c_trans == trans) { 75 conn->c_trans == trans) {
88 ret = conn; 76 ret = conn;
@@ -129,10 +117,11 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
129{ 117{
130 struct rds_connection *conn, *parent = NULL; 118 struct rds_connection *conn, *parent = NULL;
131 struct hlist_head *head = rds_conn_bucket(laddr, faddr); 119 struct hlist_head *head = rds_conn_bucket(laddr, faddr);
120 struct rds_transport *loop_trans;
132 unsigned long flags; 121 unsigned long flags;
133 int ret; 122 int ret;
134 123
135 spin_lock_irqsave(&rds_conn_lock, flags);
124 rcu_read_lock();
136 conn = rds_conn_lookup(head, laddr, faddr, trans); 125 conn = rds_conn_lookup(head, laddr, faddr, trans);
137 if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport && 126 if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
138 !is_outgoing) { 127 !is_outgoing) {
@@ -143,12 +132,12 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
143 parent = conn; 132 parent = conn;
144 conn = parent->c_passive; 133 conn = parent->c_passive;
145 } 134 }
146 spin_unlock_irqrestore(&rds_conn_lock, flags);
135 rcu_read_unlock();
147 if (conn) 136 if (conn)
148 goto out; 137 goto out;
149 138
150 conn = kmem_cache_zalloc(rds_conn_slab, gfp); 139 conn = kmem_cache_zalloc(rds_conn_slab, gfp);
151 if (conn == NULL) {
140 if (!conn) {
152 conn = ERR_PTR(-ENOMEM); 141 conn = ERR_PTR(-ENOMEM);
153 goto out; 142 goto out;
154 } 143 }
@@ -159,7 +148,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
159 spin_lock_init(&conn->c_lock); 148 spin_lock_init(&conn->c_lock);
160 conn->c_next_tx_seq = 1; 149 conn->c_next_tx_seq = 1;
161 150
162 mutex_init(&conn->c_send_lock);
151 init_waitqueue_head(&conn->c_waitq);
163 INIT_LIST_HEAD(&conn->c_send_queue); 152 INIT_LIST_HEAD(&conn->c_send_queue);
164 INIT_LIST_HEAD(&conn->c_retrans); 153 INIT_LIST_HEAD(&conn->c_retrans);
165 154
@@ -175,7 +164,9 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
175 * can bind to the destination address then we'd rather the messages 164 * can bind to the destination address then we'd rather the messages
176 * flow through loopback rather than either transport. 165 * flow through loopback rather than either transport.
177 */ 166 */
178 if (rds_trans_get_preferred(faddr)) {
167 loop_trans = rds_trans_get_preferred(faddr);
168 if (loop_trans) {
169 rds_trans_put(loop_trans);
179 conn->c_loopback = 1; 170 conn->c_loopback = 1;
180 if (is_outgoing && trans->t_prefer_loopback) { 171 if (is_outgoing && trans->t_prefer_loopback) {
181 /* "outgoing" connection - and the transport 172 /* "outgoing" connection - and the transport
@@ -238,7 +229,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
238 kmem_cache_free(rds_conn_slab, conn); 229 kmem_cache_free(rds_conn_slab, conn);
239 conn = found; 230 conn = found;
240 } else { 231 } else {
241 hlist_add_head(&conn->c_hash_node, head);
232 hlist_add_head_rcu(&conn->c_hash_node, head);
242 rds_cong_add_conn(conn); 233 rds_cong_add_conn(conn);
243 rds_conn_count++; 234 rds_conn_count++;
244 } 235 }
@@ -263,21 +254,91 @@ struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
263} 254}
264EXPORT_SYMBOL_GPL(rds_conn_create_outgoing); 255EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
265 256
257void rds_conn_shutdown(struct rds_connection *conn)
258{
259 /* shut it down unless it's down already */
260 if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
261 /*
262 * Quiesce the connection mgmt handlers before we start tearing
263 * things down. We don't hold the mutex for the entire
264 * duration of the shutdown operation, else we may be
265 * deadlocking with the CM handler. Instead, the CM event
266 * handler is supposed to check for state DISCONNECTING
267 */
268 mutex_lock(&conn->c_cm_lock);
269 if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
270 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
271 rds_conn_error(conn, "shutdown called in state %d\n",
272 atomic_read(&conn->c_state));
273 mutex_unlock(&conn->c_cm_lock);
274 return;
275 }
276 mutex_unlock(&conn->c_cm_lock);
277
278 wait_event(conn->c_waitq,
279 !test_bit(RDS_IN_XMIT, &conn->c_flags));
280
281 conn->c_trans->conn_shutdown(conn);
282 rds_conn_reset(conn);
283
284 if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
285 /* This can happen - eg when we're in the middle of tearing
286 * down the connection, and someone unloads the rds module.
287 * Quite reproducible with loopback connections.
288 * Mostly harmless.
289 */
290 rds_conn_error(conn,
291 "%s: failed to transition to state DOWN, "
292 "current state is %d\n",
293 __func__,
294 atomic_read(&conn->c_state));
295 return;
296 }
297 }
298
299 /* Then reconnect if it's still live.
300 * The passive side of an IB loopback connection is never added
301 * to the conn hash, so we never trigger a reconnect on this
302 * conn - the reconnect is always triggered by the active peer. */
303 cancel_delayed_work_sync(&conn->c_conn_w);
304 rcu_read_lock();
305 if (!hlist_unhashed(&conn->c_hash_node)) {
306 rcu_read_unlock();
307 rds_queue_reconnect(conn);
308 } else {
309 rcu_read_unlock();
310 }
311}
312
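rds_conn_shutdown() can only reason about DOWN/DISCONNECTING/UP this way because state changes go through rds_conn_transition(), which is defined elsewhere in RDS. A plausible sketch of such a helper (an assumption, not code from this patch) is a single compare-and-swap on the atomic state word:

	#include <asm/atomic.h>

	/* nonzero iff this caller won the race to move old -> new */
	static inline int my_conn_transition(atomic_t *state, int old, int new)
	{
		return atomic_cmpxchg(state, old, new) == old;
	}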
313/*
314 * Stop and free a connection.
315 *
316 * This can only be used in very limited circumstances. It assumes that once
317 * the conn has been shutdown that no one else is referencing the connection.
318 * We can only ensure this in the rmmod path in the current code.
319 */
266void rds_conn_destroy(struct rds_connection *conn) 320void rds_conn_destroy(struct rds_connection *conn)
267{ 321{
268 struct rds_message *rm, *rtmp; 322 struct rds_message *rm, *rtmp;
323 unsigned long flags;
269 324
270 rdsdebug("freeing conn %p for %pI4 -> " 325 rdsdebug("freeing conn %p for %pI4 -> "
271 "%pI4\n", conn, &conn->c_laddr, 326 "%pI4\n", conn, &conn->c_laddr,
272 &conn->c_faddr); 327 &conn->c_faddr);
273 328
274 hlist_del_init(&conn->c_hash_node);
329 /* Ensure conn will not be scheduled for reconnect */
330 spin_lock_irq(&rds_conn_lock);
331 hlist_del_init_rcu(&conn->c_hash_node);
332 spin_unlock_irq(&rds_conn_lock);
333 synchronize_rcu();
275 334
276 /* wait for the rds thread to shut it down */
277 atomic_set(&conn->c_state, RDS_CONN_ERROR);
278 cancel_delayed_work(&conn->c_conn_w);
279 queue_work(rds_wq, &conn->c_down_w);
280 flush_workqueue(rds_wq);
335 /* shut the connection down */
336 rds_conn_drop(conn);
337 flush_work(&conn->c_down_w);
338
339 /* make sure lingering queued work won't try to ref the conn */
340 cancel_delayed_work_sync(&conn->c_send_w);
341 cancel_delayed_work_sync(&conn->c_recv_w);
281 342
282 /* tear down queued messages */ 343 /* tear down queued messages */
283 list_for_each_entry_safe(rm, rtmp, 344 list_for_each_entry_safe(rm, rtmp,
@@ -302,7 +363,9 @@ void rds_conn_destroy(struct rds_connection *conn)
302 BUG_ON(!list_empty(&conn->c_retrans)); 363 BUG_ON(!list_empty(&conn->c_retrans));
303 kmem_cache_free(rds_conn_slab, conn); 364 kmem_cache_free(rds_conn_slab, conn);
304 365
366 spin_lock_irqsave(&rds_conn_lock, flags);
305 rds_conn_count--; 367 rds_conn_count--;
368 spin_unlock_irqrestore(&rds_conn_lock, flags);
306} 369}
307EXPORT_SYMBOL_GPL(rds_conn_destroy); 370EXPORT_SYMBOL_GPL(rds_conn_destroy);
308 371
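rds_conn_destroy() now follows the standard RCU teardown order: unpublish under the writer lock, wait out a grace period, quiesce deferred work, then free. Condensed into a sketch with hypothetical names (my_conn, my_conn_lock):

	#include <linux/rculist.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct my_conn {
		struct hlist_node hash_node;
		struct delayed_work work;
	};
	static DEFINE_SPINLOCK(my_conn_lock);

	static void my_conn_destroy(struct my_conn *c)
	{
		spin_lock_irq(&my_conn_lock);
		hlist_del_init_rcu(&c->hash_node);	/* 1. unpublish */
		spin_unlock_irq(&my_conn_lock);

		synchronize_rcu();			/* 2. existing readers drain */

		cancel_delayed_work_sync(&c->work);	/* 3. quiesce deferred work */

		kfree(c);				/* 4. nothing can reach c now */
	}

Skipping step 2 would let a lookup still inside its RCU read section dereference freed memory.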
@@ -316,23 +379,23 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
316 struct list_head *list; 379 struct list_head *list;
317 struct rds_connection *conn; 380 struct rds_connection *conn;
318 struct rds_message *rm; 381 struct rds_message *rm;
319 unsigned long flags;
320 unsigned int total = 0; 382 unsigned int total = 0;
383 unsigned long flags;
321 size_t i; 384 size_t i;
322 385
323 len /= sizeof(struct rds_info_message); 386 len /= sizeof(struct rds_info_message);
324 387
325 spin_lock_irqsave(&rds_conn_lock, flags);
388 rcu_read_lock();
326 389
327 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); 390 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
328 i++, head++) { 391 i++, head++) {
329 hlist_for_each_entry(conn, pos, head, c_hash_node) {
392 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
330 if (want_send) 393 if (want_send)
331 list = &conn->c_send_queue; 394 list = &conn->c_send_queue;
332 else 395 else
333 list = &conn->c_retrans; 396 list = &conn->c_retrans;
334 397
335 spin_lock(&conn->c_lock);
398 spin_lock_irqsave(&conn->c_lock, flags);
336 399
337 /* XXX too lazy to maintain counts.. */ 400 /* XXX too lazy to maintain counts.. */
338 list_for_each_entry(rm, list, m_conn_item) { 401 list_for_each_entry(rm, list, m_conn_item) {
@@ -343,11 +406,10 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
343 conn->c_faddr, 0); 406 conn->c_faddr, 0);
344 } 407 }
345 408
346 spin_unlock(&conn->c_lock);
409 spin_unlock_irqrestore(&conn->c_lock, flags);
347 } 410 }
348 } 411 }
349
350 spin_unlock_irqrestore(&rds_conn_lock, flags);
412 rcu_read_unlock();
351 413
352 lens->nr = total; 414 lens->nr = total;
353 lens->each = sizeof(struct rds_info_message); 415 lens->each = sizeof(struct rds_info_message);
@@ -377,19 +439,17 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
377 uint64_t buffer[(item_len + 7) / 8]; 439 uint64_t buffer[(item_len + 7) / 8];
378 struct hlist_head *head; 440 struct hlist_head *head;
379 struct hlist_node *pos; 441 struct hlist_node *pos;
380 struct hlist_node *tmp;
381 struct rds_connection *conn; 442 struct rds_connection *conn;
382 unsigned long flags;
383 size_t i; 443 size_t i;
384 444
385 spin_lock_irqsave(&rds_conn_lock, flags);
445 rcu_read_lock();
386 446
387 lens->nr = 0; 447 lens->nr = 0;
388 lens->each = item_len; 448 lens->each = item_len;
389 449
390 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); 450 for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
391 i++, head++) { 451 i++, head++) {
392 hlist_for_each_entry_safe(conn, pos, tmp, head, c_hash_node) {
452 hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
393 453
394 /* XXX no c_lock usage.. */ 454 /* XXX no c_lock usage.. */
395 if (!visitor(conn, buffer)) 455 if (!visitor(conn, buffer))
@@ -405,8 +465,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
405 lens->nr++; 465 lens->nr++;
406 } 466 }
407 } 467 }
408
409 spin_unlock_irqrestore(&rds_conn_lock, flags);
468 rcu_read_unlock();
410} 469}
411EXPORT_SYMBOL_GPL(rds_for_each_conn_info); 470EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
412 471
@@ -423,8 +482,8 @@ static int rds_conn_info_visitor(struct rds_connection *conn,
423 sizeof(cinfo->transport)); 482 sizeof(cinfo->transport));
424 cinfo->flags = 0; 483 cinfo->flags = 0;
425 484
426 rds_conn_info_set(cinfo->flags,
427 rds_conn_is_sending(conn), SENDING);
485 rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
486 SENDING);
428 /* XXX Future: return the state rather than these funky bits */ 487 /* XXX Future: return the state rather than these funky bits */
429 rds_conn_info_set(cinfo->flags, 488 rds_conn_info_set(cinfo->flags,
430 atomic_read(&conn->c_state) == RDS_CONN_CONNECTING, 489 atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
@@ -444,12 +503,12 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
444 sizeof(struct rds_info_connection)); 503 sizeof(struct rds_info_connection));
445} 504}
446 505
447int __init rds_conn_init(void)
506int rds_conn_init(void)
448{ 507{
449 rds_conn_slab = kmem_cache_create("rds_connection", 508 rds_conn_slab = kmem_cache_create("rds_connection",
450 sizeof(struct rds_connection), 509 sizeof(struct rds_connection),
451 0, 0, NULL); 510 0, 0, NULL);
452 if (rds_conn_slab == NULL)
511 if (!rds_conn_slab)
453 return -ENOMEM; 512 return -ENOMEM;
454 513
455 rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info); 514 rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
@@ -487,6 +546,18 @@ void rds_conn_drop(struct rds_connection *conn)
487EXPORT_SYMBOL_GPL(rds_conn_drop); 546EXPORT_SYMBOL_GPL(rds_conn_drop);
488 547
489/* 548/*
549 * If the connection is down, trigger a connect. We may have scheduled a
550 * delayed reconnect however - in this case we should not interfere.
551 */
552void rds_conn_connect_if_down(struct rds_connection *conn)
553{
554 if (rds_conn_state(conn) == RDS_CONN_DOWN &&
555 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
556 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
557}
558EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
559
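The RDS_RECONNECT_PENDING guard works because test_and_set_bit() is an atomic read-modify-write: of any number of racing callers, exactly one sees the bit clear and queues the work. The same idiom in isolation (names hypothetical):

	#include <linux/bitops.h>
	#include <linux/workqueue.h>

	#define MY_PENDING 0
	static unsigned long my_flags;
	static struct delayed_work my_connect_work;

	static void my_connect_if_down(struct workqueue_struct *wq)
	{
		/* only the caller that flips 0 -> 1 queues the work */
		if (!test_and_set_bit(MY_PENDING, &my_flags))
			queue_delayed_work(wq, &my_connect_work, 0);
	}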
560/*
490 * An error occurred on the connection 561 * An error occurred on the connection
491 */ 562 */
492void 563void
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 8f2d6dd7700a..b12a3951167d 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -53,12 +53,71 @@ MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
53module_param(rds_ib_retry_count, int, 0444); 53module_param(rds_ib_retry_count, int, 0444);
54MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error"); 54MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
55 55
56/*
57 * we have a clumsy combination of RCU and a rwsem protecting this list
58 * because it is used both in the get_mr fast path and while blocking in
59 * the FMR flushing path.
60 */
61DECLARE_RWSEM(rds_ib_devices_lock);
56struct list_head rds_ib_devices; 62struct list_head rds_ib_devices;
57 63
58/* NOTE: if also grabbing ibdev lock, grab this first */ 64/* NOTE: if also grabbing ibdev lock, grab this first */
59DEFINE_SPINLOCK(ib_nodev_conns_lock); 65DEFINE_SPINLOCK(ib_nodev_conns_lock);
60LIST_HEAD(ib_nodev_conns); 66LIST_HEAD(ib_nodev_conns);
61 67
68void rds_ib_nodev_connect(void)
69{
70 struct rds_ib_connection *ic;
71
72 spin_lock(&ib_nodev_conns_lock);
73 list_for_each_entry(ic, &ib_nodev_conns, ib_node)
74 rds_conn_connect_if_down(ic->conn);
75 spin_unlock(&ib_nodev_conns_lock);
76}
77
78void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
79{
80 struct rds_ib_connection *ic;
81 unsigned long flags;
82
83 spin_lock_irqsave(&rds_ibdev->spinlock, flags);
84 list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
85 rds_conn_drop(ic->conn);
86 spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
87}
88
89/*
90 * rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references
91 * from interrupt context so we push freeing off into a work struct in krdsd.
92 */
93static void rds_ib_dev_free(struct work_struct *work)
94{
95 struct rds_ib_ipaddr *i_ipaddr, *i_next;
96 struct rds_ib_device *rds_ibdev = container_of(work,
97 struct rds_ib_device, free_work);
98
99 if (rds_ibdev->mr_pool)
100 rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
101 if (rds_ibdev->mr)
102 ib_dereg_mr(rds_ibdev->mr);
103 if (rds_ibdev->pd)
104 ib_dealloc_pd(rds_ibdev->pd);
105
106 list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
107 list_del(&i_ipaddr->list);
108 kfree(i_ipaddr);
109 }
110
111 kfree(rds_ibdev);
112}
113
114void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
115{
116 BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
117 if (atomic_dec_and_test(&rds_ibdev->refcount))
118 queue_work(rds_wq, &rds_ibdev->free_work);
119}
120
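rds_ib_dev_put() defers the actual free to krdsd because, as the comment above notes, the last reference can drop from interrupt context while the teardown itself blocks. The pattern in miniature (my_dev is hypothetical; schedule_work() stands in for the rds_wq queueing):

	#include <linux/slab.h>
	#include <linux/workqueue.h>
	#include <asm/atomic.h>

	struct my_dev {
		atomic_t refcount;
		struct work_struct free_work;
	};

	static void my_dev_free(struct work_struct *work)
	{
		struct my_dev *d = container_of(work, struct my_dev, free_work);

		/* process context: blocking teardown is safe here */
		kfree(d);
	}

	static void my_dev_put(struct my_dev *d)
	{
		/* callable from atomic context; the free itself is deferred */
		if (atomic_dec_and_test(&d->refcount))
			schedule_work(&d->free_work);
	}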
62void rds_ib_add_one(struct ib_device *device) 121void rds_ib_add_one(struct ib_device *device)
63{ 122{
64 struct rds_ib_device *rds_ibdev; 123 struct rds_ib_device *rds_ibdev;
@@ -77,11 +136,14 @@ void rds_ib_add_one(struct ib_device *device)
77 goto free_attr; 136 goto free_attr;
78 } 137 }
79 138
80 rds_ibdev = kmalloc(sizeof *rds_ibdev, GFP_KERNEL);
139 rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
140 ibdev_to_node(device));
81 if (!rds_ibdev) 141 if (!rds_ibdev)
82 goto free_attr; 142 goto free_attr;
83 143
84 spin_lock_init(&rds_ibdev->spinlock); 144 spin_lock_init(&rds_ibdev->spinlock);
145 atomic_set(&rds_ibdev->refcount, 1);
146 INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
85 147
86 rds_ibdev->max_wrs = dev_attr->max_qp_wr; 148 rds_ibdev->max_wrs = dev_attr->max_qp_wr;
87 rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE); 149 rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
@@ -91,68 +153,107 @@ void rds_ib_add_one(struct ib_device *device)
91 min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) : 153 min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
92 fmr_pool_size; 154 fmr_pool_size;
93 155
156 rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
157 rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;
158
94 rds_ibdev->dev = device; 159 rds_ibdev->dev = device;
95 rds_ibdev->pd = ib_alloc_pd(device); 160 rds_ibdev->pd = ib_alloc_pd(device);
96 if (IS_ERR(rds_ibdev->pd))
97 goto free_dev;
161 if (IS_ERR(rds_ibdev->pd)) {
162 rds_ibdev->pd = NULL;
163 goto put_dev;
164 }
98 165
99 rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd,
100 IB_ACCESS_LOCAL_WRITE);
101 if (IS_ERR(rds_ibdev->mr))
102 goto err_pd;
166 rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
167 if (IS_ERR(rds_ibdev->mr)) {
168 rds_ibdev->mr = NULL;
169 goto put_dev;
170 }
103 171
104 rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev); 172 rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
105 if (IS_ERR(rds_ibdev->mr_pool)) { 173 if (IS_ERR(rds_ibdev->mr_pool)) {
106 rds_ibdev->mr_pool = NULL; 174 rds_ibdev->mr_pool = NULL;
107 goto err_mr;
175 goto put_dev;
108 } 176 }
109 177
110 INIT_LIST_HEAD(&rds_ibdev->ipaddr_list); 178 INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
111 INIT_LIST_HEAD(&rds_ibdev->conn_list); 179 INIT_LIST_HEAD(&rds_ibdev->conn_list);
112 list_add_tail(&rds_ibdev->list, &rds_ib_devices);
180
181 down_write(&rds_ib_devices_lock);
182 list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
183 up_write(&rds_ib_devices_lock);
184 atomic_inc(&rds_ibdev->refcount);
113 185
114 ib_set_client_data(device, &rds_ib_client, rds_ibdev); 186 ib_set_client_data(device, &rds_ib_client, rds_ibdev);
187 atomic_inc(&rds_ibdev->refcount);
115 188
116 goto free_attr;
189 rds_ib_nodev_connect();
117 190
118err_mr:
119 ib_dereg_mr(rds_ibdev->mr);
120err_pd:
121 ib_dealloc_pd(rds_ibdev->pd);
122free_dev:
123 kfree(rds_ibdev);
191put_dev:
192 rds_ib_dev_put(rds_ibdev);
124free_attr: 193free_attr:
125 kfree(dev_attr); 194 kfree(dev_attr);
126} 195}
127 196
197/*
198 * New connections use this to find the device to associate with the
199 * connection. It's not in the fast path so we're not concerned about the
200 * performance of the IB call. (As of this writing, it uses an interrupt
201 * blocking spinlock to serialize walking a per-device list of all registered
202 * clients.)
203 *
204 * RCU is used to handle incoming connections racing with device teardown.
205 * Rather than use a lock to serialize removal from the client_data and
206 * getting a new reference, we use an RCU grace period. The destruction
207 * path removes the device from client_data and then waits for all RCU
208 * readers to finish.
209 *
210 * A new connection can get NULL from this if it's arriving on a
211 * device that is in the process of being removed.
212 */
213struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
214{
215 struct rds_ib_device *rds_ibdev;
216
217 rcu_read_lock();
218 rds_ibdev = ib_get_client_data(device, &rds_ib_client);
219 if (rds_ibdev)
220 atomic_inc(&rds_ibdev->refcount);
221 rcu_read_unlock();
222 return rds_ibdev;
223}
224
225/*
226 * The IB stack is letting us know that a device is going away. This can
227 * happen if the underlying HCA driver is removed or if PCI hotplug is removing
228 * the pci function, for example.
229 *
230 * This can be called at any time and can be racing with any other RDS path.
231 */
128void rds_ib_remove_one(struct ib_device *device) 232void rds_ib_remove_one(struct ib_device *device)
129{ 233{
130 struct rds_ib_device *rds_ibdev; 234 struct rds_ib_device *rds_ibdev;
131 struct rds_ib_ipaddr *i_ipaddr, *i_next;
132 235
133 rds_ibdev = ib_get_client_data(device, &rds_ib_client); 236 rds_ibdev = ib_get_client_data(device, &rds_ib_client);
134 if (!rds_ibdev) 237 if (!rds_ibdev)
135 return; 238 return;
136 239
137 list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
138 list_del(&i_ipaddr->list);
139 kfree(i_ipaddr);
140 }
240 rds_ib_dev_shutdown(rds_ibdev);
141 241
142 rds_ib_destroy_conns(rds_ibdev);
242 /* stop connection attempts from getting a reference to this device. */
243 ib_set_client_data(device, &rds_ib_client, NULL);
143 244
144 if (rds_ibdev->mr_pool)
145 rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
146
245 down_write(&rds_ib_devices_lock);
246 list_del_rcu(&rds_ibdev->list);
247 up_write(&rds_ib_devices_lock);
147 ib_dereg_mr(rds_ibdev->mr);
148
149 while (ib_dealloc_pd(rds_ibdev->pd)) {
150 rdsdebug("Failed to dealloc pd %p\n", rds_ibdev->pd);
151 msleep(1);
152 }
153 248
154 list_del(&rds_ibdev->list);
155 kfree(rds_ibdev);
249 /*
250 * This synchronize rcu is waiting for readers of both the ib
251 * client data and the devices list to finish before we drop
252 * both of those references.
253 */
254 synchronize_rcu();
255 rds_ib_dev_put(rds_ibdev);
256 rds_ib_dev_put(rds_ibdev);
156} 257}
157 258
158struct ib_client rds_ib_client = { 259struct ib_client rds_ib_client = {
@@ -186,7 +287,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
186 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid); 287 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
187 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid); 288 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
188 289
189 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); 290 rds_ibdev = ic->rds_ibdev;
190 iinfo->max_send_wr = ic->i_send_ring.w_nr; 291 iinfo->max_send_wr = ic->i_send_ring.w_nr;
191 iinfo->max_recv_wr = ic->i_recv_ring.w_nr; 292 iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
192 iinfo->max_send_sge = rds_ibdev->max_sge; 293 iinfo->max_send_sge = rds_ibdev->max_sge;
@@ -248,29 +349,36 @@ static int rds_ib_laddr_check(__be32 addr)
248 return ret; 349 return ret;
249} 350}
250 351
352static void rds_ib_unregister_client(void)
353{
354 ib_unregister_client(&rds_ib_client);
355 /* wait for rds_ib_dev_free() to complete */
356 flush_workqueue(rds_wq);
357}
358
251void rds_ib_exit(void) 359void rds_ib_exit(void)
252{ 360{
253 rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info); 361 rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
362 rds_ib_unregister_client();
254 rds_ib_destroy_nodev_conns(); 363 rds_ib_destroy_nodev_conns();
255 ib_unregister_client(&rds_ib_client);
256 rds_ib_sysctl_exit(); 364 rds_ib_sysctl_exit();
257 rds_ib_recv_exit(); 365 rds_ib_recv_exit();
258 rds_trans_unregister(&rds_ib_transport); 366 rds_trans_unregister(&rds_ib_transport);
367 rds_ib_fmr_exit();
259} 368}
260 369
261struct rds_transport rds_ib_transport = { 370struct rds_transport rds_ib_transport = {
262 .laddr_check = rds_ib_laddr_check, 371 .laddr_check = rds_ib_laddr_check,
263 .xmit_complete = rds_ib_xmit_complete, 372 .xmit_complete = rds_ib_xmit_complete,
264 .xmit = rds_ib_xmit, 373 .xmit = rds_ib_xmit,
265 .xmit_cong_map = NULL,
266 .xmit_rdma = rds_ib_xmit_rdma, 374 .xmit_rdma = rds_ib_xmit_rdma,
375 .xmit_atomic = rds_ib_xmit_atomic,
267 .recv = rds_ib_recv, 376 .recv = rds_ib_recv,
268 .conn_alloc = rds_ib_conn_alloc, 377 .conn_alloc = rds_ib_conn_alloc,
269 .conn_free = rds_ib_conn_free, 378 .conn_free = rds_ib_conn_free,
270 .conn_connect = rds_ib_conn_connect, 379 .conn_connect = rds_ib_conn_connect,
271 .conn_shutdown = rds_ib_conn_shutdown, 380 .conn_shutdown = rds_ib_conn_shutdown,
272 .inc_copy_to_user = rds_ib_inc_copy_to_user, 381 .inc_copy_to_user = rds_ib_inc_copy_to_user,
273 .inc_purge = rds_ib_inc_purge,
274 .inc_free = rds_ib_inc_free, 382 .inc_free = rds_ib_inc_free,
275 .cm_initiate_connect = rds_ib_cm_initiate_connect, 383 .cm_initiate_connect = rds_ib_cm_initiate_connect,
276 .cm_handle_connect = rds_ib_cm_handle_connect, 384 .cm_handle_connect = rds_ib_cm_handle_connect,
@@ -286,16 +394,20 @@ struct rds_transport rds_ib_transport = {
286 .t_type = RDS_TRANS_IB 394 .t_type = RDS_TRANS_IB
287}; 395};
288 396
289int __init rds_ib_init(void)
397int rds_ib_init(void)
290{ 398{
291 int ret; 399 int ret;
292 400
293 INIT_LIST_HEAD(&rds_ib_devices); 401 INIT_LIST_HEAD(&rds_ib_devices);
294 402
295 ret = ib_register_client(&rds_ib_client);
403 ret = rds_ib_fmr_init();
296 if (ret) 404 if (ret)
297 goto out; 405 goto out;
298 406
407 ret = ib_register_client(&rds_ib_client);
408 if (ret)
409 goto out_fmr_exit;
410
299 ret = rds_ib_sysctl_init(); 411 ret = rds_ib_sysctl_init();
300 if (ret) 412 if (ret)
301 goto out_ibreg; 413 goto out_ibreg;
@@ -317,7 +429,9 @@ out_recv:
317out_sysctl: 429out_sysctl:
318 rds_ib_sysctl_exit(); 430 rds_ib_sysctl_exit();
319out_ibreg: 431out_ibreg:
320 ib_unregister_client(&rds_ib_client);
432 rds_ib_unregister_client();
433out_fmr_exit:
434 rds_ib_fmr_exit();
321out: 435out:
322 return ret; 436 return ret;
323} 437}
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 64df4e79b29f..7ad3d57e06a5 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -3,11 +3,13 @@
3 3
4#include <rdma/ib_verbs.h> 4#include <rdma/ib_verbs.h>
5#include <rdma/rdma_cm.h> 5#include <rdma/rdma_cm.h>
6#include <linux/pci.h>
7#include <linux/slab.h>
6#include "rds.h" 8#include "rds.h"
7#include "rdma_transport.h" 9#include "rdma_transport.h"
8 10
9#define RDS_FMR_SIZE 256 11#define RDS_FMR_SIZE 256
10#define RDS_FMR_POOL_SIZE 4096
12#define RDS_FMR_POOL_SIZE 8192
11 13
12#define RDS_IB_MAX_SGE 8 14#define RDS_IB_MAX_SGE 8
13#define RDS_IB_RECV_SGE 2 15#define RDS_IB_RECV_SGE 2
@@ -19,6 +21,9 @@
19 21
20#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */ 22#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */
21 23
24#define RDS_IB_RECYCLE_BATCH_COUNT 32
25
26extern struct rw_semaphore rds_ib_devices_lock;
22extern struct list_head rds_ib_devices; 27extern struct list_head rds_ib_devices;
23 28
24/* 29/*
@@ -26,20 +31,29 @@ extern struct list_head rds_ib_devices;
26 * try and minimize the amount of memory tied up both the device and 31 * try and minimize the amount of memory tied up both the device and
27 * socket receive queues. 32 * socket receive queues.
28 */ 33 */
29/* page offset of the final full frag that fits in the page */
30#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
31struct rds_page_frag { 34struct rds_page_frag {
32 struct list_head f_item; 35 struct list_head f_item;
33 struct page *f_page;
34 unsigned long f_offset;
35 dma_addr_t f_mapped;
36 struct list_head f_cache_entry;
37 struct scatterlist f_sg;
36}; 38};
37 39
38struct rds_ib_incoming { 40struct rds_ib_incoming {
39 struct list_head ii_frags; 41 struct list_head ii_frags;
42 struct list_head ii_cache_entry;
40 struct rds_incoming ii_inc; 43 struct rds_incoming ii_inc;
41}; 44};
42 45
46struct rds_ib_cache_head {
47 struct list_head *first;
48 unsigned long count;
49};
50
51struct rds_ib_refill_cache {
52 struct rds_ib_cache_head *percpu;
53 struct list_head *xfer;
54 struct list_head *ready;
55};
56
43struct rds_ib_connect_private { 57struct rds_ib_connect_private {
44 /* Add new fields at the end, and don't permute existing fields. */ 58 /* Add new fields at the end, and don't permute existing fields. */
45 __be32 dp_saddr; 59 __be32 dp_saddr;
@@ -53,8 +67,7 @@ struct rds_ib_connect_private {
53}; 67};
54 68
55struct rds_ib_send_work { 69struct rds_ib_send_work {
56 struct rds_message *s_rm;
57 struct rds_rdma_op *s_op;
70 void *s_op;
58 struct ib_send_wr s_wr; 71 struct ib_send_wr s_wr;
59 struct ib_sge s_sge[RDS_IB_MAX_SGE]; 72 struct ib_sge s_sge[RDS_IB_MAX_SGE];
60 unsigned long s_queued; 73 unsigned long s_queued;
@@ -92,10 +105,11 @@ struct rds_ib_connection {
92 105
93 /* tx */ 106 /* tx */
94 struct rds_ib_work_ring i_send_ring; 107 struct rds_ib_work_ring i_send_ring;
95 struct rds_message *i_rm;
108 struct rm_data_op *i_data_op;
96 struct rds_header *i_send_hdrs; 109 struct rds_header *i_send_hdrs;
97 u64 i_send_hdrs_dma; 110 u64 i_send_hdrs_dma;
98 struct rds_ib_send_work *i_sends; 111 struct rds_ib_send_work *i_sends;
112 atomic_t i_signaled_sends;
99 113
100 /* rx */ 114 /* rx */
101 struct tasklet_struct i_recv_tasklet; 115 struct tasklet_struct i_recv_tasklet;
@@ -106,8 +120,9 @@ struct rds_ib_connection {
106 struct rds_header *i_recv_hdrs; 120 struct rds_header *i_recv_hdrs;
107 u64 i_recv_hdrs_dma; 121 u64 i_recv_hdrs_dma;
108 struct rds_ib_recv_work *i_recvs; 122 struct rds_ib_recv_work *i_recvs;
109 struct rds_page_frag i_frag;
110 u64 i_ack_recv; /* last ACK received */ 123 u64 i_ack_recv; /* last ACK received */
124 struct rds_ib_refill_cache i_cache_incs;
125 struct rds_ib_refill_cache i_cache_frags;
111 126
112 /* sending acks */ 127 /* sending acks */
113 unsigned long i_ack_flags; 128 unsigned long i_ack_flags;
@@ -138,7 +153,6 @@ struct rds_ib_connection {
138 153
139 /* Batched completions */ 154 /* Batched completions */
140 unsigned int i_unsignaled_wrs; 155 unsigned int i_unsignaled_wrs;
141 long i_unsignaled_bytes;
142}; 156};
143 157
144/* This assumes that atomic_t is at least 32 bits */ 158/* This assumes that atomic_t is at least 32 bits */
@@ -164,9 +178,17 @@ struct rds_ib_device {
164 unsigned int max_fmrs; 178 unsigned int max_fmrs;
165 int max_sge; 179 int max_sge;
166 unsigned int max_wrs; 180 unsigned int max_wrs;
181 unsigned int max_initiator_depth;
182 unsigned int max_responder_resources;
167 spinlock_t spinlock; /* protect the above */ 183 spinlock_t spinlock; /* protect the above */
184 atomic_t refcount;
185 struct work_struct free_work;
168}; 186};
169 187
188#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
189#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
190#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
191
170/* bits for i_ack_flags */ 192/* bits for i_ack_flags */
171#define IB_ACK_IN_FLIGHT 0 193#define IB_ACK_IN_FLIGHT 0
172#define IB_ACK_REQUESTED 1 194#define IB_ACK_REQUESTED 1
@@ -202,6 +224,8 @@ struct rds_ib_statistics {
202 uint64_t s_ib_rdma_mr_pool_flush; 224 uint64_t s_ib_rdma_mr_pool_flush;
203 uint64_t s_ib_rdma_mr_pool_wait; 225 uint64_t s_ib_rdma_mr_pool_wait;
204 uint64_t s_ib_rdma_mr_pool_depleted; 226 uint64_t s_ib_rdma_mr_pool_depleted;
227 uint64_t s_ib_atomic_cswp;
228 uint64_t s_ib_atomic_fadd;
205}; 229};
206 230
207extern struct workqueue_struct *rds_ib_wq; 231extern struct workqueue_struct *rds_ib_wq;
@@ -243,6 +267,8 @@ static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
243extern struct rds_transport rds_ib_transport; 267extern struct rds_transport rds_ib_transport;
244extern void rds_ib_add_one(struct ib_device *device); 268extern void rds_ib_add_one(struct ib_device *device);
245extern void rds_ib_remove_one(struct ib_device *device); 269extern void rds_ib_remove_one(struct ib_device *device);
270struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
271void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
246extern struct ib_client rds_ib_client; 272extern struct ib_client rds_ib_client;
247 273
248extern unsigned int fmr_pool_size; 274extern unsigned int fmr_pool_size;
@@ -258,7 +284,7 @@ void rds_ib_conn_free(void *arg);
258int rds_ib_conn_connect(struct rds_connection *conn); 284int rds_ib_conn_connect(struct rds_connection *conn);
259void rds_ib_conn_shutdown(struct rds_connection *conn); 285void rds_ib_conn_shutdown(struct rds_connection *conn);
260void rds_ib_state_change(struct sock *sk); 286void rds_ib_state_change(struct sock *sk);
261int __init rds_ib_listen_init(void);
287int rds_ib_listen_init(void);
262void rds_ib_listen_stop(void); 288void rds_ib_listen_stop(void);
263void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...); 289void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
264int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, 290int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
@@ -275,15 +301,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn,
275int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr); 301int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
276void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); 302void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
277void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn); 303void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
278void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock);
279static inline void rds_ib_destroy_nodev_conns(void)
280{
281 __rds_ib_destroy_conns(&ib_nodev_conns, &ib_nodev_conns_lock);
282}
283static inline void rds_ib_destroy_conns(struct rds_ib_device *rds_ibdev)
284{
285 __rds_ib_destroy_conns(&rds_ibdev->conn_list, &rds_ibdev->spinlock);
286}
304void rds_ib_destroy_nodev_conns(void);
287struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *); 305struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
288void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo); 306void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
289void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *); 307void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
@@ -292,14 +310,16 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
292void rds_ib_sync_mr(void *trans_private, int dir); 310void rds_ib_sync_mr(void *trans_private, int dir);
293void rds_ib_free_mr(void *trans_private, int invalidate); 311void rds_ib_free_mr(void *trans_private, int invalidate);
294void rds_ib_flush_mrs(void); 312void rds_ib_flush_mrs(void);
313int rds_ib_fmr_init(void);
314void rds_ib_fmr_exit(void);
295 315
296/* ib_recv.c */ 316/* ib_recv.c */
297int __init rds_ib_recv_init(void);
317int rds_ib_recv_init(void);
298void rds_ib_recv_exit(void); 318void rds_ib_recv_exit(void);
299int rds_ib_recv(struct rds_connection *conn); 319int rds_ib_recv(struct rds_connection *conn);
300int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
301 gfp_t page_gfp, int prefill);
302void rds_ib_inc_purge(struct rds_incoming *inc);
320int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
321void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
322void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
303void rds_ib_inc_free(struct rds_incoming *inc); 323void rds_ib_inc_free(struct rds_incoming *inc);
304int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 324int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
305 size_t size); 325 size_t size);
@@ -325,17 +345,19 @@ u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
325extern wait_queue_head_t rds_ib_ring_empty_wait; 345extern wait_queue_head_t rds_ib_ring_empty_wait;
326 346
327/* ib_send.c */ 347/* ib_send.c */
348char *rds_ib_wc_status_str(enum ib_wc_status status);
328void rds_ib_xmit_complete(struct rds_connection *conn); 349void rds_ib_xmit_complete(struct rds_connection *conn);
329int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, 350int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
330 unsigned int hdr_off, unsigned int sg, unsigned int off); 351 unsigned int hdr_off, unsigned int sg, unsigned int off);
331void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context); 352void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
332void rds_ib_send_init_ring(struct rds_ib_connection *ic); 353void rds_ib_send_init_ring(struct rds_ib_connection *ic);
333void rds_ib_send_clear_ring(struct rds_ib_connection *ic); 354void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
334int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
355int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
335void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits); 356void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
336void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted); 357void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
337int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted, 358int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
338 u32 *adv_credits, int need_posted, int max_posted); 359 u32 *adv_credits, int need_posted, int max_posted);
360int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
339 361
340/* ib_stats.c */ 362/* ib_stats.c */
341DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats); 363DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
@@ -344,7 +366,7 @@ unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
344 unsigned int avail); 366 unsigned int avail);
345 367
346/* ib_sysctl.c */ 368/* ib_sysctl.c */
347int __init rds_ib_sysctl_init(void);
369int rds_ib_sysctl_init(void);
348void rds_ib_sysctl_exit(void); 370void rds_ib_sysctl_exit(void);
349extern unsigned long rds_ib_sysctl_max_send_wr; 371extern unsigned long rds_ib_sysctl_max_send_wr;
350extern unsigned long rds_ib_sysctl_max_recv_wr; 372extern unsigned long rds_ib_sysctl_max_recv_wr;
@@ -354,28 +376,4 @@ extern unsigned long rds_ib_sysctl_max_recv_allocation;
354extern unsigned int rds_ib_sysctl_flow_control; 376extern unsigned int rds_ib_sysctl_flow_control;
355extern ctl_table rds_ib_sysctl_table[]; 377extern ctl_table rds_ib_sysctl_table[];
356 378
357/*
358 * Helper functions for getting/setting the header and data SGEs in
359 * RDS packets (not RDMA)
360 *
361 * From version 3.1 onwards, header is in front of data in the sge.
362 */
363static inline struct ib_sge *
364rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
365{
366 if (ic->conn->c_version > RDS_PROTOCOL_3_0)
367 return &sge[0];
368 else
369 return &sge[1];
370}
371
372static inline struct ib_sge *
373rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
374{
375 if (ic->conn->c_version > RDS_PROTOCOL_3_0)
376 return &sge[1];
377 else
378 return &sge[0];
379}
380
381#endif 379#endif
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index f68832798db2..ee369d201a65 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -38,6 +38,36 @@
38#include "rds.h" 38#include "rds.h"
39#include "ib.h" 39#include "ib.h"
40 40
41static char *rds_ib_event_type_strings[] = {
42#define RDS_IB_EVENT_STRING(foo) \
43 [IB_EVENT_##foo] = __stringify(IB_EVENT_##foo)
44 RDS_IB_EVENT_STRING(CQ_ERR),
45 RDS_IB_EVENT_STRING(QP_FATAL),
46 RDS_IB_EVENT_STRING(QP_REQ_ERR),
47 RDS_IB_EVENT_STRING(QP_ACCESS_ERR),
48 RDS_IB_EVENT_STRING(COMM_EST),
49 RDS_IB_EVENT_STRING(SQ_DRAINED),
50 RDS_IB_EVENT_STRING(PATH_MIG),
51 RDS_IB_EVENT_STRING(PATH_MIG_ERR),
52 RDS_IB_EVENT_STRING(DEVICE_FATAL),
53 RDS_IB_EVENT_STRING(PORT_ACTIVE),
54 RDS_IB_EVENT_STRING(PORT_ERR),
55 RDS_IB_EVENT_STRING(LID_CHANGE),
56 RDS_IB_EVENT_STRING(PKEY_CHANGE),
57 RDS_IB_EVENT_STRING(SM_CHANGE),
58 RDS_IB_EVENT_STRING(SRQ_ERR),
59 RDS_IB_EVENT_STRING(SRQ_LIMIT_REACHED),
60 RDS_IB_EVENT_STRING(QP_LAST_WQE_REACHED),
61 RDS_IB_EVENT_STRING(CLIENT_REREGISTER),
62#undef RDS_IB_EVENT_STRING
63};
64
65static char *rds_ib_event_str(enum ib_event_type type)
66{
67 return rds_str_array(rds_ib_event_type_strings,
68 ARRAY_SIZE(rds_ib_event_type_strings), type);
69};
70
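rds_str_array() is defined elsewhere in RDS; presumably it is a bounds-checked table lookup that tolerates the holes left by the designated initializers above. A sketch of such a helper (an assumption, not this patch's code):

	static char *my_str_array(char **array, size_t elements, size_t index)
	{
		/* out-of-range values and gaps both fall back to "unknown" */
		if (index < elements && array[index])
			return array[index];
		return "unknown";
	}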
41/* 71/*
42 * Set the selected protocol version 72 * Set the selected protocol version
43 */ 73 */
@@ -95,7 +125,6 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
95{ 125{
96 const struct rds_ib_connect_private *dp = NULL; 126 const struct rds_ib_connect_private *dp = NULL;
97 struct rds_ib_connection *ic = conn->c_transport_data; 127 struct rds_ib_connection *ic = conn->c_transport_data;
98 struct rds_ib_device *rds_ibdev;
99 struct ib_qp_attr qp_attr; 128 struct ib_qp_attr qp_attr;
100 int err; 129 int err;
101 130
@@ -111,11 +140,21 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
111 } 140 }
112 } 141 }
113 142
114 printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
115 &conn->c_faddr,
116 RDS_PROTOCOL_MAJOR(conn->c_version),
117 RDS_PROTOCOL_MINOR(conn->c_version),
118 ic->i_flowctl ? ", flow control" : "");
143 if (conn->c_version < RDS_PROTOCOL(3,1)) {
144 printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
145 " no longer supported\n",
146 &conn->c_faddr,
147 RDS_PROTOCOL_MAJOR(conn->c_version),
148 RDS_PROTOCOL_MINOR(conn->c_version));
149 rds_conn_destroy(conn);
150 return;
151 } else {
152 printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
153 &conn->c_faddr,
154 RDS_PROTOCOL_MAJOR(conn->c_version),
155 RDS_PROTOCOL_MINOR(conn->c_version),
156 ic->i_flowctl ? ", flow control" : "");
157 }
119 158
120 /* 159 /*
121 * Init rings and fill recv. this needs to wait until protocol negotiation 160 * Init rings and fill recv. this needs to wait until protocol negotiation
@@ -125,7 +164,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
125 rds_ib_recv_init_ring(ic); 164 rds_ib_recv_init_ring(ic);
126 /* Post receive buffers - as a side effect, this will update 165 /* Post receive buffers - as a side effect, this will update
127 * the posted credit count. */ 166 * the posted credit count. */
128 rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);
167 rds_ib_recv_refill(conn, 1);
129 168
130 /* Tune RNR behavior */ 169 /* Tune RNR behavior */
131 rds_ib_tune_rnr(ic, &qp_attr); 170 rds_ib_tune_rnr(ic, &qp_attr);
@@ -135,12 +174,11 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
135 if (err) 174 if (err)
136 printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err); 175 printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);
137 176
138 /* update ib_device with this local ipaddr & conn */
139 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
140 err = rds_ib_update_ipaddr(rds_ibdev, conn->c_laddr);
177 /* update ib_device with this local ipaddr */
178 err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
141 if (err) 179 if (err)
142 printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err);
143 rds_ib_add_conn(rds_ibdev, conn);
180 printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
181 err);
144 182
145 /* If the peer gave us the last packet it saw, process this as if 183 /* If the peer gave us the last packet it saw, process this as if
146 * we had received a regular ACK. */ 184 * we had received a regular ACK. */
@@ -153,18 +191,23 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
153static void rds_ib_cm_fill_conn_param(struct rds_connection *conn, 191static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
154 struct rdma_conn_param *conn_param, 192 struct rdma_conn_param *conn_param,
155 struct rds_ib_connect_private *dp, 193 struct rds_ib_connect_private *dp,
156 u32 protocol_version)
194 u32 protocol_version,
195 u32 max_responder_resources,
196 u32 max_initiator_depth)
157{ 197{
198 struct rds_ib_connection *ic = conn->c_transport_data;
199 struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
200
158 memset(conn_param, 0, sizeof(struct rdma_conn_param)); 201 memset(conn_param, 0, sizeof(struct rdma_conn_param));
159 /* XXX tune these? */
160 conn_param->responder_resources = 1;
161 conn_param->initiator_depth = 1;
202
203 conn_param->responder_resources =
204 min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
205 conn_param->initiator_depth =
206 min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
162 conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7); 207 conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
163 conn_param->rnr_retry_count = 7; 208 conn_param->rnr_retry_count = 7;
164 209
165 if (dp) { 210 if (dp) {
166 struct rds_ib_connection *ic = conn->c_transport_data;
167
168 memset(dp, 0, sizeof(*dp)); 211 memset(dp, 0, sizeof(*dp));
169 dp->dp_saddr = conn->c_laddr; 212 dp->dp_saddr = conn->c_laddr;
170 dp->dp_daddr = conn->c_faddr; 213 dp->dp_daddr = conn->c_faddr;
@@ -189,7 +232,8 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
189 232
190static void rds_ib_cq_event_handler(struct ib_event *event, void *data) 233static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
191{ 234{
192 rdsdebug("event %u data %p\n", event->event, data);
235 rdsdebug("event %u (%s) data %p\n",
236 event->event, rds_ib_event_str(event->event), data);
193} 237}
194 238
195static void rds_ib_qp_event_handler(struct ib_event *event, void *data) 239static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
@@ -197,16 +241,18 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
197 struct rds_connection *conn = data; 241 struct rds_connection *conn = data;
198 struct rds_ib_connection *ic = conn->c_transport_data; 242 struct rds_ib_connection *ic = conn->c_transport_data;
199 243
200 rdsdebug("conn %p ic %p event %u\n", conn, ic, event->event);
244 rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
245 rds_ib_event_str(event->event));
201 246
202 switch (event->event) { 247 switch (event->event) {
203 case IB_EVENT_COMM_EST: 248 case IB_EVENT_COMM_EST:
204 rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); 249 rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
205 break; 250 break;
206 default: 251 default:
207 rdsdebug("Fatal QP Event %u "
208 "- connection %pI4->%pI4, reconnecting\n",
209 event->event, &conn->c_laddr, &conn->c_faddr);
252 rdsdebug("Fatal QP Event %u (%s) "
253 "- connection %pI4->%pI4, reconnecting\n",
254 event->event, rds_ib_event_str(event->event),
255 &conn->c_laddr, &conn->c_faddr);
210 rds_conn_drop(conn); 256 rds_conn_drop(conn);
211 break; 257 break;
212 } 258 }
@@ -224,18 +270,16 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
224 struct rds_ib_device *rds_ibdev; 270 struct rds_ib_device *rds_ibdev;
225 int ret; 271 int ret;
226 272
227 /* rds_ib_add_one creates a rds_ib_device object per IB device,
228 * and allocates a protection domain, memory range and FMR pool
229 * for each. If that fails for any reason, it will not register
230 * the rds_ibdev at all.
231 */
273 /*
274 * It's normal to see a null device if an incoming connection races
275 * with device removal, so we don't print a warning.
276 */
232 rds_ibdev = ib_get_client_data(dev, &rds_ib_client);
233 if (rds_ibdev == NULL) {
277 rds_ibdev = rds_ib_get_client_data(dev);
278 if (!rds_ibdev)
234 if (printk_ratelimit())
235 printk(KERN_NOTICE "RDS/IB: No client_data for device %s\n",
236 dev->name);
237 return -EOPNOTSUPP; 279 return -EOPNOTSUPP;
238 }
280
281 /* add the conn now so that connection establishment has the dev */
282 rds_ib_add_conn(rds_ibdev, conn);
239 283
240 if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1) 284 if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
241 rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1); 285 rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
@@ -306,7 +350,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
306 ic->i_send_ring.w_nr * 350 ic->i_send_ring.w_nr *
307 sizeof(struct rds_header), 351 sizeof(struct rds_header),
308 &ic->i_send_hdrs_dma, GFP_KERNEL); 352 &ic->i_send_hdrs_dma, GFP_KERNEL);
309 if (ic->i_send_hdrs == NULL) {
353 if (!ic->i_send_hdrs) {
310 ret = -ENOMEM; 354 ret = -ENOMEM;
311 rdsdebug("ib_dma_alloc_coherent send failed\n"); 355 rdsdebug("ib_dma_alloc_coherent send failed\n");
312 goto out; 356 goto out;
@@ -316,7 +360,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
316 ic->i_recv_ring.w_nr * 360 ic->i_recv_ring.w_nr *
317 sizeof(struct rds_header), 361 sizeof(struct rds_header),
318 &ic->i_recv_hdrs_dma, GFP_KERNEL); 362 &ic->i_recv_hdrs_dma, GFP_KERNEL);
319 if (ic->i_recv_hdrs == NULL) {
363 if (!ic->i_recv_hdrs) {
320 ret = -ENOMEM; 364 ret = -ENOMEM;
321 rdsdebug("ib_dma_alloc_coherent recv failed\n"); 365 rdsdebug("ib_dma_alloc_coherent recv failed\n");
322 goto out; 366 goto out;
@@ -324,22 +368,24 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
324 368
325 ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header), 369 ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
326 &ic->i_ack_dma, GFP_KERNEL); 370 &ic->i_ack_dma, GFP_KERNEL);
327 if (ic->i_ack == NULL) {
371 if (!ic->i_ack) {
328 ret = -ENOMEM; 372 ret = -ENOMEM;
329 rdsdebug("ib_dma_alloc_coherent ack failed\n"); 373 rdsdebug("ib_dma_alloc_coherent ack failed\n");
330 goto out; 374 goto out;
331 } 375 }
332 376
333 ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
334 if (ic->i_sends == NULL) {
377 ic->i_sends = vmalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
378 ibdev_to_node(dev));
379 if (!ic->i_sends) {
335 ret = -ENOMEM; 380 ret = -ENOMEM;
336 rdsdebug("send allocation failed\n"); 381 rdsdebug("send allocation failed\n");
337 goto out; 382 goto out;
338 } 383 }
339 memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work)); 384 memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
340 385
341 ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
342 if (ic->i_recvs == NULL) {
386 ic->i_recvs = vmalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
387 ibdev_to_node(dev));
388 if (!ic->i_recvs) {
343 ret = -ENOMEM; 389 ret = -ENOMEM;
344 rdsdebug("recv allocation failed\n"); 390 rdsdebug("recv allocation failed\n");
345 goto out; 391 goto out;
@@ -352,6 +398,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
352 ic->i_send_cq, ic->i_recv_cq); 398 ic->i_send_cq, ic->i_recv_cq);
353 399
354out: 400out:
401 rds_ib_dev_put(rds_ibdev);
355 return ret; 402 return ret;
356} 403}
357 404
@@ -409,7 +456,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
409 struct rds_ib_connection *ic = NULL; 456 struct rds_ib_connection *ic = NULL;
410 struct rdma_conn_param conn_param; 457 struct rdma_conn_param conn_param;
411 u32 version; 458 u32 version;
412 int err, destroy = 1;
459 int err = 1, destroy = 1;
413 460
414 /* Check whether the remote protocol version matches ours. */ 461 /* Check whether the remote protocol version matches ours. */
415 version = rds_ib_protocol_compatible(event); 462 version = rds_ib_protocol_compatible(event);
@@ -448,7 +495,6 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
448 /* Wait and see - our connect may still be succeeding */ 495 /* Wait and see - our connect may still be succeeding */
449 rds_ib_stats_inc(s_ib_connect_raced); 496 rds_ib_stats_inc(s_ib_connect_raced);
450 } 497 }
451 mutex_unlock(&conn->c_cm_lock);
452 goto out; 498 goto out;
453 } 499 }
454 500
@@ -475,24 +521,23 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
475 err = rds_ib_setup_qp(conn); 521 err = rds_ib_setup_qp(conn);
476 if (err) { 522 if (err) {
477 rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err); 523 rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
478 mutex_unlock(&conn->c_cm_lock);
479 goto out; 524 goto out;
480 } 525 }
481 526
482 rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version); 527 rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
528 event->param.conn.responder_resources,
529 event->param.conn.initiator_depth);
483 530
484 /* rdma_accept() calls rdma_reject() internally if it fails */ 531 /* rdma_accept() calls rdma_reject() internally if it fails */
485 err = rdma_accept(cm_id, &conn_param); 532 err = rdma_accept(cm_id, &conn_param);
486 mutex_unlock(&conn->c_cm_lock); 533 if (err)
487 if (err) {
488 rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err); 534 rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
489 goto out;
490 }
491
492 return 0;
493 535
494out: 536out:
495 rdma_reject(cm_id, NULL, 0); 537 if (conn)
538 mutex_unlock(&conn->c_cm_lock);
539 if (err)
540 rdma_reject(cm_id, NULL, 0);
496 return destroy; 541 return destroy;
497} 542}
498 543
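
Initializing err to 1 lets every early failure in rds_ib_cm_handle_connect() fall through a single out: label that unlocks only if the lock was actually taken and rejects only if the accept never went through. A hedged sketch of the idiom with pthreads (the names and the reject stub are illustrative, not the RDS code):

#include <pthread.h>

static void reject(void)
{
        /* stands in for rdma_reject(cm_id, NULL, 0) */
}

static int handle_connect(pthread_mutex_t *cm_lock, int have_conn)
{
        int err = 1;            /* "reject" until the accept succeeds */
        int locked = 0;

        if (!have_conn)
                goto out;       /* failed before the lock was taken */

        pthread_mutex_lock(cm_lock);
        locked = 1;

        /* ... protocol checks and QP setup; any failure does goto out ... */

        err = 0;                /* the rdma_accept() equivalent succeeded */
out:
        if (locked)
                pthread_mutex_unlock(cm_lock);
        if (err)
                reject();
        return err;
}
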
@@ -516,8 +561,8 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
516 goto out; 561 goto out;
517 } 562 }
518 563
519 rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION); 564 rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
520 565 UINT_MAX, UINT_MAX);
521 ret = rdma_connect(cm_id, &conn_param); 566 ret = rdma_connect(cm_id, &conn_param);
522 if (ret) 567 if (ret)
523 rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret); 568 rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
@@ -601,9 +646,19 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
601 ic->i_cm_id, err); 646 ic->i_cm_id, err);
602 } 647 }
603 648
649 /*
650 * We want to wait for tx and rx completion to finish
651 * before we tear down the connection, but we have to be
652 * careful not to get stuck waiting on a send ring that
 653 * only has unsignaled sends in it. We've shut down new
 654 * sends before getting here, so by waiting for signaled
 655 * sends to complete we can be sure that there will be no
656 * more tx processing.
657 */
604 wait_event(rds_ib_ring_empty_wait, 658 wait_event(rds_ib_ring_empty_wait,
605 rds_ib_ring_empty(&ic->i_send_ring) && 659 rds_ib_ring_empty(&ic->i_recv_ring) &&
606 rds_ib_ring_empty(&ic->i_recv_ring)); 660 (atomic_read(&ic->i_signaled_sends) == 0));
661 tasklet_kill(&ic->i_recv_tasklet);
607 662
608 if (ic->i_send_hdrs) 663 if (ic->i_send_hdrs)
609 ib_dma_free_coherent(dev, 664 ib_dma_free_coherent(dev,
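
i_signaled_sends counts only work requests posted with a completion requested; unsignaled sends never produce a completion, so waiting on the send ring alone could block forever. A toy model of the counter discipline, assuming C11 atomics rather than the kernel's atomic_t API:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int signaled_sends;       /* models ic->i_signaled_sends */

static void post_send(bool signaled)
{
        if (signaled)
                atomic_fetch_add(&signaled_sends, 1);
        /* unsignaled sends are not counted: the hardware will never
         * report a completion for them, so they must not be waited on */
}

static void send_completion(void)
{
        /* each completion pairs with exactly one counted post */
        atomic_fetch_sub(&signaled_sends, 1);
}

static bool tx_quiesced(void)
{
        /* new sends are already blocked by the time shutdown checks
         * this, so a zero count means no further tx processing */
        return atomic_load(&signaled_sends) == 0;
}
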
@@ -654,9 +709,12 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
654 BUG_ON(ic->rds_ibdev); 709 BUG_ON(ic->rds_ibdev);
655 710
656 /* Clear pending transmit */ 711 /* Clear pending transmit */
657 if (ic->i_rm) { 712 if (ic->i_data_op) {
658 rds_message_put(ic->i_rm); 713 struct rds_message *rm;
659 ic->i_rm = NULL; 714
715 rm = container_of(ic->i_data_op, struct rds_message, data);
716 rds_message_put(rm);
717 ic->i_data_op = NULL;
660 } 718 }
661 719
662 /* Clear the ACK state */ 720 /* Clear the ACK state */
@@ -690,12 +748,19 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
690{ 748{
691 struct rds_ib_connection *ic; 749 struct rds_ib_connection *ic;
692 unsigned long flags; 750 unsigned long flags;
751 int ret;
693 752
694 /* XXX too lazy? */ 753 /* XXX too lazy? */
695 ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL); 754 ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL);
696 if (ic == NULL) 755 if (!ic)
697 return -ENOMEM; 756 return -ENOMEM;
698 757
758 ret = rds_ib_recv_alloc_caches(ic);
759 if (ret) {
760 kfree(ic);
761 return ret;
762 }
763
699 INIT_LIST_HEAD(&ic->ib_node); 764 INIT_LIST_HEAD(&ic->ib_node);
700 tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn, 765 tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
701 (unsigned long) ic); 766 (unsigned long) ic);
@@ -703,6 +768,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
703#ifndef KERNEL_HAS_ATOMIC64 768#ifndef KERNEL_HAS_ATOMIC64
704 spin_lock_init(&ic->i_ack_lock); 769 spin_lock_init(&ic->i_ack_lock);
705#endif 770#endif
771 atomic_set(&ic->i_signaled_sends, 0);
706 772
707 /* 773 /*
708 * rds_ib_conn_shutdown() waits for these to be emptied so they 774 * rds_ib_conn_shutdown() waits for these to be emptied so they
@@ -744,6 +810,8 @@ void rds_ib_conn_free(void *arg)
744 list_del(&ic->ib_node); 810 list_del(&ic->ib_node);
745 spin_unlock_irq(lock_ptr); 811 spin_unlock_irq(lock_ptr);
746 812
813 rds_ib_recv_free_caches(ic);
814
747 kfree(ic); 815 kfree(ic);
748} 816}
749 817
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index a54cd63f9e35..b5a88415a18e 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -32,11 +32,16 @@
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/rculist.h>
35 36
36#include "rds.h" 37#include "rds.h"
37#include "rdma.h"
38#include "ib.h" 38#include "ib.h"
39#include "xlist.h"
39 40
41struct workqueue_struct *rds_ib_fmr_wq;
42
43static DEFINE_PER_CPU(unsigned long, clean_list_grace);
44#define CLEAN_LIST_BUSY_BIT 0
40 45
41/* 46/*
42 * This is stored as mr->r_trans_private. 47 * This is stored as mr->r_trans_private.
@@ -45,7 +50,11 @@ struct rds_ib_mr {
45 struct rds_ib_device *device; 50 struct rds_ib_device *device;
46 struct rds_ib_mr_pool *pool; 51 struct rds_ib_mr_pool *pool;
47 struct ib_fmr *fmr; 52 struct ib_fmr *fmr;
48 struct list_head list; 53
54 struct xlist_head xlist;
55
56 /* unmap_list is for freeing */
57 struct list_head unmap_list;
49 unsigned int remap_count; 58 unsigned int remap_count;
50 59
51 struct scatterlist *sg; 60 struct scatterlist *sg;
@@ -59,14 +68,16 @@ struct rds_ib_mr {
59 */ 68 */
60struct rds_ib_mr_pool { 69struct rds_ib_mr_pool {
61 struct mutex flush_lock; /* serialize fmr invalidate */ 70 struct mutex flush_lock; /* serialize fmr invalidate */
62 struct work_struct flush_worker; /* flush worker */ 71 struct delayed_work flush_worker; /* flush worker */
63 72
64 spinlock_t list_lock; /* protect variables below */
65 atomic_t item_count; /* total # of MRs */ 73 atomic_t item_count; /* total # of MRs */
66 atomic_t dirty_count; /* # dirty of MRs */ 74 atomic_t dirty_count; /* # dirty of MRs */
67 struct list_head drop_list; /* MRs that have reached their max_maps limit */ 75
68 struct list_head free_list; /* unused MRs */ 76 struct xlist_head drop_list; /* MRs that have reached their max_maps limit */
 69 struct list_head clean_list; /* unused & unmapped MRs */ 77 struct xlist_head free_list; /* unused MRs */
 78 struct xlist_head clean_list; /* global unused & unmapped MRs */
79 wait_queue_head_t flush_wait;
80
70 atomic_t free_pinned; /* memory pinned by free MRs */ 81 atomic_t free_pinned; /* memory pinned by free MRs */
71 unsigned long max_items; 82 unsigned long max_items;
72 unsigned long max_items_soft; 83 unsigned long max_items_soft;
@@ -74,7 +85,7 @@ struct rds_ib_mr_pool {
74 struct ib_fmr_attr fmr_attr; 85 struct ib_fmr_attr fmr_attr;
75}; 86};
76 87
77static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all); 88static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
78static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr); 89static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
79static void rds_ib_mr_pool_flush_worker(struct work_struct *work); 90static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
80 91
@@ -83,16 +94,17 @@ static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
83 struct rds_ib_device *rds_ibdev; 94 struct rds_ib_device *rds_ibdev;
84 struct rds_ib_ipaddr *i_ipaddr; 95 struct rds_ib_ipaddr *i_ipaddr;
85 96
86 list_for_each_entry(rds_ibdev, &rds_ib_devices, list) { 97 rcu_read_lock();
87 spin_lock_irq(&rds_ibdev->spinlock); 98 list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
88 list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) { 99 list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
89 if (i_ipaddr->ipaddr == ipaddr) { 100 if (i_ipaddr->ipaddr == ipaddr) {
90 spin_unlock_irq(&rds_ibdev->spinlock); 101 atomic_inc(&rds_ibdev->refcount);
102 rcu_read_unlock();
91 return rds_ibdev; 103 return rds_ibdev;
92 } 104 }
93 } 105 }
94 spin_unlock_irq(&rds_ibdev->spinlock);
95 } 106 }
107 rcu_read_unlock();
96 108
97 return NULL; 109 return NULL;
98} 110}
@@ -108,7 +120,7 @@ static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
108 i_ipaddr->ipaddr = ipaddr; 120 i_ipaddr->ipaddr = ipaddr;
109 121
110 spin_lock_irq(&rds_ibdev->spinlock); 122 spin_lock_irq(&rds_ibdev->spinlock);
111 list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list); 123 list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
112 spin_unlock_irq(&rds_ibdev->spinlock); 124 spin_unlock_irq(&rds_ibdev->spinlock);
113 125
114 return 0; 126 return 0;
@@ -116,17 +128,24 @@ static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
116 128
117static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr) 129static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
118{ 130{
119 struct rds_ib_ipaddr *i_ipaddr, *next; 131 struct rds_ib_ipaddr *i_ipaddr;
132 struct rds_ib_ipaddr *to_free = NULL;
133
120 134
121 spin_lock_irq(&rds_ibdev->spinlock); 135 spin_lock_irq(&rds_ibdev->spinlock);
122 list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) { 136 list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
123 if (i_ipaddr->ipaddr == ipaddr) { 137 if (i_ipaddr->ipaddr == ipaddr) {
124 list_del(&i_ipaddr->list); 138 list_del_rcu(&i_ipaddr->list);
125 kfree(i_ipaddr); 139 to_free = i_ipaddr;
126 break; 140 break;
127 } 141 }
128 } 142 }
129 spin_unlock_irq(&rds_ibdev->spinlock); 143 spin_unlock_irq(&rds_ibdev->spinlock);
144
145 if (to_free) {
146 synchronize_rcu();
147 kfree(to_free);
148 }
130} 149}
131 150
132int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr) 151int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
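
The removal path above unlinks the entry under the spinlock but defers kfree() until after synchronize_rcu(), so lockless readers walking the list with list_for_each_entry_rcu() can never dereference freed memory. The same shape, sketched in userspace with liburcu (an assumption for illustration, link with -lurcu; reader threads would also need rcu_register_thread(), omitted here):

#include <urcu.h>               /* rcu_read_lock, synchronize_rcu */
#include <urcu/list.h>
#include <urcu/rculist.h>       /* cds_list_*_rcu */
#include <pthread.h>
#include <stdlib.h>

struct ipaddr_entry {
        unsigned int ipaddr;
        struct cds_list_head list;
};

static CDS_LIST_HEAD(ipaddr_list);
static pthread_mutex_t ipaddr_lock = PTHREAD_MUTEX_INITIALIZER;

/* Reader: lockless lookup inside an RCU read-side critical section. */
static int ipaddr_present(unsigned int ipaddr)
{
        struct ipaddr_entry *e;
        int found = 0;

        rcu_read_lock();
        cds_list_for_each_entry_rcu(e, &ipaddr_list, list) {
                if (e->ipaddr == ipaddr) {
                        found = 1;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}

/* Writer: unlink under the lock, free only after a grace period. */
static void ipaddr_remove(unsigned int ipaddr)
{
        struct ipaddr_entry *e, *to_free = NULL;

        pthread_mutex_lock(&ipaddr_lock);
        cds_list_for_each_entry_rcu(e, &ipaddr_list, list) {
                if (e->ipaddr == ipaddr) {
                        cds_list_del_rcu(&e->list);
                        to_free = e;
                        break;
                }
        }
        pthread_mutex_unlock(&ipaddr_lock);

        if (to_free) {
                synchronize_rcu();      /* wait out all current readers */
                free(to_free);
        }
}
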
@@ -134,8 +153,10 @@ int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
134 struct rds_ib_device *rds_ibdev_old; 153 struct rds_ib_device *rds_ibdev_old;
135 154
136 rds_ibdev_old = rds_ib_get_device(ipaddr); 155 rds_ibdev_old = rds_ib_get_device(ipaddr);
137 if (rds_ibdev_old) 156 if (rds_ibdev_old) {
138 rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr); 157 rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
158 rds_ib_dev_put(rds_ibdev_old);
159 }
139 160
140 return rds_ib_add_ipaddr(rds_ibdev, ipaddr); 161 return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
141} 162}
@@ -150,12 +171,13 @@ void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *con
150 BUG_ON(list_empty(&ic->ib_node)); 171 BUG_ON(list_empty(&ic->ib_node));
151 list_del(&ic->ib_node); 172 list_del(&ic->ib_node);
152 173
153 spin_lock_irq(&rds_ibdev->spinlock); 174 spin_lock(&rds_ibdev->spinlock);
154 list_add_tail(&ic->ib_node, &rds_ibdev->conn_list); 175 list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
155 spin_unlock_irq(&rds_ibdev->spinlock); 176 spin_unlock(&rds_ibdev->spinlock);
156 spin_unlock_irq(&ib_nodev_conns_lock); 177 spin_unlock_irq(&ib_nodev_conns_lock);
157 178
158 ic->rds_ibdev = rds_ibdev; 179 ic->rds_ibdev = rds_ibdev;
180 atomic_inc(&rds_ibdev->refcount);
159} 181}
160 182
161void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn) 183void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
@@ -175,18 +197,18 @@ void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *
175 spin_unlock(&ib_nodev_conns_lock); 197 spin_unlock(&ib_nodev_conns_lock);
176 198
177 ic->rds_ibdev = NULL; 199 ic->rds_ibdev = NULL;
200 rds_ib_dev_put(rds_ibdev);
178} 201}
179 202
180void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock) 203void rds_ib_destroy_nodev_conns(void)
181{ 204{
182 struct rds_ib_connection *ic, *_ic; 205 struct rds_ib_connection *ic, *_ic;
183 LIST_HEAD(tmp_list); 206 LIST_HEAD(tmp_list);
184 207
185 /* avoid calling conn_destroy with irqs off */ 208 /* avoid calling conn_destroy with irqs off */
186 spin_lock_irq(list_lock); 209 spin_lock_irq(&ib_nodev_conns_lock);
187 list_splice(list, &tmp_list); 210 list_splice(&ib_nodev_conns, &tmp_list);
188 INIT_LIST_HEAD(list); 211 spin_unlock_irq(&ib_nodev_conns_lock);
189 spin_unlock_irq(list_lock);
190 212
191 list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) 213 list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
192 rds_conn_destroy(ic->conn); 214 rds_conn_destroy(ic->conn);
@@ -200,12 +222,12 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
200 if (!pool) 222 if (!pool)
201 return ERR_PTR(-ENOMEM); 223 return ERR_PTR(-ENOMEM);
202 224
203 INIT_LIST_HEAD(&pool->free_list); 225 INIT_XLIST_HEAD(&pool->free_list);
204 INIT_LIST_HEAD(&pool->drop_list); 226 INIT_XLIST_HEAD(&pool->drop_list);
205 INIT_LIST_HEAD(&pool->clean_list); 227 INIT_XLIST_HEAD(&pool->clean_list);
206 mutex_init(&pool->flush_lock); 228 mutex_init(&pool->flush_lock);
207 spin_lock_init(&pool->list_lock); 229 init_waitqueue_head(&pool->flush_wait);
208 INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker); 230 INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
209 231
210 pool->fmr_attr.max_pages = fmr_message_size; 232 pool->fmr_attr.max_pages = fmr_message_size;
211 pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps; 233 pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
@@ -233,34 +255,60 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_co
233 255
234void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) 256void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
235{ 257{
236 flush_workqueue(rds_wq); 258 cancel_delayed_work_sync(&pool->flush_worker);
237 rds_ib_flush_mr_pool(pool, 1); 259 rds_ib_flush_mr_pool(pool, 1, NULL);
238 WARN_ON(atomic_read(&pool->item_count)); 260 WARN_ON(atomic_read(&pool->item_count));
239 WARN_ON(atomic_read(&pool->free_pinned)); 261 WARN_ON(atomic_read(&pool->free_pinned));
240 kfree(pool); 262 kfree(pool);
241} 263}
242 264
265static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
266 struct rds_ib_mr **ibmr_ret)
267{
268 struct xlist_head *ibmr_xl;
269 ibmr_xl = xlist_del_head_fast(xl);
270 *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
271}
272
243static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool) 273static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
244{ 274{
245 struct rds_ib_mr *ibmr = NULL; 275 struct rds_ib_mr *ibmr = NULL;
246 unsigned long flags; 276 struct xlist_head *ret;
277 unsigned long *flag;
247 278
248 spin_lock_irqsave(&pool->list_lock, flags); 279 preempt_disable();
249 if (!list_empty(&pool->clean_list)) { 280 flag = &__get_cpu_var(clean_list_grace);
250 ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list); 281 set_bit(CLEAN_LIST_BUSY_BIT, flag);
251 list_del_init(&ibmr->list); 282 ret = xlist_del_head(&pool->clean_list);
252 } 283 if (ret)
253 spin_unlock_irqrestore(&pool->list_lock, flags); 284 ibmr = list_entry(ret, struct rds_ib_mr, xlist);
254 285
286 clear_bit(CLEAN_LIST_BUSY_BIT, flag);
287 preempt_enable();
255 return ibmr; 288 return ibmr;
256} 289}
257 290
291static inline void wait_clean_list_grace(void)
292{
293 int cpu;
294 unsigned long *flag;
295
296 for_each_online_cpu(cpu) {
297 flag = &per_cpu(clean_list_grace, cpu);
298 while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
299 cpu_relax();
300 }
301}
302
258static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev) 303static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
259{ 304{
260 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; 305 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
261 struct rds_ib_mr *ibmr = NULL; 306 struct rds_ib_mr *ibmr = NULL;
262 int err = 0, iter = 0; 307 int err = 0, iter = 0;
263 308
309 if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
310 queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
311
264 while (1) { 312 while (1) {
265 ibmr = rds_ib_reuse_fmr(pool); 313 ibmr = rds_ib_reuse_fmr(pool);
266 if (ibmr) 314 if (ibmr)
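
clean_list_grace is a per-cpu busy flag: each CPU sets it around xlist_del_head(), and wait_clean_list_grace() later spins until no CPU has it set, guaranteeing nobody is still mid-pop before removed nodes are recycled onto the list (the ABA hazard described in the flush path below). A scaled-down C11 model, with a fixed thread count standing in for CPUs:

#include <stdatomic.h>
#include <sched.h>

#define NTHREADS 4                      /* stands in for the online CPUs */

struct node { struct node *next; };

static atomic_int busy[NTHREADS];       /* models percpu clean_list_grace */
static _Atomic(struct node *) clean_head;

static struct node *pop_head(int self)
{
        struct node *n;

        atomic_store(&busy[self], 1);   /* set CLEAN_LIST_BUSY_BIT */
        n = atomic_load(&clean_head);
        while (n && !atomic_compare_exchange_weak(&clean_head, &n, n->next))
                ;                       /* classic lock-free pop */
        atomic_store(&busy[self], 0);   /* pop finished */
        return n;
}

/* Spin until no thread is mid-pop: after this returns, no one can still
 * hold a stale pointer to a node about to be pushed back on the list. */
static void wait_grace(void)
{
        for (int i = 0; i < NTHREADS; i++)
                while (atomic_load(&busy[i]))
                        sched_yield();  /* cpu_relax() in the kernel */
}
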
@@ -287,19 +335,24 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
287 335
288 /* We do have some empty MRs. Flush them out. */ 336 /* We do have some empty MRs. Flush them out. */
289 rds_ib_stats_inc(s_ib_rdma_mr_pool_wait); 337 rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
290 rds_ib_flush_mr_pool(pool, 0); 338 rds_ib_flush_mr_pool(pool, 0, &ibmr);
339 if (ibmr)
340 return ibmr;
291 } 341 }
292 342
293 ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL); 343 ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
294 if (!ibmr) { 344 if (!ibmr) {
295 err = -ENOMEM; 345 err = -ENOMEM;
296 goto out_no_cigar; 346 goto out_no_cigar;
297 } 347 }
298 348
349 memset(ibmr, 0, sizeof(*ibmr));
350
299 ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd, 351 ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
300 (IB_ACCESS_LOCAL_WRITE | 352 (IB_ACCESS_LOCAL_WRITE |
301 IB_ACCESS_REMOTE_READ | 353 IB_ACCESS_REMOTE_READ |
302 IB_ACCESS_REMOTE_WRITE), 354 IB_ACCESS_REMOTE_WRITE|
355 IB_ACCESS_REMOTE_ATOMIC),
303 &pool->fmr_attr); 356 &pool->fmr_attr);
304 if (IS_ERR(ibmr->fmr)) { 357 if (IS_ERR(ibmr->fmr)) {
305 err = PTR_ERR(ibmr->fmr); 358 err = PTR_ERR(ibmr->fmr);
@@ -367,7 +420,8 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
367 if (page_cnt > fmr_message_size) 420 if (page_cnt > fmr_message_size)
368 return -EINVAL; 421 return -EINVAL;
369 422
370 dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC); 423 dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
424 rdsibdev_to_node(rds_ibdev));
371 if (!dma_pages) 425 if (!dma_pages)
372 return -ENOMEM; 426 return -ENOMEM;
373 427
@@ -441,7 +495,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
441 495
442 /* FIXME we need a way to tell a r/w MR 496 /* FIXME we need a way to tell a r/w MR
443 * from a r/o MR */ 497 * from a r/o MR */
444 BUG_ON(in_interrupt()); 498 BUG_ON(irqs_disabled());
445 set_page_dirty(page); 499 set_page_dirty(page);
446 put_page(page); 500 put_page(page);
447 } 501 }
@@ -477,33 +531,109 @@ static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int fr
477} 531}
478 532
479/* 533/*
534 * given an xlist of mrs, put them all into the list_head for more processing
535 */
536static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
537{
538 struct rds_ib_mr *ibmr;
539 struct xlist_head splice;
540 struct xlist_head *cur;
541 struct xlist_head *next;
542
543 splice.next = NULL;
544 xlist_splice(xlist, &splice);
545 cur = splice.next;
546 while (cur) {
547 next = cur->next;
548 ibmr = list_entry(cur, struct rds_ib_mr, xlist);
549 list_add_tail(&ibmr->unmap_list, list);
550 cur = next;
551 }
552}
553
554/*
555 * this takes a list head of mrs and turns it into an xlist of clusters.
556 * each cluster has an xlist of MR_CLUSTER_SIZE mrs that are ready for
557 * reuse.
558 */
559static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
560 struct list_head *list, struct xlist_head *xlist,
561 struct xlist_head **tail_ret)
562{
563 struct rds_ib_mr *ibmr;
564 struct xlist_head *cur_mr = xlist;
565 struct xlist_head *tail_mr = NULL;
566
567 list_for_each_entry(ibmr, list, unmap_list) {
568 tail_mr = &ibmr->xlist;
569 tail_mr->next = NULL;
570 cur_mr->next = tail_mr;
571 cur_mr = tail_mr;
572 }
573 *tail_ret = tail_mr;
574}
575
576/*
480 * Flush our pool of MRs. 577 * Flush our pool of MRs.
481 * At a minimum, all currently unused MRs are unmapped. 578 * At a minimum, all currently unused MRs are unmapped.
482 * If the number of MRs allocated exceeds the limit, we also try 579 * If the number of MRs allocated exceeds the limit, we also try
483 * to free as many MRs as needed to get back to this limit. 580 * to free as many MRs as needed to get back to this limit.
484 */ 581 */
485static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all) 582static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
583 int free_all, struct rds_ib_mr **ibmr_ret)
486{ 584{
487 struct rds_ib_mr *ibmr, *next; 585 struct rds_ib_mr *ibmr, *next;
586 struct xlist_head clean_xlist;
587 struct xlist_head *clean_tail;
488 LIST_HEAD(unmap_list); 588 LIST_HEAD(unmap_list);
489 LIST_HEAD(fmr_list); 589 LIST_HEAD(fmr_list);
490 unsigned long unpinned = 0; 590 unsigned long unpinned = 0;
491 unsigned long flags;
492 unsigned int nfreed = 0, ncleaned = 0, free_goal; 591 unsigned int nfreed = 0, ncleaned = 0, free_goal;
493 int ret = 0; 592 int ret = 0;
494 593
495 rds_ib_stats_inc(s_ib_rdma_mr_pool_flush); 594 rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);
496 595
497 mutex_lock(&pool->flush_lock); 596 if (ibmr_ret) {
597 DEFINE_WAIT(wait);
 598 while (!mutex_trylock(&pool->flush_lock)) {
599 ibmr = rds_ib_reuse_fmr(pool);
600 if (ibmr) {
601 *ibmr_ret = ibmr;
602 finish_wait(&pool->flush_wait, &wait);
603 goto out_nolock;
604 }
605
606 prepare_to_wait(&pool->flush_wait, &wait,
607 TASK_UNINTERRUPTIBLE);
608 if (xlist_empty(&pool->clean_list))
609 schedule();
610
611 ibmr = rds_ib_reuse_fmr(pool);
612 if (ibmr) {
613 *ibmr_ret = ibmr;
614 finish_wait(&pool->flush_wait, &wait);
615 goto out_nolock;
616 }
617 }
618 finish_wait(&pool->flush_wait, &wait);
619 } else
620 mutex_lock(&pool->flush_lock);
621
622 if (ibmr_ret) {
623 ibmr = rds_ib_reuse_fmr(pool);
624 if (ibmr) {
625 *ibmr_ret = ibmr;
626 goto out;
627 }
628 }
498 629
499 spin_lock_irqsave(&pool->list_lock, flags);
500 /* Get the list of all MRs to be dropped. Ordering matters - 630 /* Get the list of all MRs to be dropped. Ordering matters -
501 * we want to put drop_list ahead of free_list. */ 631 * we want to put drop_list ahead of free_list.
502 list_splice_init(&pool->free_list, &unmap_list); 632 */
503 list_splice_init(&pool->drop_list, &unmap_list); 633 xlist_append_to_list(&pool->drop_list, &unmap_list);
634 xlist_append_to_list(&pool->free_list, &unmap_list);
504 if (free_all) 635 if (free_all)
505 list_splice_init(&pool->clean_list, &unmap_list); 636 xlist_append_to_list(&pool->clean_list, &unmap_list);
506 spin_unlock_irqrestore(&pool->list_lock, flags);
507 637
508 free_goal = rds_ib_flush_goal(pool, free_all); 638 free_goal = rds_ib_flush_goal(pool, free_all);
509 639
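
When a flush is already running, an allocator that needs an MR no longer queues behind flush_lock: it keeps retrying the clean list and only sleeps while that list stays empty, to be woken when the flusher finishes. A hedged sketch of the trylock-or-reuse loop, with a pthread condition variable standing in for the kernel waitqueue and a one-slot atomic pointer standing in for the clean list:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t flush_wait = PTHREAD_COND_INITIALIZER;
static _Atomic(void *) clean_list;      /* toy: holds at most one entry */

static void *try_reuse(void)
{
        return atomic_exchange(&clean_list, NULL);
}

/* Returns an entry freed by someone else's flush, or NULL once we hold
 * flush_lock and must run the flush ourselves. Whoever flushes must
 * unlock flush_lock and broadcast flush_wait when done. */
static void *flush_or_reuse(void)
{
        void *e;

        while (pthread_mutex_trylock(&flush_lock) != 0) {
                e = try_reuse();
                if (e)
                        return e;       /* fed by the running flush */

                pthread_mutex_lock(&wait_lock);
                if (atomic_load(&clean_list) == NULL)
                        pthread_cond_wait(&flush_wait, &wait_lock);
                pthread_mutex_unlock(&wait_lock);

                e = try_reuse();
                if (e)
                        return e;
                /* otherwise loop: the flusher may have dropped the lock */
        }
        return NULL;                    /* we own the flush */
}
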
@@ -511,19 +641,20 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
511 goto out; 641 goto out;
512 642
513 /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */ 643 /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
514 list_for_each_entry(ibmr, &unmap_list, list) 644 list_for_each_entry(ibmr, &unmap_list, unmap_list)
515 list_add(&ibmr->fmr->list, &fmr_list); 645 list_add(&ibmr->fmr->list, &fmr_list);
646
516 ret = ib_unmap_fmr(&fmr_list); 647 ret = ib_unmap_fmr(&fmr_list);
517 if (ret) 648 if (ret)
518 printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret); 649 printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);
519 650
520 /* Now we can destroy the DMA mapping and unpin any pages */ 651 /* Now we can destroy the DMA mapping and unpin any pages */
521 list_for_each_entry_safe(ibmr, next, &unmap_list, list) { 652 list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
522 unpinned += ibmr->sg_len; 653 unpinned += ibmr->sg_len;
523 __rds_ib_teardown_mr(ibmr); 654 __rds_ib_teardown_mr(ibmr);
524 if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) { 655 if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
525 rds_ib_stats_inc(s_ib_rdma_mr_free); 656 rds_ib_stats_inc(s_ib_rdma_mr_free);
526 list_del(&ibmr->list); 657 list_del(&ibmr->unmap_list);
527 ib_dealloc_fmr(ibmr->fmr); 658 ib_dealloc_fmr(ibmr->fmr);
528 kfree(ibmr); 659 kfree(ibmr);
529 nfreed++; 660 nfreed++;
@@ -531,9 +662,27 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
531 ncleaned++; 662 ncleaned++;
532 } 663 }
533 664
534 spin_lock_irqsave(&pool->list_lock, flags); 665 if (!list_empty(&unmap_list)) {
535 list_splice(&unmap_list, &pool->clean_list); 666 /* we have to make sure that none of the things we're about
536 spin_unlock_irqrestore(&pool->list_lock, flags); 667 * to put on the clean list would race with other cpus trying
668 * to pull items off. The xlist would explode if we managed to
669 * remove something from the clean list and then add it back again
670 * while another CPU was spinning on that same item in xlist_del_head.
671 *
 672 * This is pretty unlikely, but just in case, wait for an xlist grace period
673 * here before adding anything back into the clean list.
674 */
675 wait_clean_list_grace();
676
677 list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
678 if (ibmr_ret)
679 refill_local(pool, &clean_xlist, ibmr_ret);
680
681 /* refill_local may have emptied our list */
682 if (!xlist_empty(&clean_xlist))
683 xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
684
685 }
537 686
538 atomic_sub(unpinned, &pool->free_pinned); 687 atomic_sub(unpinned, &pool->free_pinned);
539 atomic_sub(ncleaned, &pool->dirty_count); 688 atomic_sub(ncleaned, &pool->dirty_count);
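
The xlist used throughout this file behaves like a Treiber-style lock-free list: rds_ib_free_mr() pushes with a compare-and-swap (the real xlist_add() can push a whole pre-linked chain at once), and the flusher detaches the entire chain in one atomic exchange, which is what xlist_append_to_list() builds on. A compact C11 model of push and take-all (an illustration of the idea, not the kernel's xlist.h):

#include <stdatomic.h>
#include <stddef.h>

struct xnode { struct xnode *next; };

static _Atomic(struct xnode *) head;

/* xlist_add() with a one-node chain: push via compare-and-swap. */
static void xpush(struct xnode *n)
{
        struct xnode *old = atomic_load(&head);

        do {
                n->next = old;
        } while (!atomic_compare_exchange_weak(&head, &old, n));
}

/* xlist_splice(): detach the whole chain in a single atomic exchange;
 * the caller then owns every node and can walk them without races. */
static struct xnode *xtake_all(void)
{
        return atomic_exchange(&head, NULL);
}

Popping a single head with a bare compare-and-swap reintroduces the ABA problem, which is exactly why wait_clean_list_grace() must run before anything is pushed back onto clean_list.
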
@@ -541,14 +690,35 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
541 690
542out: 691out:
543 mutex_unlock(&pool->flush_lock); 692 mutex_unlock(&pool->flush_lock);
693 if (waitqueue_active(&pool->flush_wait))
694 wake_up(&pool->flush_wait);
695out_nolock:
544 return ret; 696 return ret;
545} 697}
546 698
699int rds_ib_fmr_init(void)
700{
701 rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
702 if (!rds_ib_fmr_wq)
703 return -ENOMEM;
704 return 0;
705}
706
707/*
 708 * By the time this is called, all the IB devices should have been torn down and
709 * had their pools freed. As each pool is freed its work struct is waited on,
710 * so the pool flushing work queue should be idle by the time we get here.
711 */
712void rds_ib_fmr_exit(void)
713{
714 destroy_workqueue(rds_ib_fmr_wq);
715}
716
547static void rds_ib_mr_pool_flush_worker(struct work_struct *work) 717static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
548{ 718{
549 struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker); 719 struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
550 720
551 rds_ib_flush_mr_pool(pool, 0); 721 rds_ib_flush_mr_pool(pool, 0, NULL);
552} 722}
553 723
554void rds_ib_free_mr(void *trans_private, int invalidate) 724void rds_ib_free_mr(void *trans_private, int invalidate)
@@ -556,47 +726,49 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
556 struct rds_ib_mr *ibmr = trans_private; 726 struct rds_ib_mr *ibmr = trans_private;
557 struct rds_ib_device *rds_ibdev = ibmr->device; 727 struct rds_ib_device *rds_ibdev = ibmr->device;
558 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; 728 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
559 unsigned long flags;
560 729
561 rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len); 730 rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
562 731
563 /* Return it to the pool's free list */ 732 /* Return it to the pool's free list */
564 spin_lock_irqsave(&pool->list_lock, flags);
565 if (ibmr->remap_count >= pool->fmr_attr.max_maps) 733 if (ibmr->remap_count >= pool->fmr_attr.max_maps)
566 list_add(&ibmr->list, &pool->drop_list); 734 xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
567 else 735 else
568 list_add(&ibmr->list, &pool->free_list); 736 xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);
569 737
570 atomic_add(ibmr->sg_len, &pool->free_pinned); 738 atomic_add(ibmr->sg_len, &pool->free_pinned);
571 atomic_inc(&pool->dirty_count); 739 atomic_inc(&pool->dirty_count);
572 spin_unlock_irqrestore(&pool->list_lock, flags);
573 740
574 /* If we've pinned too many pages, request a flush */ 741 /* If we've pinned too many pages, request a flush */
575 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || 742 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
576 atomic_read(&pool->dirty_count) >= pool->max_items / 10) 743 atomic_read(&pool->dirty_count) >= pool->max_items / 10)
577 queue_work(rds_wq, &pool->flush_worker); 744 queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
578 745
579 if (invalidate) { 746 if (invalidate) {
580 if (likely(!in_interrupt())) { 747 if (likely(!in_interrupt())) {
581 rds_ib_flush_mr_pool(pool, 0); 748 rds_ib_flush_mr_pool(pool, 0, NULL);
582 } else { 749 } else {
583 /* We get here if the user created a MR marked 750 /* We get here if the user created a MR marked
584 * as use_once and invalidate at the same time. */ 751 * as use_once and invalidate at the same time. */
585 queue_work(rds_wq, &pool->flush_worker); 752 queue_delayed_work(rds_ib_fmr_wq,
753 &pool->flush_worker, 10);
586 } 754 }
587 } 755 }
756
757 rds_ib_dev_put(rds_ibdev);
588} 758}
589 759
590void rds_ib_flush_mrs(void) 760void rds_ib_flush_mrs(void)
591{ 761{
592 struct rds_ib_device *rds_ibdev; 762 struct rds_ib_device *rds_ibdev;
593 763
764 down_read(&rds_ib_devices_lock);
594 list_for_each_entry(rds_ibdev, &rds_ib_devices, list) { 765 list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
595 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool; 766 struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
596 767
597 if (pool) 768 if (pool)
598 rds_ib_flush_mr_pool(pool, 0); 769 rds_ib_flush_mr_pool(pool, 0, NULL);
599 } 770 }
771 up_read(&rds_ib_devices_lock);
600} 772}
601 773
602void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, 774void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
@@ -628,6 +800,7 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
628 printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret); 800 printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);
629 801
630 ibmr->device = rds_ibdev; 802 ibmr->device = rds_ibdev;
803 rds_ibdev = NULL;
631 804
632 out: 805 out:
633 if (ret) { 806 if (ret) {
@@ -635,5 +808,8 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
635 rds_ib_free_mr(ibmr, 0); 808 rds_ib_free_mr(ibmr, 0);
636 ibmr = ERR_PTR(ret); 809 ibmr = ERR_PTR(ret);
637 } 810 }
811 if (rds_ibdev)
812 rds_ib_dev_put(rds_ibdev);
638 return ibmr; 813 return ibmr;
639} 814}
815
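
Near the end of rds_ib_get_mr() the device reference is handed off to the MR on success (ibmr->device = rds_ibdev; rds_ibdev = NULL;), so the single conditional put on the exit path fires only when mapping failed. A small sketch of that ownership-transfer idiom (illustrative names, C11 atomics):

#include <stdatomic.h>
#include <stdlib.h>

struct dev { atomic_int refcount; };
struct mr { struct dev *device; };      /* the MR owns one device ref */

static void dev_put(struct dev *d)
{
        if (atomic_fetch_sub(&d->refcount, 1) == 1)
                free(d);
}

/* d arrives with one reference held by the caller's lookup. */
static struct mr *get_mr(struct dev *d)
{
        struct mr *m = malloc(sizeof(*m));

        if (m) {
                m->device = d;          /* ownership moves to the MR */
                d = NULL;               /* common exit must not drop it */
        }
        if (d)
                dev_put(d);             /* failure: return the reference */
        return m;
}
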
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index c74e9904a6b2..e29e0ca32f74 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -43,42 +43,6 @@ static struct kmem_cache *rds_ib_incoming_slab;
43static struct kmem_cache *rds_ib_frag_slab; 43static struct kmem_cache *rds_ib_frag_slab;
44static atomic_t rds_ib_allocation = ATOMIC_INIT(0); 44static atomic_t rds_ib_allocation = ATOMIC_INIT(0);
45 45
46static void rds_ib_frag_drop_page(struct rds_page_frag *frag)
47{
48 rdsdebug("frag %p page %p\n", frag, frag->f_page);
49 __free_page(frag->f_page);
50 frag->f_page = NULL;
51}
52
53static void rds_ib_frag_free(struct rds_page_frag *frag)
54{
55 rdsdebug("frag %p page %p\n", frag, frag->f_page);
56 BUG_ON(frag->f_page != NULL);
57 kmem_cache_free(rds_ib_frag_slab, frag);
58}
59
60/*
61 * We map a page at a time. Its fragments are posted in order. This
62 * is called in fragment order as the fragments get send completion events.
63 * Only the last frag in the page performs the unmapping.
64 *
65 * It's OK for ring cleanup to call this in whatever order it likes because
66 * DMA is not in flight and so we can unmap while other ring entries still
67 * hold page references in their frags.
68 */
69static void rds_ib_recv_unmap_page(struct rds_ib_connection *ic,
70 struct rds_ib_recv_work *recv)
71{
72 struct rds_page_frag *frag = recv->r_frag;
73
74 rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
75 if (frag->f_mapped)
76 ib_dma_unmap_page(ic->i_cm_id->device,
77 frag->f_mapped,
78 RDS_FRAG_SIZE, DMA_FROM_DEVICE);
79 frag->f_mapped = 0;
80}
81
82void rds_ib_recv_init_ring(struct rds_ib_connection *ic) 46void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
83{ 47{
84 struct rds_ib_recv_work *recv; 48 struct rds_ib_recv_work *recv;
@@ -95,16 +59,161 @@ void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
95 recv->r_wr.sg_list = recv->r_sge; 59 recv->r_wr.sg_list = recv->r_sge;
96 recv->r_wr.num_sge = RDS_IB_RECV_SGE; 60 recv->r_wr.num_sge = RDS_IB_RECV_SGE;
97 61
98 sge = rds_ib_data_sge(ic, recv->r_sge); 62 sge = &recv->r_sge[0];
63 sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
64 sge->length = sizeof(struct rds_header);
65 sge->lkey = ic->i_mr->lkey;
66
67 sge = &recv->r_sge[1];
99 sge->addr = 0; 68 sge->addr = 0;
100 sge->length = RDS_FRAG_SIZE; 69 sge->length = RDS_FRAG_SIZE;
101 sge->lkey = ic->i_mr->lkey; 70 sge->lkey = ic->i_mr->lkey;
71 }
72}
102 73
103 sge = rds_ib_header_sge(ic, recv->r_sge); 74/*
104 sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header)); 75 * The entire 'from' list, including the from element itself, is put on
105 sge->length = sizeof(struct rds_header); 76 * to the tail of the 'to' list.
106 sge->lkey = ic->i_mr->lkey; 77 */
78static void list_splice_entire_tail(struct list_head *from,
79 struct list_head *to)
80{
81 struct list_head *from_last = from->prev;
82
83 list_splice_tail(from_last, to);
84 list_add_tail(from_last, to);
85}
86
87static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
88{
89 struct list_head *tmp;
90
91 tmp = xchg(&cache->xfer, NULL);
92 if (tmp) {
93 if (cache->ready)
94 list_splice_entire_tail(tmp, cache->ready);
95 else
96 cache->ready = tmp;
97 }
98}
99
100static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
101{
102 struct rds_ib_cache_head *head;
103 int cpu;
104
105 cache->percpu = alloc_percpu(struct rds_ib_cache_head);
106 if (!cache->percpu)
107 return -ENOMEM;
108
109 for_each_possible_cpu(cpu) {
110 head = per_cpu_ptr(cache->percpu, cpu);
111 head->first = NULL;
112 head->count = 0;
113 }
114 cache->xfer = NULL;
115 cache->ready = NULL;
116
117 return 0;
118}
119
120int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
121{
122 int ret;
123
124 ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
125 if (!ret) {
126 ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
127 if (ret)
128 free_percpu(ic->i_cache_incs.percpu);
107 } 129 }
130
131 return ret;
132}
133
134static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
135 struct list_head *caller_list)
136{
137 struct rds_ib_cache_head *head;
138 int cpu;
139
140 for_each_possible_cpu(cpu) {
141 head = per_cpu_ptr(cache->percpu, cpu);
142 if (head->first) {
143 list_splice_entire_tail(head->first, caller_list);
144 head->first = NULL;
145 }
146 }
147
148 if (cache->ready) {
149 list_splice_entire_tail(cache->ready, caller_list);
150 cache->ready = NULL;
151 }
152}
153
154void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
155{
156 struct rds_ib_incoming *inc;
157 struct rds_ib_incoming *inc_tmp;
158 struct rds_page_frag *frag;
159 struct rds_page_frag *frag_tmp;
160 LIST_HEAD(list);
161
162 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
163 rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
164 free_percpu(ic->i_cache_incs.percpu);
165
166 list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
167 list_del(&inc->ii_cache_entry);
168 WARN_ON(!list_empty(&inc->ii_frags));
169 kmem_cache_free(rds_ib_incoming_slab, inc);
170 }
171
172 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
173 rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
174 free_percpu(ic->i_cache_frags.percpu);
175
176 list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
177 list_del(&frag->f_cache_entry);
178 WARN_ON(!list_empty(&frag->f_item));
179 kmem_cache_free(rds_ib_frag_slab, frag);
180 }
181}
182
183/* fwd decl */
184static void rds_ib_recv_cache_put(struct list_head *new_item,
185 struct rds_ib_refill_cache *cache);
186static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);
187
188
189/* Recycle frag and attached recv buffer f_sg */
190static void rds_ib_frag_free(struct rds_ib_connection *ic,
191 struct rds_page_frag *frag)
192{
193 rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
194
195 rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
196}
197
198/* Recycle inc after freeing attached frags */
199void rds_ib_inc_free(struct rds_incoming *inc)
200{
201 struct rds_ib_incoming *ibinc;
202 struct rds_page_frag *frag;
203 struct rds_page_frag *pos;
204 struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
205
206 ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
207
208 /* Free attached frags */
209 list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
210 list_del_init(&frag->f_item);
211 rds_ib_frag_free(ic, frag);
212 }
213 BUG_ON(!list_empty(&ibinc->ii_frags));
214
215 rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
216 rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
108} 217}
109 218
110static void rds_ib_recv_clear_one(struct rds_ib_connection *ic, 219static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
@@ -115,10 +224,8 @@ static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
115 recv->r_ibinc = NULL; 224 recv->r_ibinc = NULL;
116 } 225 }
117 if (recv->r_frag) { 226 if (recv->r_frag) {
118 rds_ib_recv_unmap_page(ic, recv); 227 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
119 if (recv->r_frag->f_page) 228 rds_ib_frag_free(ic, recv->r_frag);
120 rds_ib_frag_drop_page(recv->r_frag);
121 rds_ib_frag_free(recv->r_frag);
122 recv->r_frag = NULL; 229 recv->r_frag = NULL;
123 } 230 }
124} 231}
@@ -129,84 +236,111 @@ void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
129 236
130 for (i = 0; i < ic->i_recv_ring.w_nr; i++) 237 for (i = 0; i < ic->i_recv_ring.w_nr; i++)
131 rds_ib_recv_clear_one(ic, &ic->i_recvs[i]); 238 rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
132
133 if (ic->i_frag.f_page)
134 rds_ib_frag_drop_page(&ic->i_frag);
135} 239}
136 240
137static int rds_ib_recv_refill_one(struct rds_connection *conn, 241static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
138 struct rds_ib_recv_work *recv, 242 gfp_t slab_mask)
139 gfp_t kptr_gfp, gfp_t page_gfp)
140{ 243{
141 struct rds_ib_connection *ic = conn->c_transport_data; 244 struct rds_ib_incoming *ibinc;
142 dma_addr_t dma_addr; 245 struct list_head *cache_item;
143 struct ib_sge *sge; 246 int avail_allocs;
144 int ret = -ENOMEM;
145 247
146 if (recv->r_ibinc == NULL) { 248 cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
147 if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) { 249 if (cache_item) {
250 ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
251 } else {
252 avail_allocs = atomic_add_unless(&rds_ib_allocation,
253 1, rds_ib_sysctl_max_recv_allocation);
254 if (!avail_allocs) {
148 rds_ib_stats_inc(s_ib_rx_alloc_limit); 255 rds_ib_stats_inc(s_ib_rx_alloc_limit);
149 goto out; 256 return NULL;
150 } 257 }
151 recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, 258 ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
152 kptr_gfp); 259 if (!ibinc) {
153 if (recv->r_ibinc == NULL) {
154 atomic_dec(&rds_ib_allocation); 260 atomic_dec(&rds_ib_allocation);
155 goto out; 261 return NULL;
156 } 262 }
157 INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
158 rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
159 } 263 }
264 INIT_LIST_HEAD(&ibinc->ii_frags);
265 rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);
160 266
161 if (recv->r_frag == NULL) { 267 return ibinc;
162 recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, kptr_gfp); 268}
163 if (recv->r_frag == NULL) 269
164 goto out; 270static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
165 INIT_LIST_HEAD(&recv->r_frag->f_item); 271 gfp_t slab_mask, gfp_t page_mask)
166 recv->r_frag->f_page = NULL; 272{
273 struct rds_page_frag *frag;
274 struct list_head *cache_item;
275 int ret;
276
277 cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
278 if (cache_item) {
279 frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
280 } else {
281 frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
282 if (!frag)
283 return NULL;
284
285 sg_init_table(&frag->f_sg, 1);
286 ret = rds_page_remainder_alloc(&frag->f_sg,
287 RDS_FRAG_SIZE, page_mask);
288 if (ret) {
289 kmem_cache_free(rds_ib_frag_slab, frag);
290 return NULL;
291 }
167 } 292 }
168 293
169 if (ic->i_frag.f_page == NULL) { 294 INIT_LIST_HEAD(&frag->f_item);
170 ic->i_frag.f_page = alloc_page(page_gfp); 295
171 if (ic->i_frag.f_page == NULL) 296 return frag;
172 goto out; 297}
173 ic->i_frag.f_offset = 0; 298
299static int rds_ib_recv_refill_one(struct rds_connection *conn,
300 struct rds_ib_recv_work *recv, int prefill)
301{
302 struct rds_ib_connection *ic = conn->c_transport_data;
303 struct ib_sge *sge;
304 int ret = -ENOMEM;
305 gfp_t slab_mask = GFP_NOWAIT;
306 gfp_t page_mask = GFP_NOWAIT;
307
308 if (prefill) {
309 slab_mask = GFP_KERNEL;
310 page_mask = GFP_HIGHUSER;
174 } 311 }
175 312
176 dma_addr = ib_dma_map_page(ic->i_cm_id->device, 313 if (!ic->i_cache_incs.ready)
177 ic->i_frag.f_page, 314 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
178 ic->i_frag.f_offset, 315 if (!ic->i_cache_frags.ready)
179 RDS_FRAG_SIZE, 316 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
180 DMA_FROM_DEVICE);
181 if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
182 goto out;
183 317
184 /* 318 /*
185 * Once we get the RDS_PAGE_LAST_OFF frag then rds_ib_frag_unmap() 319 * ibinc was taken from recv if recv contained the start of a message.
186 * must be called on this recv. This happens as completions hit 320 * recvs that were continuations will still have this allocated.
187 * in order or on connection shutdown.
188 */ 321 */
189 recv->r_frag->f_page = ic->i_frag.f_page; 322 if (!recv->r_ibinc) {
190 recv->r_frag->f_offset = ic->i_frag.f_offset; 323 recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
191 recv->r_frag->f_mapped = dma_addr; 324 if (!recv->r_ibinc)
325 goto out;
326 }
192 327
193 sge = rds_ib_data_sge(ic, recv->r_sge); 328 WARN_ON(recv->r_frag); /* leak! */
194 sge->addr = dma_addr; 329 recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
195 sge->length = RDS_FRAG_SIZE; 330 if (!recv->r_frag)
331 goto out;
332
333 ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
334 1, DMA_FROM_DEVICE);
335 WARN_ON(ret != 1);
196 336
197 sge = rds_ib_header_sge(ic, recv->r_sge); 337 sge = &recv->r_sge[0];
198 sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header); 338 sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
199 sge->length = sizeof(struct rds_header); 339 sge->length = sizeof(struct rds_header);
200 340
201 get_page(recv->r_frag->f_page); 341 sge = &recv->r_sge[1];
202 342 sge->addr = sg_dma_address(&recv->r_frag->f_sg);
203 if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) { 343 sge->length = sg_dma_len(&recv->r_frag->f_sg);
204 ic->i_frag.f_offset += RDS_FRAG_SIZE;
205 } else {
206 put_page(ic->i_frag.f_page);
207 ic->i_frag.f_page = NULL;
208 ic->i_frag.f_offset = 0;
209 }
210 344
211 ret = 0; 345 ret = 0;
212out: 346out:
@@ -216,13 +350,11 @@ out:
216/* 350/*
217 * This tries to allocate and post unused work requests after making sure that 351 * This tries to allocate and post unused work requests after making sure that
218 * they have all the allocations they need to queue received fragments into 352 * they have all the allocations they need to queue received fragments into
219 * sockets. The i_recv_mutex is held here so that ring_alloc and _unalloc 353 * sockets.
220 * pairs don't go unmatched.
221 * 354 *
222 * -1 is returned if posting fails due to temporary resource exhaustion. 355 * -1 is returned if posting fails due to temporary resource exhaustion.
223 */ 356 */
224int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp, 357void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
225 gfp_t page_gfp, int prefill)
226{ 358{
227 struct rds_ib_connection *ic = conn->c_transport_data; 359 struct rds_ib_connection *ic = conn->c_transport_data;
228 struct rds_ib_recv_work *recv; 360 struct rds_ib_recv_work *recv;
@@ -236,28 +368,25 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
236 if (pos >= ic->i_recv_ring.w_nr) { 368 if (pos >= ic->i_recv_ring.w_nr) {
237 printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", 369 printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
238 pos); 370 pos);
239 ret = -EINVAL;
240 break; 371 break;
241 } 372 }
242 373
243 recv = &ic->i_recvs[pos]; 374 recv = &ic->i_recvs[pos];
244 ret = rds_ib_recv_refill_one(conn, recv, kptr_gfp, page_gfp); 375 ret = rds_ib_recv_refill_one(conn, recv, prefill);
245 if (ret) { 376 if (ret) {
246 ret = -1;
247 break; 377 break;
248 } 378 }
249 379
250 /* XXX when can this fail? */ 380 /* XXX when can this fail? */
251 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); 381 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
252 rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv, 382 rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
253 recv->r_ibinc, recv->r_frag->f_page, 383 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
254 (long) recv->r_frag->f_mapped, ret); 384 (long) sg_dma_address(&recv->r_frag->f_sg), ret);
255 if (ret) { 385 if (ret) {
256 rds_ib_conn_error(conn, "recv post on " 386 rds_ib_conn_error(conn, "recv post on "
257 "%pI4 returned %d, disconnecting and " 387 "%pI4 returned %d, disconnecting and "
258 "reconnecting\n", &conn->c_faddr, 388 "reconnecting\n", &conn->c_faddr,
259 ret); 389 ret);
260 ret = -1;
261 break; 390 break;
262 } 391 }
263 392
@@ -270,37 +399,73 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
270 399
271 if (ret) 400 if (ret)
272 rds_ib_ring_unalloc(&ic->i_recv_ring, 1); 401 rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
273 return ret;
274} 402}
275 403
276void rds_ib_inc_purge(struct rds_incoming *inc) 404/*
405 * We want to recycle several types of recv allocations, like incs and frags.
 406 * To use this, the *_free() function passes in a pointer to a list_head within
407 * the recyclee, as well as the cache to put it on.
408 *
409 * First, we put the memory on a percpu list. When this reaches a certain size,
 410 * we move it to an intermediate non-percpu list in a lockless manner, with some
 411 * xchg/cmpxchg wizardry.
412 *
413 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
414 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
 415 * list_empty() will return true even when one element is actually present.
416 */
417static void rds_ib_recv_cache_put(struct list_head *new_item,
418 struct rds_ib_refill_cache *cache)
277{ 419{
278 struct rds_ib_incoming *ibinc; 420 unsigned long flags;
279 struct rds_page_frag *frag; 421 struct rds_ib_cache_head *chp;
280 struct rds_page_frag *pos; 422 struct list_head *old;
281 423
282 ibinc = container_of(inc, struct rds_ib_incoming, ii_inc); 424 local_irq_save(flags);
283 rdsdebug("purging ibinc %p inc %p\n", ibinc, inc);
284 425
285 list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) { 426 chp = per_cpu_ptr(cache->percpu, smp_processor_id());
286 list_del_init(&frag->f_item); 427 if (!chp->first)
287 rds_ib_frag_drop_page(frag); 428 INIT_LIST_HEAD(new_item);
288 rds_ib_frag_free(frag); 429 else /* put on front */
289 } 430 list_add_tail(new_item, chp->first);
431 chp->first = new_item;
432 chp->count++;
433
434 if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
435 goto end;
436
437 /*
438 * Return our per-cpu first list to the cache's xfer by atomically
439 * grabbing the current xfer list, appending it to our per-cpu list,
440 * and then atomically returning that entire list back to the
441 * cache's xfer list as long as it's still empty.
442 */
443 do {
444 old = xchg(&cache->xfer, NULL);
445 if (old)
446 list_splice_entire_tail(old, chp->first);
447 old = cmpxchg(&cache->xfer, NULL, chp->first);
448 } while (old);
449
450 chp->first = NULL;
451 chp->count = 0;
452end:
453 local_irq_restore(flags);
290} 454}
291 455
292void rds_ib_inc_free(struct rds_incoming *inc) 456static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
293{ 457{
294 struct rds_ib_incoming *ibinc; 458 struct list_head *head = cache->ready;
295 459
296 ibinc = container_of(inc, struct rds_ib_incoming, ii_inc); 460 if (head) {
461 if (!list_empty(head)) {
462 cache->ready = head->next;
463 list_del_init(head);
464 } else
465 cache->ready = NULL;
466 }
297 467
298 rds_ib_inc_purge(inc); 468 return head;
299 rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
300 BUG_ON(!list_empty(&ibinc->ii_frags));
301 kmem_cache_free(rds_ib_incoming_slab, ibinc);
302 atomic_dec(&rds_ib_allocation);
303 BUG_ON(atomic_read(&rds_ib_allocation) < 0);
304} 469}
305 470
306int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, 471int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
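
The recycle cache above moves freed incs and frags from a per-cpu list to a shared xfer pointer in batches, using the xchg/cmpxchg loop the comment describes: grab whatever is already published, splice it onto the local batch, and reinstall the combined chain only while the slot is still empty. A singly-linked C11 model, with _Thread_local data standing in for per-cpu lists (the kernel version disables interrupts instead):

#include <stdatomic.h>
#include <stddef.h>

#define BATCH 32        /* stands in for RDS_IB_RECYCLE_BATCH_COUNT */

struct node { struct node *next; };

struct cache {
        _Atomic(struct node *) xfer;    /* producers -> consumer handoff */
        struct node *ready;             /* consumer-private, no locking */
};

static _Thread_local struct node *local_first;  /* models the percpu list */
static _Thread_local int local_count;

static void cache_put(struct cache *c, struct node *n)
{
        struct node *old, *expected, *tail;

        n->next = local_first;          /* cheap thread-local push */
        local_first = n;
        if (++local_count < BATCH)
                return;

        /* Publish the batch: steal whatever is in xfer, splice it onto
         * our chain, then install the combined chain while the slot is
         * still empty -- retrying if another producer slipped in. */
        old = atomic_exchange(&c->xfer, NULL);
        for (;;) {
                if (old) {
                        tail = local_first;
                        while (tail->next)
                                tail = tail->next;
                        tail->next = old;       /* splice stolen chain */
                }
                expected = NULL;
                if (atomic_compare_exchange_strong(&c->xfer, &expected,
                                                   local_first))
                        break;
                old = atomic_exchange(&c->xfer, NULL);
        }
        local_first = NULL;
        local_count = 0;
}

static struct node *cache_get(struct cache *c)
{
        struct node *n;

        if (!c->ready)                  /* pull over a published batch */
                c->ready = atomic_exchange(&c->xfer, NULL);
        n = c->ready;
        if (n)
                c->ready = n->next;
        return n;
}
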
@@ -336,13 +501,13 @@ int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
336 to_copy = min_t(unsigned long, to_copy, len - copied); 501 to_copy = min_t(unsigned long, to_copy, len - copied);
337 502
338 rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag " 503 rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
339 "[%p, %lu] + %lu\n", 504 "[%p, %u] + %lu\n",
340 to_copy, iov->iov_base, iov->iov_len, iov_off, 505 to_copy, iov->iov_base, iov->iov_len, iov_off,
341 frag->f_page, frag->f_offset, frag_off); 506 sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);
342 507
343 /* XXX needs + offset for multiple recvs per page */ 508 /* XXX needs + offset for multiple recvs per page */
344 ret = rds_page_copy_to_user(frag->f_page, 509 ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
345 frag->f_offset + frag_off, 510 frag->f_sg.offset + frag_off,
346 iov->iov_base + iov_off, 511 iov->iov_base + iov_off,
347 to_copy); 512 to_copy);
348 if (ret) { 513 if (ret) {
@@ -557,47 +722,6 @@ u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
557 return rds_ib_get_ack(ic); 722 return rds_ib_get_ack(ic);
558} 723}
559 724
560static struct rds_header *rds_ib_get_header(struct rds_connection *conn,
561 struct rds_ib_recv_work *recv,
562 u32 data_len)
563{
564 struct rds_ib_connection *ic = conn->c_transport_data;
565 void *hdr_buff = &ic->i_recv_hdrs[recv - ic->i_recvs];
566 void *addr;
567 u32 misplaced_hdr_bytes;
568
569 /*
570 * Support header at the front (RDS 3.1+) as well as header-at-end.
571 *
572 * Cases:
573 * 1) header all in header buff (great!)
574 * 2) header all in data page (copy all to header buff)
575 * 3) header split across hdr buf + data page
576 * (move bit in hdr buff to end before copying other bit from data page)
577 */
578 if (conn->c_version > RDS_PROTOCOL_3_0 || data_len == RDS_FRAG_SIZE)
579 return hdr_buff;
580
581 if (data_len <= (RDS_FRAG_SIZE - sizeof(struct rds_header))) {
582 addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
583 memcpy(hdr_buff,
584 addr + recv->r_frag->f_offset + data_len,
585 sizeof(struct rds_header));
586 kunmap_atomic(addr, KM_SOFTIRQ0);
587 return hdr_buff;
588 }
589
590 misplaced_hdr_bytes = (sizeof(struct rds_header) - (RDS_FRAG_SIZE - data_len));
591
592 memmove(hdr_buff + misplaced_hdr_bytes, hdr_buff, misplaced_hdr_bytes);
593
594 addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
595 memcpy(hdr_buff, addr + recv->r_frag->f_offset + data_len,
596 sizeof(struct rds_header) - misplaced_hdr_bytes);
597 kunmap_atomic(addr, KM_SOFTIRQ0);
598 return hdr_buff;
599}
600
601/* 725/*
602 * It's kind of lame that we're copying from the posted receive pages into 726 * It's kind of lame that we're copying from the posted receive pages into
603 * long-lived bitmaps. We could have posted the bitmaps and rdma written into 727 * long-lived bitmaps. We could have posted the bitmaps and rdma written into
@@ -639,7 +763,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
639 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off); 763 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
640 BUG_ON(to_copy & 7); /* Must be 64bit aligned. */ 764 BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
641 765
642 addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0); 766 addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0);
643 767
644 src = addr + frag_off; 768 src = addr + frag_off;
645 dst = (void *)map->m_page_addrs[map_page] + map_off; 769 dst = (void *)map->m_page_addrs[map_page] + map_off;
@@ -710,7 +834,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
710 } 834 }
711 data_len -= sizeof(struct rds_header); 835 data_len -= sizeof(struct rds_header);
712 836
713 ihdr = rds_ib_get_header(conn, recv, data_len); 837 ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
714 838
715 /* Validate the checksum. */ 839 /* Validate the checksum. */
716 if (!rds_message_verify_checksum(ihdr)) { 840 if (!rds_message_verify_checksum(ihdr)) {
@@ -742,12 +866,12 @@ static void rds_ib_process_recv(struct rds_connection *conn,
742 * the inc is freed. We don't go that route, so we have to drop the 866 * the inc is freed. We don't go that route, so we have to drop the
743 * page ref ourselves. We can't just leave the page on the recv 867 * page ref ourselves. We can't just leave the page on the recv
744 * because that confuses the dma mapping of pages and each recv's use 868 * because that confuses the dma mapping of pages and each recv's use
745 * of a partial page. We can leave the frag, though, it will be 869 * of a partial page.
746 * reused.
747 * 870 *
748 * FIXME: Fold this into the code path below. 871 * FIXME: Fold this into the code path below.
749 */ 872 */
750 rds_ib_frag_drop_page(recv->r_frag); 873 rds_ib_frag_free(ic, recv->r_frag);
874 recv->r_frag = NULL;
751 return; 875 return;
752 } 876 }
753 877
@@ -757,7 +881,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
757 * into the inc and save the inc so we can hang upcoming fragments 881 * into the inc and save the inc so we can hang upcoming fragments
758 * off its list. 882 * off its list.
759 */ 883 */
760 if (ibinc == NULL) { 884 if (!ibinc) {
761 ibinc = recv->r_ibinc; 885 ibinc = recv->r_ibinc;
762 recv->r_ibinc = NULL; 886 recv->r_ibinc = NULL;
763 ic->i_ibinc = ibinc; 887 ic->i_ibinc = ibinc;
@@ -842,32 +966,38 @@ static inline void rds_poll_cq(struct rds_ib_connection *ic,
842 struct rds_ib_recv_work *recv; 966 struct rds_ib_recv_work *recv;
843 967
844 while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) { 968 while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
845 rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", 969 rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
846 (unsigned long long)wc.wr_id, wc.status, wc.byte_len, 970 (unsigned long long)wc.wr_id, wc.status,
971 rds_ib_wc_status_str(wc.status), wc.byte_len,
847 be32_to_cpu(wc.ex.imm_data)); 972 be32_to_cpu(wc.ex.imm_data));
848 rds_ib_stats_inc(s_ib_rx_cq_event); 973 rds_ib_stats_inc(s_ib_rx_cq_event);
849 974
850 recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)]; 975 recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
851 976
852 rds_ib_recv_unmap_page(ic, recv); 977 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
853 978
854 /* 979 /*
855 * Also process recvs in connecting state because it is possible 980 * Also process recvs in connecting state because it is possible
856 * to get a recv completion _before_ the rdmacm ESTABLISHED 981 * to get a recv completion _before_ the rdmacm ESTABLISHED
857 * event is processed. 982 * event is processed.
858 */ 983 */
859 if (rds_conn_up(conn) || rds_conn_connecting(conn)) { 984 if (wc.status == IB_WC_SUCCESS) {
985 rds_ib_process_recv(conn, recv, wc.byte_len, state);
986 } else {
860 /* We expect errors as the qp is drained during shutdown */ 987 /* We expect errors as the qp is drained during shutdown */
861 if (wc.status == IB_WC_SUCCESS) { 988 if (rds_conn_up(conn) || rds_conn_connecting(conn))
862 rds_ib_process_recv(conn, recv, wc.byte_len, state); 989 rds_ib_conn_error(conn, "recv completion on %pI4 had "
863 } else { 990 "status %u (%s), disconnecting and "
864 rds_ib_conn_error(conn, "recv completion on " 991 "reconnecting\n", &conn->c_faddr,
865 "%pI4 had status %u, disconnecting and " 992 wc.status,
866 "reconnecting\n", &conn->c_faddr, 993 rds_ib_wc_status_str(wc.status));
867 wc.status);
868 }
869 } 994 }
870 995
996 /*
997 * It's very important that we only free this ring entry if we've truly
998 * freed the resources allocated to the entry. The refilling path can
999 * leak if we don't.
1000 */
871 rds_ib_ring_free(&ic->i_recv_ring, 1); 1001 rds_ib_ring_free(&ic->i_recv_ring, 1);
872 } 1002 }
873} 1003}
@@ -897,11 +1027,8 @@ void rds_ib_recv_tasklet_fn(unsigned long data)
897 if (rds_ib_ring_empty(&ic->i_recv_ring)) 1027 if (rds_ib_ring_empty(&ic->i_recv_ring))
898 rds_ib_stats_inc(s_ib_rx_ring_empty); 1028 rds_ib_stats_inc(s_ib_rx_ring_empty);
899 1029
900 /*
901 * If the ring is running low, then schedule the thread to refill.
902 */
903 if (rds_ib_ring_low(&ic->i_recv_ring)) 1030 if (rds_ib_ring_low(&ic->i_recv_ring))
904 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 1031 rds_ib_recv_refill(conn, 0);
905} 1032}
906 1033
907int rds_ib_recv(struct rds_connection *conn) 1034int rds_ib_recv(struct rds_connection *conn)
@@ -910,25 +1037,13 @@ int rds_ib_recv(struct rds_connection *conn)
910 int ret = 0; 1037 int ret = 0;
911 1038
912 rdsdebug("conn %p\n", conn); 1039 rdsdebug("conn %p\n", conn);
913
914 /*
915 * If we get a temporary posting failure in this context then
916 * we're really low and we want the caller to back off for a bit.
917 */
918 mutex_lock(&ic->i_recv_mutex);
919 if (rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
920 ret = -ENOMEM;
921 else
922 rds_ib_stats_inc(s_ib_rx_refill_from_thread);
923 mutex_unlock(&ic->i_recv_mutex);
924
925 if (rds_conn_up(conn)) 1040 if (rds_conn_up(conn))
926 rds_ib_attempt_ack(ic); 1041 rds_ib_attempt_ack(ic);
927 1042
928 return ret; 1043 return ret;
929} 1044}
930 1045
931int __init rds_ib_recv_init(void) 1046int rds_ib_recv_init(void)
932{ 1047{
933 struct sysinfo si; 1048 struct sysinfo si;
934 int ret = -ENOMEM; 1049 int ret = -ENOMEM;
@@ -939,14 +1054,14 @@ int __init rds_ib_recv_init(void)
939 1054
940 rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming", 1055 rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
941 sizeof(struct rds_ib_incoming), 1056 sizeof(struct rds_ib_incoming),
942 0, 0, NULL); 1057 0, SLAB_HWCACHE_ALIGN, NULL);
943 if (rds_ib_incoming_slab == NULL) 1058 if (!rds_ib_incoming_slab)
944 goto out; 1059 goto out;
945 1060
946 rds_ib_frag_slab = kmem_cache_create("rds_ib_frag", 1061 rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
947 sizeof(struct rds_page_frag), 1062 sizeof(struct rds_page_frag),
948 0, 0, NULL); 1063 0, SLAB_HWCACHE_ALIGN, NULL);
949 if (rds_ib_frag_slab == NULL) 1064 if (!rds_ib_frag_slab)
950 kmem_cache_destroy(rds_ib_incoming_slab); 1065 kmem_cache_destroy(rds_ib_incoming_slab);
951 else 1066 else
952 ret = 0; 1067 ret = 0;
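
The init path above now requests SLAB_HWCACHE_ALIGN for both caches and unwinds the first cache when the second allocation fails. A minimal sketch of the same create-two-caches pattern, assuming hypothetical demo_* names and object sizes:

#include <linux/slab.h>
#include <linux/init.h>

static struct kmem_cache *demo_incoming_slab;
static struct kmem_cache *demo_frag_slab;

static int __init demo_slabs_init(void)
{
        /* Cache-line alignment keeps hot per-recv objects from
         * sharing lines across CPUs. */
        demo_incoming_slab = kmem_cache_create("demo_incoming", 128, 0,
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (!demo_incoming_slab)
                return -ENOMEM;

        demo_frag_slab = kmem_cache_create("demo_frag", 64, 0,
                                           SLAB_HWCACHE_ALIGN, NULL);
        if (!demo_frag_slab) {
                /* Unwind the first cache so a partial init leaks nothing. */
                kmem_cache_destroy(demo_incoming_slab);
                return -ENOMEM;
        }
        return 0;
}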
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 17fa80803ab0..71f373c421bc 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -36,11 +36,49 @@
36#include <linux/dmapool.h> 36#include <linux/dmapool.h>
37 37
38#include "rds.h" 38#include "rds.h"
39#include "rdma.h"
40#include "ib.h" 39#include "ib.h"
41 40
42static void rds_ib_send_rdma_complete(struct rds_message *rm, 41static char *rds_ib_wc_status_strings[] = {
43 int wc_status) 42#define RDS_IB_WC_STATUS_STR(foo) \
43 [IB_WC_##foo] = __stringify(IB_WC_##foo)
44 RDS_IB_WC_STATUS_STR(SUCCESS),
45 RDS_IB_WC_STATUS_STR(LOC_LEN_ERR),
46 RDS_IB_WC_STATUS_STR(LOC_QP_OP_ERR),
47 RDS_IB_WC_STATUS_STR(LOC_EEC_OP_ERR),
48 RDS_IB_WC_STATUS_STR(LOC_PROT_ERR),
49 RDS_IB_WC_STATUS_STR(WR_FLUSH_ERR),
50 RDS_IB_WC_STATUS_STR(MW_BIND_ERR),
51 RDS_IB_WC_STATUS_STR(BAD_RESP_ERR),
52 RDS_IB_WC_STATUS_STR(LOC_ACCESS_ERR),
53 RDS_IB_WC_STATUS_STR(REM_INV_REQ_ERR),
54 RDS_IB_WC_STATUS_STR(REM_ACCESS_ERR),
55 RDS_IB_WC_STATUS_STR(REM_OP_ERR),
56 RDS_IB_WC_STATUS_STR(RETRY_EXC_ERR),
57 RDS_IB_WC_STATUS_STR(RNR_RETRY_EXC_ERR),
58 RDS_IB_WC_STATUS_STR(LOC_RDD_VIOL_ERR),
59 RDS_IB_WC_STATUS_STR(REM_INV_RD_REQ_ERR),
60 RDS_IB_WC_STATUS_STR(REM_ABORT_ERR),
61 RDS_IB_WC_STATUS_STR(INV_EECN_ERR),
62 RDS_IB_WC_STATUS_STR(INV_EEC_STATE_ERR),
63 RDS_IB_WC_STATUS_STR(FATAL_ERR),
64 RDS_IB_WC_STATUS_STR(RESP_TIMEOUT_ERR),
65 RDS_IB_WC_STATUS_STR(GENERAL_ERR),
66#undef RDS_IB_WC_STATUS_STR
67};
68
69char *rds_ib_wc_status_str(enum ib_wc_status status)
70{
71 return rds_str_array(rds_ib_wc_status_strings,
72 ARRAY_SIZE(rds_ib_wc_status_strings), status);
73}
74
75/*
76 * Convert IB-specific error message to RDS error message and call core
77 * completion handler.
78 */
79static void rds_ib_send_complete(struct rds_message *rm,
80 int wc_status,
81 void (*complete)(struct rds_message *rm, int status))
44{ 82{
45 int notify_status; 83 int notify_status;
46 84
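
The table above combines __stringify with designated initializers so each string sits at its enum's index, and rds_str_array() then does a bounds- and NULL-checked lookup so sparse or out-of-range statuses fall back to a safe string. A standalone sketch of that technique; the statuses and names here are made up:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical sparse status table; the gaps stay NULL. */
static const char *demo_status_str[] = {
        [0] = "SUCCESS",
        [1] = "LOC_LEN_ERR",
        [5] = "WR_FLUSH_ERR",
};

/* Bounds- and NULL-checked lookup in the spirit of rds_str_array(). */
static const char *str_lookup(const char **arr, size_t n, unsigned int idx)
{
        if (idx < n && arr[idx])
                return arr[idx];
        return "unknown";
}

int main(void)
{
        size_t n = sizeof(demo_status_str) / sizeof(demo_status_str[0]);

        printf("%s\n", str_lookup(demo_status_str, n, 5)); /* WR_FLUSH_ERR */
        printf("%s\n", str_lookup(demo_status_str, n, 3)); /* unknown: gap */
        printf("%s\n", str_lookup(demo_status_str, n, 9)); /* unknown: range */
        return 0;
}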
@@ -60,69 +98,125 @@ static void rds_ib_send_rdma_complete(struct rds_message *rm,
60 notify_status = RDS_RDMA_OTHER_ERROR; 98 notify_status = RDS_RDMA_OTHER_ERROR;
61 break; 99 break;
62 } 100 }
63 rds_rdma_send_complete(rm, notify_status); 101 complete(rm, notify_status);
102}
103
104static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
105 struct rm_data_op *op,
106 int wc_status)
107{
108 if (op->op_nents)
109 ib_dma_unmap_sg(ic->i_cm_id->device,
110 op->op_sg, op->op_nents,
111 DMA_TO_DEVICE);
64} 112}
65 113
66static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic, 114static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
67 struct rds_rdma_op *op) 115 struct rm_rdma_op *op,
116 int wc_status)
68{ 117{
69 if (op->r_mapped) { 118 if (op->op_mapped) {
70 ib_dma_unmap_sg(ic->i_cm_id->device, 119 ib_dma_unmap_sg(ic->i_cm_id->device,
71 op->r_sg, op->r_nents, 120 op->op_sg, op->op_nents,
72 op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 121 op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
73 op->r_mapped = 0; 122 op->op_mapped = 0;
74 } 123 }
124
125 /* If the user asked for a completion notification on this
126 * message, we can implement three different semantics:
127 * 1. Notify when we received the ACK on the RDS message
128 * that was queued with the RDMA. This provides reliable
129 * notification of RDMA status at the expense of a one-way
130 * packet delay.
131 * 2. Notify when the IB stack gives us the completion event for
132 * the RDMA operation.
133 * 3. Notify when the IB stack gives us the completion event for
134 * the accompanying RDS messages.
135 * Here, we implement approach #3. To implement approach #2,
136 * we would need to take an event for the rdma WR. To implement #1,
137 * don't call rds_rdma_send_complete at all, and fall back to the notify
138 * handling in the ACK processing code.
139 *
140 * Note: There's no need to explicitly sync any RDMA buffers using
141 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
142 * operation itself unmapped the RDMA buffers, which takes care
143 * of synching.
144 */
145 rds_ib_send_complete(container_of(op, struct rds_message, rdma),
146 wc_status, rds_rdma_send_complete);
147
148 if (op->op_write)
149 rds_stats_add(s_send_rdma_bytes, op->op_bytes);
150 else
151 rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
75} 152}
76 153
77static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic, 154static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
78 struct rds_ib_send_work *send, 155 struct rm_atomic_op *op,
79 int wc_status) 156 int wc_status)
80{ 157{
81 struct rds_message *rm = send->s_rm; 158 /* unmap atomic recvbuf */
82 159 if (op->op_mapped) {
83 rdsdebug("ic %p send %p rm %p\n", ic, send, rm); 160 ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
84 161 DMA_FROM_DEVICE);
85 ib_dma_unmap_sg(ic->i_cm_id->device, 162 op->op_mapped = 0;
86 rm->m_sg, rm->m_nents, 163 }
87 DMA_TO_DEVICE);
88
89 if (rm->m_rdma_op != NULL) {
90 rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
91
92 /* If the user asked for a completion notification on this
93 * message, we can implement three different semantics:
94 * 1. Notify when we received the ACK on the RDS message
95 * that was queued with the RDMA. This provides reliable
96 * notification of RDMA status at the expense of a one-way
97 * packet delay.
98 * 2. Notify when the IB stack gives us the completion event for
99 * the RDMA operation.
100 * 3. Notify when the IB stack gives us the completion event for
101 * the accompanying RDS messages.
102 * Here, we implement approach #3. To implement approach #2,
103 * call rds_rdma_send_complete from the cq_handler. To implement #1,
104 * don't call rds_rdma_send_complete at all, and fall back to the notify
105 * handling in the ACK processing code.
106 *
107 * Note: There's no need to explicitly sync any RDMA buffers using
108 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
109 * operation itself unmapped the RDMA buffers, which takes care
110 * of synching.
111 */
112 rds_ib_send_rdma_complete(rm, wc_status);
113 164
114 if (rm->m_rdma_op->r_write) 165 rds_ib_send_complete(container_of(op, struct rds_message, atomic),
115 rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes); 166 wc_status, rds_atomic_send_complete);
116 else 167
117 rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes); 168 if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
169 rds_ib_stats_inc(s_ib_atomic_cswp);
170 else
171 rds_ib_stats_inc(s_ib_atomic_fadd);
172}
173
174/*
175 * Unmap the resources associated with a struct send_work.
176 *
 177 * Returns the rm because the caller, the event handler, needs it,
 178 * and currently the only way to recover it is by switching on
 179 * wr.opcode.
180 */
181static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
182 struct rds_ib_send_work *send,
183 int wc_status)
184{
185 struct rds_message *rm = NULL;
186
187 /* In the error case, wc.opcode sometimes contains garbage */
188 switch (send->s_wr.opcode) {
189 case IB_WR_SEND:
190 if (send->s_op) {
191 rm = container_of(send->s_op, struct rds_message, data);
192 rds_ib_send_unmap_data(ic, send->s_op, wc_status);
193 }
194 break;
195 case IB_WR_RDMA_WRITE:
196 case IB_WR_RDMA_READ:
197 if (send->s_op) {
198 rm = container_of(send->s_op, struct rds_message, rdma);
199 rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
200 }
201 break;
202 case IB_WR_ATOMIC_FETCH_AND_ADD:
203 case IB_WR_ATOMIC_CMP_AND_SWP:
204 if (send->s_op) {
205 rm = container_of(send->s_op, struct rds_message, atomic);
206 rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
207 }
208 break;
209 default:
210 if (printk_ratelimit())
211 printk(KERN_NOTICE
212 "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
213 __func__, send->s_wr.opcode);
214 break;
118 } 215 }
119 216
120 /* If anyone waited for this message to get flushed out, wake 217 send->s_wr.opcode = 0xdead;
121 * them up now */
122 rds_message_unmapped(rm);
123 218
124 rds_message_put(rm); 219 return rm;
125 send->s_rm = NULL;
126} 220}
127 221
128void rds_ib_send_init_ring(struct rds_ib_connection *ic) 222void rds_ib_send_init_ring(struct rds_ib_connection *ic)
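
rds_ib_send_unmap_op above recovers the owning rds_message from the embedded op with container_of; which union member is live is only knowable from wr.opcode, hence the switch. A self-contained sketch of that recovery, using a hypothetical struct layout:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical layout mirroring rds_message's embedded ops. */
struct demo_data_op { int nents; };
struct demo_message { int seq; struct demo_data_op data; };

int main(void)
{
        struct demo_message m = { .seq = 7 };
        struct demo_data_op *op = &m.data;

        /* Recover the enclosing message from the op pointer alone. */
        struct demo_message *rm = container_of(op, struct demo_message, data);
        printf("seq=%d\n", rm->seq);    /* prints seq=7 */
        return 0;
}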
@@ -133,23 +227,18 @@ void rds_ib_send_init_ring(struct rds_ib_connection *ic)
133 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) { 227 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
134 struct ib_sge *sge; 228 struct ib_sge *sge;
135 229
136 send->s_rm = NULL;
137 send->s_op = NULL; 230 send->s_op = NULL;
138 231
139 send->s_wr.wr_id = i; 232 send->s_wr.wr_id = i;
140 send->s_wr.sg_list = send->s_sge; 233 send->s_wr.sg_list = send->s_sge;
141 send->s_wr.num_sge = 1;
142 send->s_wr.opcode = IB_WR_SEND;
143 send->s_wr.send_flags = 0;
144 send->s_wr.ex.imm_data = 0; 234 send->s_wr.ex.imm_data = 0;
145 235
146 sge = rds_ib_data_sge(ic, send->s_sge); 236 sge = &send->s_sge[0];
147 sge->lkey = ic->i_mr->lkey;
148
149 sge = rds_ib_header_sge(ic, send->s_sge);
150 sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header)); 237 sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
151 sge->length = sizeof(struct rds_header); 238 sge->length = sizeof(struct rds_header);
152 sge->lkey = ic->i_mr->lkey; 239 sge->lkey = ic->i_mr->lkey;
240
241 send->s_sge[1].lkey = ic->i_mr->lkey;
153 } 242 }
154} 243}
155 244
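
After this rework each ring entry keeps its header permanently in s_sge[0], only the data slot s_sge[1] is filled per fragment, and both lkeys are preset at init. A sketch of that fixed two-slot layout; the types and sizes are stand-ins:

#include <stdio.h>

struct demo_sge { unsigned long addr; unsigned int length, lkey; };

#define HDR_LEN 48      /* stand-in for sizeof(struct rds_header) */

static void init_entry(struct demo_sge sge[2], unsigned long hdrs_dma,
                       unsigned int i, unsigned int lkey)
{
        sge[0].addr   = hdrs_dma + i * HDR_LEN; /* fixed header slot */
        sge[0].length = HDR_LEN;
        sge[0].lkey   = lkey;
        sge[1].lkey   = lkey;   /* addr/length filled per fragment later */
}

int main(void)
{
        struct demo_sge sge[2];

        init_entry(sge, 0x1000, 3, 42);
        printf("hdr at 0x%lx len %u\n", sge[0].addr, sge[0].length);
        return 0;
}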
@@ -159,16 +248,24 @@ void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
159 u32 i; 248 u32 i;
160 249
161 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) { 250 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
162 if (send->s_wr.opcode == 0xdead) 251 if (send->s_op && send->s_wr.opcode != 0xdead)
163 continue; 252 rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
164 if (send->s_rm)
165 rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
166 if (send->s_op)
167 rds_ib_send_unmap_rdma(ic, send->s_op);
168 } 253 }
169} 254}
170 255
171/* 256/*
257 * The only fast path caller always has a non-zero nr, so we don't
258 * bother testing nr before performing the atomic sub.
259 */
260static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
261{
262 if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
263 waitqueue_active(&rds_ib_ring_empty_wait))
264 wake_up(&rds_ib_ring_empty_wait);
265 BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
266}
267
268/*
172 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc 269 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
173 * operations performed in the send path. As the sender allocs and potentially 270 * operations performed in the send path. As the sender allocs and potentially
174 * unallocs the next free entry in the ring it doesn't alter which is 271 * unallocs the next free entry in the ring it doesn't alter which is
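
rds_ib_sub_signaled above pairs an atomic counter of signaled WRs with a waitqueue, so shutdown can wait for the counter to drain to zero. A userspace model of that count-up/count-down handshake using C11 atomics, with a printf standing in for the wakeup:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int signaled_sends;

static void post_signaled(int nr)
{
        atomic_fetch_add(&signaled_sends, nr);
}

static void complete_signaled(int nr)
{
        /* fetch_sub returns the old value; old - nr is the new count */
        if (atomic_fetch_sub(&signaled_sends, nr) - nr == 0)
                printf("all signaled sends drained; wake the waiter\n");
}

int main(void)
{
        post_signaled(3);
        complete_signaled(2);
        complete_signaled(1);   /* count hits zero here */
        return 0;
}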
@@ -178,12 +275,14 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
178{ 275{
179 struct rds_connection *conn = context; 276 struct rds_connection *conn = context;
180 struct rds_ib_connection *ic = conn->c_transport_data; 277 struct rds_ib_connection *ic = conn->c_transport_data;
278 struct rds_message *rm = NULL;
181 struct ib_wc wc; 279 struct ib_wc wc;
182 struct rds_ib_send_work *send; 280 struct rds_ib_send_work *send;
183 u32 completed; 281 u32 completed;
184 u32 oldest; 282 u32 oldest;
185 u32 i = 0; 283 u32 i = 0;
186 int ret; 284 int ret;
285 int nr_sig = 0;
187 286
188 rdsdebug("cq %p conn %p\n", cq, conn); 287 rdsdebug("cq %p conn %p\n", cq, conn);
189 rds_ib_stats_inc(s_ib_tx_cq_call); 288 rds_ib_stats_inc(s_ib_tx_cq_call);
@@ -192,8 +291,9 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
192 rdsdebug("ib_req_notify_cq send failed: %d\n", ret); 291 rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
193 292
194 while (ib_poll_cq(cq, 1, &wc) > 0) { 293 while (ib_poll_cq(cq, 1, &wc) > 0) {
195 rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n", 294 rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
196 (unsigned long long)wc.wr_id, wc.status, wc.byte_len, 295 (unsigned long long)wc.wr_id, wc.status,
296 rds_ib_wc_status_str(wc.status), wc.byte_len,
197 be32_to_cpu(wc.ex.imm_data)); 297 be32_to_cpu(wc.ex.imm_data));
198 rds_ib_stats_inc(s_ib_tx_cq_event); 298 rds_ib_stats_inc(s_ib_tx_cq_event);
199 299
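
The completion handler re-arms the CQ before draining it, so a completion that lands between the final empty poll and the re-arm still raises an event. A hedged kernel-style sketch of that discipline; demo_cq_handler and its comments are ours, not the driver's:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Re-arm first, then drain: nothing can slip through the window
 * between the last poll and the next interrupt. */
static void demo_cq_handler(struct ib_cq *cq, void *context)
{
        struct ib_wc wc;

        if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP))
                pr_debug("ib_req_notify_cq failed\n");

        while (ib_poll_cq(cq, 1, &wc) > 0) {
                /* look up the ring entry from wc.wr_id, unmap it,
                 * and check wc.status before trusting the payload */
        }
}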
@@ -210,51 +310,30 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
210 310
211 for (i = 0; i < completed; i++) { 311 for (i = 0; i < completed; i++) {
212 send = &ic->i_sends[oldest]; 312 send = &ic->i_sends[oldest];
313 if (send->s_wr.send_flags & IB_SEND_SIGNALED)
314 nr_sig++;
213 315
214 /* In the error case, wc.opcode sometimes contains garbage */ 316 rm = rds_ib_send_unmap_op(ic, send, wc.status);
215 switch (send->s_wr.opcode) {
216 case IB_WR_SEND:
217 if (send->s_rm)
218 rds_ib_send_unmap_rm(ic, send, wc.status);
219 break;
220 case IB_WR_RDMA_WRITE:
221 case IB_WR_RDMA_READ:
222 /* Nothing to be done - the SG list will be unmapped
223 * when the SEND completes. */
224 break;
225 default:
226 if (printk_ratelimit())
227 printk(KERN_NOTICE
228 "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
229 __func__, send->s_wr.opcode);
230 break;
231 }
232 317
233 send->s_wr.opcode = 0xdead;
234 send->s_wr.num_sge = 1;
235 if (send->s_queued + HZ/2 < jiffies) 318 if (send->s_queued + HZ/2 < jiffies)
236 rds_ib_stats_inc(s_ib_tx_stalled); 319 rds_ib_stats_inc(s_ib_tx_stalled);
237 320
 238 /* If an RDMA operation produced an error, signal this right 321 if (send->s_op) {
239 * away. If we don't, the subsequent SEND that goes with this 322 if (send->s_op == rm->m_final_op) {
240 * RDMA will be canceled with ERR_WFLUSH, and the application 323 /* If anyone waited for this message to get flushed out, wake
 241 * never learns that the RDMA failed. */ 324 * them up now */
242 if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) { 325 rds_message_unmapped(rm);
243 struct rds_message *rm;
244
245 rm = rds_send_get_message(conn, send->s_op);
246 if (rm) {
247 if (rm->m_rdma_op)
248 rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
249 rds_ib_send_rdma_complete(rm, wc.status);
250 rds_message_put(rm);
251 } 326 }
327 rds_message_put(rm);
328 send->s_op = NULL;
252 } 329 }
253 330
254 oldest = (oldest + 1) % ic->i_send_ring.w_nr; 331 oldest = (oldest + 1) % ic->i_send_ring.w_nr;
255 } 332 }
256 333
257 rds_ib_ring_free(&ic->i_send_ring, completed); 334 rds_ib_ring_free(&ic->i_send_ring, completed);
335 rds_ib_sub_signaled(ic, nr_sig);
336 nr_sig = 0;
258 337
259 if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) || 338 if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
260 test_bit(0, &conn->c_map_queued)) 339 test_bit(0, &conn->c_map_queued))
@@ -262,10 +341,10 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
262 341
263 /* We expect errors as the qp is drained during shutdown */ 342 /* We expect errors as the qp is drained during shutdown */
264 if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) { 343 if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
265 rds_ib_conn_error(conn, 344 rds_ib_conn_error(conn, "send completion on %pI4 had status "
266 "send completion on %pI4 " 345 "%u (%s), disconnecting and reconnecting\n",
267 "had status %u, disconnecting and reconnecting\n", 346 &conn->c_faddr, wc.status,
268 &conn->c_faddr, wc.status); 347 rds_ib_wc_status_str(wc.status));
269 } 348 }
270 } 349 }
271} 350}
@@ -294,7 +373,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
294 * credits (see rds_ib_send_add_credits below). 373 * credits (see rds_ib_send_add_credits below).
295 * 374 *
296 * The RDS send code is essentially single-threaded; rds_send_xmit 375 * The RDS send code is essentially single-threaded; rds_send_xmit
297 * grabs c_send_lock to ensure exclusive access to the send ring. 376 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
298 * However, the ACK sending code is independent and can race with 377 * However, the ACK sending code is independent and can race with
299 * message SENDs. 378 * message SENDs.
300 * 379 *
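
The comment above reflects the move from grabbing c_send_lock to a RDS_IN_XMIT flag bit: senders race on an atomic test-and-set instead of blocking. A sketch of that try-lock idiom with the kernel's bitops; the bit number and names are illustrative only:

#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_IN_XMIT 2          /* illustrative bit in c_flags */

static unsigned long demo_flags;

static int demo_try_xmit(void)
{
        if (test_and_set_bit(DEMO_IN_XMIT, &demo_flags))
                return -EBUSY;  /* another CPU owns the send path */

        /* ... walk the send ring and post work requests ... */

        clear_bit(DEMO_IN_XMIT, &demo_flags);
        return 0;
}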
@@ -413,40 +492,21 @@ void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
413 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 492 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
414} 493}
415 494
416static inline void 495static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
417rds_ib_xmit_populate_wr(struct rds_ib_connection *ic, 496 struct rds_ib_send_work *send,
418 struct rds_ib_send_work *send, unsigned int pos, 497 bool notify)
419 unsigned long buffer, unsigned int length,
420 int send_flags)
421{ 498{
422 struct ib_sge *sge; 499 /*
423 500 * We want to delay signaling completions just enough to get
424 WARN_ON(pos != send - ic->i_sends); 501 * the batching benefits but not so much that we create dead time
425 502 * on the wire.
426 send->s_wr.send_flags = send_flags; 503 */
427 send->s_wr.opcode = IB_WR_SEND; 504 if (ic->i_unsignaled_wrs-- == 0 || notify) {
428 send->s_wr.num_sge = 2; 505 ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
429 send->s_wr.next = NULL; 506 send->s_wr.send_flags |= IB_SEND_SIGNALED;
430 send->s_queued = jiffies; 507 return 1;
431 send->s_op = NULL;
432
433 if (length != 0) {
434 sge = rds_ib_data_sge(ic, send->s_sge);
435 sge->addr = buffer;
436 sge->length = length;
437 sge->lkey = ic->i_mr->lkey;
438
439 sge = rds_ib_header_sge(ic, send->s_sge);
440 } else {
441 /* We're sending a packet with no payload. There is only
442 * one SGE */
443 send->s_wr.num_sge = 1;
444 sge = &send->s_sge[0];
445 } 508 }
446 509 return 0;
447 sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
448 sge->length = sizeof(struct rds_header);
449 sge->lkey = ic->i_mr->lkey;
450} 510}
451 511
452/* 512/*
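
rds_ib_set_wr_signal_state above centralizes the batching rule: only every Nth WR, or one the caller explicitly wants notified, carries IB_SEND_SIGNALED, bounding both completion traffic and the amount of unsignaled work outstanding. A runnable model of the counter, with a constant standing in for the sysctl:

#include <stdbool.h>
#include <stdio.h>

#define MAX_UNSIGNALED 16       /* stands in for max_unsignaled_wrs */

static int unsignaled = MAX_UNSIGNALED;

static bool set_signal_state(bool notify)
{
        if (unsignaled-- == 0 || notify) {
                unsignaled = MAX_UNSIGNALED;
                return true;    /* this WR gets IB_SEND_SIGNALED */
        }
        return false;
}

int main(void)
{
        int i, signaled = 0;

        for (i = 0; i < 40; i++)
                signaled += set_signal_state(false);
        printf("signaled %d of 40 WRs\n", signaled);    /* 2 of 40 */
        return 0;
}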
@@ -475,13 +535,14 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
475 u32 pos; 535 u32 pos;
476 u32 i; 536 u32 i;
477 u32 work_alloc; 537 u32 work_alloc;
478 u32 credit_alloc; 538 u32 credit_alloc = 0;
479 u32 posted; 539 u32 posted;
480 u32 adv_credits = 0; 540 u32 adv_credits = 0;
481 int send_flags = 0; 541 int send_flags = 0;
482 int sent; 542 int bytes_sent = 0;
483 int ret; 543 int ret;
484 int flow_controlled = 0; 544 int flow_controlled = 0;
545 int nr_sig = 0;
485 546
486 BUG_ON(off % RDS_FRAG_SIZE); 547 BUG_ON(off % RDS_FRAG_SIZE);
487 BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header)); 548 BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
@@ -507,14 +568,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
507 goto out; 568 goto out;
508 } 569 }
509 570
510 credit_alloc = work_alloc;
511 if (ic->i_flowctl) { 571 if (ic->i_flowctl) {
512 credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT); 572 credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
513 adv_credits += posted; 573 adv_credits += posted;
514 if (credit_alloc < work_alloc) { 574 if (credit_alloc < work_alloc) {
515 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc); 575 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
516 work_alloc = credit_alloc; 576 work_alloc = credit_alloc;
517 flow_controlled++; 577 flow_controlled = 1;
518 } 578 }
519 if (work_alloc == 0) { 579 if (work_alloc == 0) {
520 set_bit(RDS_LL_SEND_FULL, &conn->c_flags); 580 set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
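
When credits run short, the hunk above hands the surplus ring entries back and latches flow_controlled, so work_alloc never exceeds what the peer can accept. A toy model of that clamp:

#include <stdio.h>

/* work_alloc shrinks to the credits actually obtained; the surplus
 * ring entries are returned (ring_unalloc in the real code). */
static unsigned int clamp_to_credits(unsigned int work_alloc,
                                     unsigned int credit_alloc,
                                     unsigned int *returned,
                                     int *flow_controlled)
{
        if (credit_alloc < work_alloc) {
                *returned = work_alloc - credit_alloc;
                *flow_controlled = 1;
                return credit_alloc;
        }
        *returned = 0;
        return work_alloc;
}

int main(void)
{
        unsigned int ret_back;
        int fc = 0;
        unsigned int wa = clamp_to_credits(8, 5, &ret_back, &fc);

        printf("work_alloc=%u returned=%u flow_controlled=%d\n",
               wa, ret_back, fc);       /* 5, 3, 1 */
        return 0;
}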
@@ -525,31 +585,25 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
525 } 585 }
526 586
527 /* map the message the first time we see it */ 587 /* map the message the first time we see it */
528 if (ic->i_rm == NULL) { 588 if (!ic->i_data_op) {
529 /* 589 if (rm->data.op_nents) {
530 printk(KERN_NOTICE "rds_ib_xmit prep msg dport=%u flags=0x%x len=%d\n", 590 rm->data.op_count = ib_dma_map_sg(dev,
531 be16_to_cpu(rm->m_inc.i_hdr.h_dport), 591 rm->data.op_sg,
532 rm->m_inc.i_hdr.h_flags, 592 rm->data.op_nents,
533 be32_to_cpu(rm->m_inc.i_hdr.h_len)); 593 DMA_TO_DEVICE);
534 */ 594 rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
535 if (rm->m_nents) { 595 if (rm->data.op_count == 0) {
536 rm->m_count = ib_dma_map_sg(dev,
537 rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
538 rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
539 if (rm->m_count == 0) {
540 rds_ib_stats_inc(s_ib_tx_sg_mapping_failure); 596 rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
541 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); 597 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
542 ret = -ENOMEM; /* XXX ? */ 598 ret = -ENOMEM; /* XXX ? */
543 goto out; 599 goto out;
544 } 600 }
545 } else { 601 } else {
546 rm->m_count = 0; 602 rm->data.op_count = 0;
547 } 603 }
548 604
549 ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
550 ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
551 rds_message_addref(rm); 605 rds_message_addref(rm);
552 ic->i_rm = rm; 606 ic->i_data_op = &rm->data;
553 607
554 /* Finalize the header */ 608 /* Finalize the header */
555 if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags)) 609 if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
@@ -559,10 +613,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
559 613
560 /* If it has a RDMA op, tell the peer we did it. This is 614 /* If it has a RDMA op, tell the peer we did it. This is
561 * used by the peer to release use-once RDMA MRs. */ 615 * used by the peer to release use-once RDMA MRs. */
562 if (rm->m_rdma_op) { 616 if (rm->rdma.op_active) {
563 struct rds_ext_header_rdma ext_hdr; 617 struct rds_ext_header_rdma ext_hdr;
564 618
565 ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key); 619 ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
566 rds_message_add_extension(&rm->m_inc.i_hdr, 620 rds_message_add_extension(&rm->m_inc.i_hdr,
567 RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr)); 621 RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
568 } 622 }
@@ -582,99 +636,77 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
582 /* 636 /*
583 * Update adv_credits since we reset the ACK_REQUIRED bit. 637 * Update adv_credits since we reset the ACK_REQUIRED bit.
584 */ 638 */
585 rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits); 639 if (ic->i_flowctl) {
586 adv_credits += posted; 640 rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
587 BUG_ON(adv_credits > 255); 641 adv_credits += posted;
642 BUG_ON(adv_credits > 255);
643 }
588 } 644 }
589 645
590 send = &ic->i_sends[pos];
591 first = send;
592 prev = NULL;
593 scat = &rm->m_sg[sg];
594 sent = 0;
595 i = 0;
596
597 /* Sometimes you want to put a fence between an RDMA 646 /* Sometimes you want to put a fence between an RDMA
598 * READ and the following SEND. 647 * READ and the following SEND.
599 * We could either do this all the time 648 * We could either do this all the time
600 * or when requested by the user. Right now, we let 649 * or when requested by the user. Right now, we let
601 * the application choose. 650 * the application choose.
602 */ 651 */
603 if (rm->m_rdma_op && rm->m_rdma_op->r_fence) 652 if (rm->rdma.op_active && rm->rdma.op_fence)
604 send_flags = IB_SEND_FENCE; 653 send_flags = IB_SEND_FENCE;
605 654
606 /* 655 /* Each frag gets a header. Msgs may be 0 bytes */
607 * We could be copying the header into the unused tail of the page. 656 send = &ic->i_sends[pos];
608 * That would need to be changed in the future when those pages might 657 first = send;
609 * be mapped userspace pages or page cache pages. So instead we always 658 prev = NULL;
610 * use a second sge and our long-lived ring of mapped headers. We send 659 scat = &ic->i_data_op->op_sg[sg];
611 * the header after the data so that the data payload can be aligned on 660 i = 0;
612 * the receiver. 661 do {
613 */ 662 unsigned int len = 0;
614 663
615 /* handle a 0-len message */ 664 /* Set up the header */
616 if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) { 665 send->s_wr.send_flags = send_flags;
617 rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags); 666 send->s_wr.opcode = IB_WR_SEND;
618 goto add_header; 667 send->s_wr.num_sge = 1;
619 } 668 send->s_wr.next = NULL;
669 send->s_queued = jiffies;
670 send->s_op = NULL;
620 671
621 /* if there's data reference it with a chain of work reqs */ 672 send->s_sge[0].addr = ic->i_send_hdrs_dma
622 for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) { 673 + (pos * sizeof(struct rds_header));
623 unsigned int len; 674 send->s_sge[0].length = sizeof(struct rds_header);
624 675
625 send = &ic->i_sends[pos]; 676 memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
626 677
627 len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off); 678 /* Set up the data, if present */
628 rds_ib_xmit_populate_wr(ic, send, pos, 679 if (i < work_alloc
629 ib_sg_dma_address(dev, scat) + off, len, 680 && scat != &rm->data.op_sg[rm->data.op_count]) {
630 send_flags); 681 len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
682 send->s_wr.num_sge = 2;
631 683
632 /* 684 send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
633 * We want to delay signaling completions just enough to get 685 send->s_sge[1].length = len;
634 * the batching benefits but not so much that we create dead time
635 * on the wire.
636 */
637 if (ic->i_unsignaled_wrs-- == 0) {
638 ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
639 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
640 }
641 686
642 ic->i_unsignaled_bytes -= len; 687 bytes_sent += len;
643 if (ic->i_unsignaled_bytes <= 0) { 688 off += len;
644 ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes; 689 if (off == ib_sg_dma_len(dev, scat)) {
645 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 690 scat++;
691 off = 0;
692 }
646 } 693 }
647 694
695 rds_ib_set_wr_signal_state(ic, send, 0);
696
648 /* 697 /*
649 * Always signal the last one if we're stopping due to flow control. 698 * Always signal the last one if we're stopping due to flow control.
650 */ 699 */
651 if (flow_controlled && i == (work_alloc-1)) 700 if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
652 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 701 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
653 702
703 if (send->s_wr.send_flags & IB_SEND_SIGNALED)
704 nr_sig++;
705
654 rdsdebug("send %p wr %p num_sge %u next %p\n", send, 706 rdsdebug("send %p wr %p num_sge %u next %p\n", send,
655 &send->s_wr, send->s_wr.num_sge, send->s_wr.next); 707 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
656 708
657 sent += len; 709 if (ic->i_flowctl && adv_credits) {
658 off += len;
659 if (off == ib_sg_dma_len(dev, scat)) {
660 scat++;
661 off = 0;
662 }
663
664add_header:
665 /* Tack on the header after the data. The header SGE should already
666 * have been set up to point to the right header buffer. */
667 memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
668
669 if (0) {
670 struct rds_header *hdr = &ic->i_send_hdrs[pos];
671
672 printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
673 be16_to_cpu(hdr->h_dport),
674 hdr->h_flags,
675 be32_to_cpu(hdr->h_len));
676 }
677 if (adv_credits) {
678 struct rds_header *hdr = &ic->i_send_hdrs[pos]; 710 struct rds_header *hdr = &ic->i_send_hdrs[pos];
679 711
680 /* add credit and redo the header checksum */ 712 /* add credit and redo the header checksum */
@@ -689,20 +721,25 @@ add_header:
689 prev = send; 721 prev = send;
690 722
691 pos = (pos + 1) % ic->i_send_ring.w_nr; 723 pos = (pos + 1) % ic->i_send_ring.w_nr;
692 } 724 send = &ic->i_sends[pos];
725 i++;
726
727 } while (i < work_alloc
728 && scat != &rm->data.op_sg[rm->data.op_count]);
693 729
694 /* Account the RDS header in the number of bytes we sent, but just once. 730 /* Account the RDS header in the number of bytes we sent, but just once.
695 * The caller has no concept of fragmentation. */ 731 * The caller has no concept of fragmentation. */
696 if (hdr_off == 0) 732 if (hdr_off == 0)
697 sent += sizeof(struct rds_header); 733 bytes_sent += sizeof(struct rds_header);
698 734
699 /* if we finished the message then send completion owns it */ 735 /* if we finished the message then send completion owns it */
700 if (scat == &rm->m_sg[rm->m_count]) { 736 if (scat == &rm->data.op_sg[rm->data.op_count]) {
701 prev->s_rm = ic->i_rm; 737 prev->s_op = ic->i_data_op;
702 prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 738 prev->s_wr.send_flags |= IB_SEND_SOLICITED;
703 ic->i_rm = NULL; 739 ic->i_data_op = NULL;
704 } 740 }
705 741
742 /* Put back wrs & credits we didn't use */
706 if (i < work_alloc) { 743 if (i < work_alloc) {
707 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i); 744 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
708 work_alloc = i; 745 work_alloc = i;
@@ -710,6 +747,9 @@ add_header:
710 if (ic->i_flowctl && i < credit_alloc) 747 if (ic->i_flowctl && i < credit_alloc)
711 rds_ib_send_add_credits(conn, credit_alloc - i); 748 rds_ib_send_add_credits(conn, credit_alloc - i);
712 749
750 if (nr_sig)
751 atomic_add(nr_sig, &ic->i_signaled_sends);
752
713 /* XXX need to worry about failed_wr and partial sends. */ 753 /* XXX need to worry about failed_wr and partial sends. */
714 failed_wr = &first->s_wr; 754 failed_wr = &first->s_wr;
715 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); 755 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
@@ -720,32 +760,127 @@ add_header:
720 printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 " 760 printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
721 "returned %d\n", &conn->c_faddr, ret); 761 "returned %d\n", &conn->c_faddr, ret);
722 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); 762 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
723 if (prev->s_rm) { 763 rds_ib_sub_signaled(ic, nr_sig);
724 ic->i_rm = prev->s_rm; 764 if (prev->s_op) {
725 prev->s_rm = NULL; 765 ic->i_data_op = prev->s_op;
766 prev->s_op = NULL;
726 } 767 }
727 768
728 rds_ib_conn_error(ic->conn, "ib_post_send failed\n"); 769 rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
729 goto out; 770 goto out;
730 } 771 }
731 772
732 ret = sent; 773 ret = bytes_sent;
733out: 774out:
734 BUG_ON(adv_credits); 775 BUG_ON(adv_credits);
735 return ret; 776 return ret;
736} 777}
737 778
738int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op) 779/*
780 * Issue atomic operation.
781 * A simplified version of the rdma case, we always map 1 SG, and
782 * only 8 bytes, for the return value from the atomic operation.
783 */
784int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
785{
786 struct rds_ib_connection *ic = conn->c_transport_data;
787 struct rds_ib_send_work *send = NULL;
788 struct ib_send_wr *failed_wr;
789 struct rds_ib_device *rds_ibdev;
790 u32 pos;
791 u32 work_alloc;
792 int ret;
793 int nr_sig = 0;
794
795 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
796
797 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
798 if (work_alloc != 1) {
799 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
800 rds_ib_stats_inc(s_ib_tx_ring_full);
801 ret = -ENOMEM;
802 goto out;
803 }
804
805 /* address of send request in ring */
806 send = &ic->i_sends[pos];
807 send->s_queued = jiffies;
808
809 if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
810 send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
811 send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare;
812 send->s_wr.wr.atomic.swap = op->op_m_cswp.swap;
813 send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask;
814 send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask;
815 } else { /* FADD */
816 send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
817 send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add;
818 send->s_wr.wr.atomic.swap = 0;
819 send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask;
820 send->s_wr.wr.atomic.swap_mask = 0;
821 }
822 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
823 send->s_wr.num_sge = 1;
824 send->s_wr.next = NULL;
825 send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
826 send->s_wr.wr.atomic.rkey = op->op_rkey;
827 send->s_op = op;
828 rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
829
830 /* map 8 byte retval buffer to the device */
831 ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
832 rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
833 if (ret != 1) {
834 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
835 rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
836 ret = -ENOMEM; /* XXX ? */
837 goto out;
838 }
839
840 /* Convert our struct scatterlist to struct ib_sge */
841 send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
842 send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
843 send->s_sge[0].lkey = ic->i_mr->lkey;
844
845 rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
846 send->s_sge[0].addr, send->s_sge[0].length);
847
848 if (nr_sig)
849 atomic_add(nr_sig, &ic->i_signaled_sends);
850
851 failed_wr = &send->s_wr;
852 ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
853 rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
854 send, &send->s_wr, ret, failed_wr);
855 BUG_ON(failed_wr != &send->s_wr);
856 if (ret) {
857 printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
858 "returned %d\n", &conn->c_faddr, ret);
859 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
860 rds_ib_sub_signaled(ic, nr_sig);
861 goto out;
862 }
863
864 if (unlikely(failed_wr != &send->s_wr)) {
865 printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
866 BUG_ON(failed_wr != &send->s_wr);
867 }
868
869out:
870 return ret;
871}
872
873int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
739{ 874{
740 struct rds_ib_connection *ic = conn->c_transport_data; 875 struct rds_ib_connection *ic = conn->c_transport_data;
741 struct rds_ib_send_work *send = NULL; 876 struct rds_ib_send_work *send = NULL;
742 struct rds_ib_send_work *first; 877 struct rds_ib_send_work *first;
743 struct rds_ib_send_work *prev; 878 struct rds_ib_send_work *prev;
744 struct ib_send_wr *failed_wr; 879 struct ib_send_wr *failed_wr;
745 struct rds_ib_device *rds_ibdev;
746 struct scatterlist *scat; 880 struct scatterlist *scat;
747 unsigned long len; 881 unsigned long len;
748 u64 remote_addr = op->r_remote_addr; 882 u64 remote_addr = op->op_remote_addr;
883 u32 max_sge = ic->rds_ibdev->max_sge;
749 u32 pos; 884 u32 pos;
750 u32 work_alloc; 885 u32 work_alloc;
751 u32 i; 886 u32 i;
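
The atomic path in the hunk above programs a masked compare-and-swap: compare_add_mask selects which bits must match, swap_mask selects which bits are written. A userspace model of the arithmetic the HCA applies to the remote 8-byte word; this sketches the semantics only, not the verbs API:

#include <stdint.h>
#include <stdio.h>

/* Masked CSWP: only bits set in compare_mask take part in the compare,
 * only bits set in swap_mask are written on a match. */
static uint64_t masked_cswp(uint64_t *target, uint64_t compare,
                            uint64_t swap, uint64_t compare_mask,
                            uint64_t swap_mask)
{
        uint64_t old = *target;

        if ((old & compare_mask) == (compare & compare_mask))
                *target = (old & ~swap_mask) | (swap & swap_mask);
        return old;     /* lands in the 8-byte retval buffer */
}

int main(void)
{
        uint64_t val = 0xff00;

        /* compare only the low byte, swap only the high byte */
        masked_cswp(&val, 0x00, 0x1200, 0xff, 0xff00);
        printf("val=0x%llx\n", (unsigned long long)val);  /* 0x1200 */
        return 0;
}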
@@ -753,29 +888,28 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
753 int sent; 888 int sent;
754 int ret; 889 int ret;
755 int num_sge; 890 int num_sge;
756 891 int nr_sig = 0;
757 rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); 892
758 893 /* map the op the first time we see it */
759 /* map the message the first time we see it */ 894 if (!op->op_mapped) {
760 if (!op->r_mapped) { 895 op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
761 op->r_count = ib_dma_map_sg(ic->i_cm_id->device, 896 op->op_sg, op->op_nents, (op->op_write) ?
762 op->r_sg, op->r_nents, (op->r_write) ? 897 DMA_TO_DEVICE : DMA_FROM_DEVICE);
763 DMA_TO_DEVICE : DMA_FROM_DEVICE); 898 rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
764 rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count); 899 if (op->op_count == 0) {
765 if (op->r_count == 0) {
766 rds_ib_stats_inc(s_ib_tx_sg_mapping_failure); 900 rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
767 ret = -ENOMEM; /* XXX ? */ 901 ret = -ENOMEM; /* XXX ? */
768 goto out; 902 goto out;
769 } 903 }
770 904
771 op->r_mapped = 1; 905 op->op_mapped = 1;
772 } 906 }
773 907
774 /* 908 /*
775 * Instead of knowing how to return a partial rdma read/write we insist that there 909 * Instead of knowing how to return a partial rdma read/write we insist that there
776 * be enough work requests to send the entire message. 910 * be enough work requests to send the entire message.
777 */ 911 */
778 i = ceil(op->r_count, rds_ibdev->max_sge); 912 i = ceil(op->op_count, max_sge);
779 913
780 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); 914 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
781 if (work_alloc != i) { 915 if (work_alloc != i) {
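
Because a partial RDMA cannot be returned, the code insists on enough work requests up front: ceil(op_count, max_sge), where ceil() is RDS's round-up-division helper (assumed here to be the usual (x + y - 1) / y). A tiny runnable check:

#include <stdio.h>

/* WRs needed to carry `count` SG entries at `per_wr` SGEs per WR. */
static unsigned int ceil_div(unsigned int count, unsigned int per_wr)
{
        return (count + per_wr - 1) / per_wr;
}

int main(void)
{
        printf("%u\n", ceil_div(70, 32));       /* 3 WRs for 70 SGEs */
        printf("%u\n", ceil_div(32, 32));       /* exactly 1 */
        return 0;
}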
@@ -788,30 +922,24 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
788 send = &ic->i_sends[pos]; 922 send = &ic->i_sends[pos];
789 first = send; 923 first = send;
790 prev = NULL; 924 prev = NULL;
791 scat = &op->r_sg[0]; 925 scat = &op->op_sg[0];
792 sent = 0; 926 sent = 0;
793 num_sge = op->r_count; 927 num_sge = op->op_count;
794 928
795 for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) { 929 for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
796 send->s_wr.send_flags = 0; 930 send->s_wr.send_flags = 0;
797 send->s_queued = jiffies; 931 send->s_queued = jiffies;
798 /* 932 send->s_op = NULL;
799 * We want to delay signaling completions just enough to get 933
800 * the batching benefits but not so much that we create dead time on the wire. 934 nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
801 */
802 if (ic->i_unsignaled_wrs-- == 0) {
803 ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
804 send->s_wr.send_flags = IB_SEND_SIGNALED;
805 }
806 935
807 send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ; 936 send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
808 send->s_wr.wr.rdma.remote_addr = remote_addr; 937 send->s_wr.wr.rdma.remote_addr = remote_addr;
809 send->s_wr.wr.rdma.rkey = op->r_key; 938 send->s_wr.wr.rdma.rkey = op->op_rkey;
810 send->s_op = op;
811 939
812 if (num_sge > rds_ibdev->max_sge) { 940 if (num_sge > max_sge) {
813 send->s_wr.num_sge = rds_ibdev->max_sge; 941 send->s_wr.num_sge = max_sge;
814 num_sge -= rds_ibdev->max_sge; 942 num_sge -= max_sge;
815 } else { 943 } else {
816 send->s_wr.num_sge = num_sge; 944 send->s_wr.num_sge = num_sge;
817 } 945 }
@@ -821,7 +949,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
821 if (prev) 949 if (prev)
822 prev->s_wr.next = &send->s_wr; 950 prev->s_wr.next = &send->s_wr;
823 951
824 for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) { 952 for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
825 len = ib_sg_dma_len(ic->i_cm_id->device, scat); 953 len = ib_sg_dma_len(ic->i_cm_id->device, scat);
826 send->s_sge[j].addr = 954 send->s_sge[j].addr =
827 ib_sg_dma_address(ic->i_cm_id->device, scat); 955 ib_sg_dma_address(ic->i_cm_id->device, scat);
@@ -843,15 +971,20 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
843 send = ic->i_sends; 971 send = ic->i_sends;
844 } 972 }
845 973
846 /* if we finished the message then send completion owns it */ 974 /* give a reference to the last op */
847 if (scat == &op->r_sg[op->r_count]) 975 if (scat == &op->op_sg[op->op_count]) {
848 prev->s_wr.send_flags = IB_SEND_SIGNALED; 976 prev->s_op = op;
977 rds_message_addref(container_of(op, struct rds_message, rdma));
978 }
849 979
850 if (i < work_alloc) { 980 if (i < work_alloc) {
851 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i); 981 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
852 work_alloc = i; 982 work_alloc = i;
853 } 983 }
854 984
985 if (nr_sig)
986 atomic_add(nr_sig, &ic->i_signaled_sends);
987
855 failed_wr = &first->s_wr; 988 failed_wr = &first->s_wr;
856 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); 989 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
857 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, 990 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
@@ -861,6 +994,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
861 printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 " 994 printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
862 "returned %d\n", &conn->c_faddr, ret); 995 "returned %d\n", &conn->c_faddr, ret);
863 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); 996 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
997 rds_ib_sub_signaled(ic, nr_sig);
864 goto out; 998 goto out;
865 } 999 }
866 1000
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index d2c904dd6fbc..2d5965d6e97c 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -67,6 +67,8 @@ static const char *const rds_ib_stat_names[] = {
67 "ib_rdma_mr_pool_flush", 67 "ib_rdma_mr_pool_flush",
68 "ib_rdma_mr_pool_wait", 68 "ib_rdma_mr_pool_wait",
69 "ib_rdma_mr_pool_depleted", 69 "ib_rdma_mr_pool_depleted",
70 "ib_atomic_cswp",
71 "ib_atomic_fadd",
70}; 72};
71 73
72unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter, 74unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c
index 03f01cb4e0fe..fc3da37220fd 100644
--- a/net/rds/ib_sysctl.c
+++ b/net/rds/ib_sysctl.c
@@ -49,10 +49,6 @@ unsigned long rds_ib_sysctl_max_unsig_wrs = 16;
49static unsigned long rds_ib_sysctl_max_unsig_wr_min = 1; 49static unsigned long rds_ib_sysctl_max_unsig_wr_min = 1;
50static unsigned long rds_ib_sysctl_max_unsig_wr_max = 64; 50static unsigned long rds_ib_sysctl_max_unsig_wr_max = 64;
51 51
52unsigned long rds_ib_sysctl_max_unsig_bytes = (16 << 20);
53static unsigned long rds_ib_sysctl_max_unsig_bytes_min = 1;
54static unsigned long rds_ib_sysctl_max_unsig_bytes_max = ~0UL;
55
56/* 52/*
57 * This sysctl does nothing. 53 * This sysctl does nothing.
58 * 54 *
@@ -94,15 +90,6 @@ ctl_table rds_ib_sysctl_table[] = {
94 .extra2 = &rds_ib_sysctl_max_unsig_wr_max, 90 .extra2 = &rds_ib_sysctl_max_unsig_wr_max,
95 }, 91 },
96 { 92 {
97 .procname = "max_unsignaled_bytes",
98 .data = &rds_ib_sysctl_max_unsig_bytes,
99 .maxlen = sizeof(unsigned long),
100 .mode = 0644,
101 .proc_handler = proc_doulongvec_minmax,
102 .extra1 = &rds_ib_sysctl_max_unsig_bytes_min,
103 .extra2 = &rds_ib_sysctl_max_unsig_bytes_max,
104 },
105 {
106 .procname = "max_recv_allocation", 93 .procname = "max_recv_allocation",
107 .data = &rds_ib_sysctl_max_recv_allocation, 94 .data = &rds_ib_sysctl_max_recv_allocation,
108 .maxlen = sizeof(unsigned long), 95 .maxlen = sizeof(unsigned long),
@@ -132,10 +119,10 @@ void rds_ib_sysctl_exit(void)
132 unregister_sysctl_table(rds_ib_sysctl_hdr); 119 unregister_sysctl_table(rds_ib_sysctl_hdr);
133} 120}
134 121
135int __init rds_ib_sysctl_init(void) 122int rds_ib_sysctl_init(void)
136{ 123{
137 rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table); 124 rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table);
138 if (rds_ib_sysctl_hdr == NULL) 125 if (!rds_ib_sysctl_hdr)
139 return -ENOMEM; 126 return -ENOMEM;
140 return 0; 127 return 0;
141} 128}
diff --git a/net/rds/info.c b/net/rds/info.c
index c45c4173a44d..4fdf1b6e84ff 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -76,7 +76,7 @@ void rds_info_register_func(int optname, rds_info_func func)
76 BUG_ON(optname < RDS_INFO_FIRST || optname > RDS_INFO_LAST); 76 BUG_ON(optname < RDS_INFO_FIRST || optname > RDS_INFO_LAST);
77 77
78 spin_lock(&rds_info_lock); 78 spin_lock(&rds_info_lock);
79 BUG_ON(rds_info_funcs[offset] != NULL); 79 BUG_ON(rds_info_funcs[offset]);
80 rds_info_funcs[offset] = func; 80 rds_info_funcs[offset] = func;
81 spin_unlock(&rds_info_lock); 81 spin_unlock(&rds_info_lock);
82} 82}
@@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(rds_info_deregister_func);
102 */ 102 */
103void rds_info_iter_unmap(struct rds_info_iterator *iter) 103void rds_info_iter_unmap(struct rds_info_iterator *iter)
104{ 104{
105 if (iter->addr != NULL) { 105 if (iter->addr) {
106 kunmap_atomic(iter->addr, KM_USER0); 106 kunmap_atomic(iter->addr, KM_USER0);
107 iter->addr = NULL; 107 iter->addr = NULL;
108 } 108 }
@@ -117,7 +117,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
117 unsigned long this; 117 unsigned long this;
118 118
119 while (bytes) { 119 while (bytes) {
120 if (iter->addr == NULL) 120 if (!iter->addr)
121 iter->addr = kmap_atomic(*iter->pages, KM_USER0); 121 iter->addr = kmap_atomic(*iter->pages, KM_USER0);
122 122
123 this = min(bytes, PAGE_SIZE - iter->offset); 123 this = min(bytes, PAGE_SIZE - iter->offset);
@@ -188,7 +188,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
188 >> PAGE_SHIFT; 188 >> PAGE_SHIFT;
189 189
190 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); 190 pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
191 if (pages == NULL) { 191 if (!pages) {
192 ret = -ENOMEM; 192 ret = -ENOMEM;
193 goto out; 193 goto out;
194 } 194 }
@@ -206,7 +206,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
206 206
207call_func: 207call_func:
208 func = rds_info_funcs[optname - RDS_INFO_FIRST]; 208 func = rds_info_funcs[optname - RDS_INFO_FIRST];
209 if (func == NULL) { 209 if (!func) {
210 ret = -ENOPROTOOPT; 210 ret = -ENOPROTOOPT;
211 goto out; 211 goto out;
212 } 212 }
@@ -234,7 +234,7 @@ call_func:
234 ret = -EFAULT; 234 ret = -EFAULT;
235 235
236out: 236out:
237 for (i = 0; pages != NULL && i < nr_pages; i++) 237 for (i = 0; pages && i < nr_pages; i++)
238 put_page(pages[i]); 238 put_page(pages[i]);
239 kfree(pages); 239 kfree(pages);
240 240
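
The info.c hunk is mostly `x == NULL` to `!x` conversions, but the exit path it touches is worth noting: one cleanup label tolerates a NULL pages array, so early failures and full success unwind through the same lines. A sketch of that idiom:

#include <linux/mm.h>
#include <linux/slab.h>

/* The loop guard tolerates a NULL array and kfree(NULL) is a no-op,
 * so this one helper serves every outcome. */
static void demo_release_pages(struct page **pages, int nr_pages)
{
        int i;

        for (i = 0; pages && i < nr_pages; i++)
                put_page(pages[i]);
        kfree(pages);
}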
diff --git a/net/rds/iw.c b/net/rds/iw.c
index c8f3d3525cb9..56808cac0fc7 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -264,7 +264,6 @@ struct rds_transport rds_iw_transport = {
264 .laddr_check = rds_iw_laddr_check, 264 .laddr_check = rds_iw_laddr_check,
265 .xmit_complete = rds_iw_xmit_complete, 265 .xmit_complete = rds_iw_xmit_complete,
266 .xmit = rds_iw_xmit, 266 .xmit = rds_iw_xmit,
267 .xmit_cong_map = NULL,
268 .xmit_rdma = rds_iw_xmit_rdma, 267 .xmit_rdma = rds_iw_xmit_rdma,
269 .recv = rds_iw_recv, 268 .recv = rds_iw_recv,
270 .conn_alloc = rds_iw_conn_alloc, 269 .conn_alloc = rds_iw_conn_alloc,
@@ -272,7 +271,6 @@ struct rds_transport rds_iw_transport = {
272 .conn_connect = rds_iw_conn_connect, 271 .conn_connect = rds_iw_conn_connect,
273 .conn_shutdown = rds_iw_conn_shutdown, 272 .conn_shutdown = rds_iw_conn_shutdown,
274 .inc_copy_to_user = rds_iw_inc_copy_to_user, 273 .inc_copy_to_user = rds_iw_inc_copy_to_user,
275 .inc_purge = rds_iw_inc_purge,
276 .inc_free = rds_iw_inc_free, 274 .inc_free = rds_iw_inc_free,
277 .cm_initiate_connect = rds_iw_cm_initiate_connect, 275 .cm_initiate_connect = rds_iw_cm_initiate_connect,
278 .cm_handle_connect = rds_iw_cm_handle_connect, 276 .cm_handle_connect = rds_iw_cm_handle_connect,
@@ -289,7 +287,7 @@ struct rds_transport rds_iw_transport = {
289 .t_prefer_loopback = 1, 287 .t_prefer_loopback = 1,
290}; 288};
291 289
292int __init rds_iw_init(void) 290int rds_iw_init(void)
293{ 291{
294 int ret; 292 int ret;
295 293
diff --git a/net/rds/iw.h b/net/rds/iw.h
index eef2f0c28476..543e665fafe3 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -70,7 +70,7 @@ struct rds_iw_send_work {
70 struct rds_message *s_rm; 70 struct rds_message *s_rm;
71 71
72 /* We should really put these into a union: */ 72 /* We should really put these into a union: */
73 struct rds_rdma_op *s_op; 73 struct rm_rdma_op *s_op;
74 struct rds_iw_mapping *s_mapping; 74 struct rds_iw_mapping *s_mapping;
75 struct ib_mr *s_mr; 75 struct ib_mr *s_mr;
76 struct ib_fast_reg_page_list *s_page_list; 76 struct ib_fast_reg_page_list *s_page_list;
@@ -284,7 +284,7 @@ void rds_iw_conn_free(void *arg);
284int rds_iw_conn_connect(struct rds_connection *conn); 284int rds_iw_conn_connect(struct rds_connection *conn);
285void rds_iw_conn_shutdown(struct rds_connection *conn); 285void rds_iw_conn_shutdown(struct rds_connection *conn);
286void rds_iw_state_change(struct sock *sk); 286void rds_iw_state_change(struct sock *sk);
287int __init rds_iw_listen_init(void); 287int rds_iw_listen_init(void);
288void rds_iw_listen_stop(void); 288void rds_iw_listen_stop(void);
289void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...); 289void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
290int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id, 290int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
@@ -321,12 +321,11 @@ void rds_iw_flush_mrs(void);
321void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id); 321void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
322 322
323/* ib_recv.c */ 323/* ib_recv.c */
324int __init rds_iw_recv_init(void); 324int rds_iw_recv_init(void);
325void rds_iw_recv_exit(void); 325void rds_iw_recv_exit(void);
326int rds_iw_recv(struct rds_connection *conn); 326int rds_iw_recv(struct rds_connection *conn);
327int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp, 327int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
328 gfp_t page_gfp, int prefill); 328 gfp_t page_gfp, int prefill);
329void rds_iw_inc_purge(struct rds_incoming *inc);
330void rds_iw_inc_free(struct rds_incoming *inc); 329void rds_iw_inc_free(struct rds_incoming *inc);
331int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 330int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
332 size_t size); 331 size_t size);
@@ -358,7 +357,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
358void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context); 357void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
359void rds_iw_send_init_ring(struct rds_iw_connection *ic); 358void rds_iw_send_init_ring(struct rds_iw_connection *ic);
360void rds_iw_send_clear_ring(struct rds_iw_connection *ic); 359void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
361int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op); 360int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
362void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits); 361void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
363void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted); 362void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
364int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted, 363int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
@@ -371,7 +370,7 @@ unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
371 unsigned int avail); 370 unsigned int avail);
372 371
373/* ib_sysctl.c */ 372/* ib_sysctl.c */
374int __init rds_iw_sysctl_init(void); 373int rds_iw_sysctl_init(void);
375void rds_iw_sysctl_exit(void); 374void rds_iw_sysctl_exit(void);
376extern unsigned long rds_iw_sysctl_max_send_wr; 375extern unsigned long rds_iw_sysctl_max_send_wr;
377extern unsigned long rds_iw_sysctl_max_recv_wr; 376extern unsigned long rds_iw_sysctl_max_recv_wr;
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index b5dd6ac39be8..712cf2d1f28e 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -257,7 +257,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
 	 * the rds_iwdev at all.
 	 */
 	rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
-	if (rds_iwdev == NULL) {
+	if (!rds_iwdev) {
 		if (printk_ratelimit())
 			printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
 			       dev->name);
@@ -292,7 +292,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
 					   ic->i_send_ring.w_nr *
 						sizeof(struct rds_header),
 					   &ic->i_send_hdrs_dma, GFP_KERNEL);
-	if (ic->i_send_hdrs == NULL) {
+	if (!ic->i_send_hdrs) {
 		ret = -ENOMEM;
 		rdsdebug("ib_dma_alloc_coherent send failed\n");
 		goto out;
@@ -302,7 +302,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
 					   ic->i_recv_ring.w_nr *
 						sizeof(struct rds_header),
 					   &ic->i_recv_hdrs_dma, GFP_KERNEL);
-	if (ic->i_recv_hdrs == NULL) {
+	if (!ic->i_recv_hdrs) {
 		ret = -ENOMEM;
 		rdsdebug("ib_dma_alloc_coherent recv failed\n");
 		goto out;
@@ -310,14 +310,14 @@ static int rds_iw_setup_qp(struct rds_connection *conn)

 	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
 					  &ic->i_ack_dma, GFP_KERNEL);
-	if (ic->i_ack == NULL) {
+	if (!ic->i_ack) {
 		ret = -ENOMEM;
 		rdsdebug("ib_dma_alloc_coherent ack failed\n");
 		goto out;
 	}

 	ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work));
-	if (ic->i_sends == NULL) {
+	if (!ic->i_sends) {
 		ret = -ENOMEM;
 		rdsdebug("send allocation failed\n");
 		goto out;
@@ -325,7 +325,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
 	rds_iw_send_init_ring(ic);

 	ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work));
-	if (ic->i_recvs == NULL) {
+	if (!ic->i_recvs) {
 		ret = -ENOMEM;
 		rdsdebug("recv allocation failed\n");
 		goto out;
@@ -696,7 +696,7 @@ int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)

 	/* XXX too lazy? */
 	ic = kzalloc(sizeof(struct rds_iw_connection), GFP_KERNEL);
-	if (ic == NULL)
+	if (!ic)
 		return -ENOMEM;

 	INIT_LIST_HEAD(&ic->iw_node);
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 13dc1862d862..0e7accc23ee2 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -34,7 +34,6 @@
 #include <linux/slab.h>

 #include "rds.h"
-#include "rdma.h"
 #include "iw.h"


@@ -207,9 +206,9 @@ void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *con
 	BUG_ON(list_empty(&ic->iw_node));
 	list_del(&ic->iw_node);

-	spin_lock_irq(&rds_iwdev->spinlock);
+	spin_lock(&rds_iwdev->spinlock);
 	list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
-	spin_unlock_irq(&rds_iwdev->spinlock);
+	spin_unlock(&rds_iwdev->spinlock);
 	spin_unlock_irq(&iw_nodev_conns_lock);

 	ic->rds_iwdev = rds_iwdev;
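The switch from spin_lock_irq() to plain spin_lock() on rds_iwdev->spinlock looks deliberate: this block already runs with interrupts disabled under the surrounding spin_lock_irq(&iw_nodev_conns_lock), and the old inner spin_unlock_irq() would have re-enabled interrupts while iw_nodev_conns_lock was still held. A sketch of the anti-pattern being removed, with placeholder locks a and b:

	/* Broken nesting: the inner unlock re-enables irqs too early. */
	spin_lock_irq(&a);	/* irqs off */
	spin_lock_irq(&b);	/* redundant, irqs are already off */
	/* ... critical section ... */
	spin_unlock_irq(&b);	/* BUG: irqs back on while 'a' is still held */
	spin_unlock_irq(&a);

	/* Correct nesting: only the outermost lock manages irq state. */
	spin_lock_irq(&a);
	spin_lock(&b);
	/* ... critical section ... */
	spin_unlock(&b);
	spin_unlock_irq(&a);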
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 3d479067d54d..5e57347f49ff 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -53,7 +53,7 @@ static void rds_iw_frag_drop_page(struct rds_page_frag *frag)
 static void rds_iw_frag_free(struct rds_page_frag *frag)
 {
 	rdsdebug("frag %p page %p\n", frag, frag->f_page);
-	BUG_ON(frag->f_page != NULL);
+	BUG_ON(frag->f_page);
 	kmem_cache_free(rds_iw_frag_slab, frag);
 }

@@ -143,14 +143,14 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
 	struct ib_sge *sge;
 	int ret = -ENOMEM;

-	if (recv->r_iwinc == NULL) {
+	if (!recv->r_iwinc) {
 		if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
 			rds_iw_stats_inc(s_iw_rx_alloc_limit);
 			goto out;
 		}
 		recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
 						 kptr_gfp);
-		if (recv->r_iwinc == NULL) {
+		if (!recv->r_iwinc) {
 			atomic_dec(&rds_iw_allocation);
 			goto out;
 		}
@@ -158,17 +158,17 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
 		rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
 	}

-	if (recv->r_frag == NULL) {
+	if (!recv->r_frag) {
 		recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp);
-		if (recv->r_frag == NULL)
+		if (!recv->r_frag)
 			goto out;
 		INIT_LIST_HEAD(&recv->r_frag->f_item);
 		recv->r_frag->f_page = NULL;
 	}

-	if (ic->i_frag.f_page == NULL) {
+	if (!ic->i_frag.f_page) {
 		ic->i_frag.f_page = alloc_page(page_gfp);
-		if (ic->i_frag.f_page == NULL)
+		if (!ic->i_frag.f_page)
 			goto out;
 		ic->i_frag.f_offset = 0;
 	}
@@ -273,7 +273,7 @@ int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
 	return ret;
 }

-void rds_iw_inc_purge(struct rds_incoming *inc)
+static void rds_iw_inc_purge(struct rds_incoming *inc)
 {
 	struct rds_iw_incoming *iwinc;
 	struct rds_page_frag *frag;
@@ -716,7 +716,7 @@ static void rds_iw_process_recv(struct rds_connection *conn,
 	 * into the inc and save the inc so we can hang upcoming fragments
 	 * off its list.
 	 */
-	if (iwinc == NULL) {
+	if (!iwinc) {
 		iwinc = recv->r_iwinc;
 		recv->r_iwinc = NULL;
 		ic->i_iwinc = iwinc;
@@ -887,7 +887,7 @@ int rds_iw_recv(struct rds_connection *conn)
 	return ret;
 }

-int __init rds_iw_recv_init(void)
+int rds_iw_recv_init(void)
 {
 	struct sysinfo si;
 	int ret = -ENOMEM;
@@ -899,13 +899,13 @@ int __init rds_iw_recv_init(void)
 	rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming",
 					sizeof(struct rds_iw_incoming),
 					0, 0, NULL);
-	if (rds_iw_incoming_slab == NULL)
+	if (!rds_iw_incoming_slab)
 		goto out;

 	rds_iw_frag_slab = kmem_cache_create("rds_iw_frag",
 					sizeof(struct rds_page_frag),
 					0, 0, NULL);
-	if (rds_iw_frag_slab == NULL)
+	if (!rds_iw_frag_slab)
 		kmem_cache_destroy(rds_iw_incoming_slab);
 	else
 		ret = 0;
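Unchanged by this patch but visible in the refill context above: rds_iw_recv_refill_one() bounds receive-side allocations with atomic_add_unless(), which only increments the counter while it is below the sysctl limit and fails otherwise. A small sketch of that lockless capping pattern, with illustrative names:

	/* Sketch: cap concurrent allocations at 'limit' without a lock. */
	static atomic_t nr_allocs = ATOMIC_INIT(0);

	void *capped_alloc(size_t size, int limit, gfp_t gfp)
	{
		void *p;

		if (!atomic_add_unless(&nr_allocs, 1, limit))
			return NULL;		/* at the cap, refuse */
		p = kmalloc(size, gfp);
		if (!p)
			atomic_dec(&nr_allocs);	/* roll back the reservation */
		return p;
	}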
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 52182ff7519e..6280ea020d4e 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -36,7 +36,6 @@
 #include <linux/dmapool.h>

 #include "rds.h"
-#include "rdma.h"
 #include "iw.h"

 static void rds_iw_send_rdma_complete(struct rds_message *rm,
@@ -64,13 +63,13 @@ static void rds_iw_send_rdma_complete(struct rds_message *rm,
 }

 static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
-		   struct rds_rdma_op *op)
+		   struct rm_rdma_op *op)
 {
-	if (op->r_mapped) {
+	if (op->op_mapped) {
 		ib_dma_unmap_sg(ic->i_cm_id->device,
-			op->r_sg, op->r_nents,
-			op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		op->r_mapped = 0;
+			op->op_sg, op->op_nents,
+			op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		op->op_mapped = 0;
 	}
 }

@@ -83,11 +82,11 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

 	ib_dma_unmap_sg(ic->i_cm_id->device,
-			rm->m_sg, rm->m_nents,
+			rm->data.op_sg, rm->data.op_nents,
 			DMA_TO_DEVICE);

-	if (rm->m_rdma_op != NULL) {
-		rds_iw_send_unmap_rdma(ic, rm->m_rdma_op);
+	if (rm->rdma.op_active) {
+		rds_iw_send_unmap_rdma(ic, &rm->rdma);

 		/* If the user asked for a completion notification on this
 		 * message, we can implement three different semantics:
@@ -111,10 +110,10 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 		 */
 		rds_iw_send_rdma_complete(rm, wc_status);

-		if (rm->m_rdma_op->r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
+		if (rm->rdma.op_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
 	}

 	/* If anyone waited for this message to get flushed out, wake
@@ -556,25 +555,27 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	}

 	/* map the message the first time we see it */
-	if (ic->i_rm == NULL) {
+	if (!ic->i_rm) {
 		/*
 		printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n",
 				be16_to_cpu(rm->m_inc.i_hdr.h_dport),
 				rm->m_inc.i_hdr.h_flags,
 				be32_to_cpu(rm->m_inc.i_hdr.h_len));
 		*/
-		if (rm->m_nents) {
-			rm->m_count = ib_dma_map_sg(dev,
-					rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
-			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
-			if (rm->m_count == 0) {
+		if (rm->data.op_nents) {
+			rm->data.op_count = ib_dma_map_sg(dev,
+							  rm->data.op_sg,
+							  rm->data.op_nents,
+							  DMA_TO_DEVICE);
+			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
+			if (rm->data.op_count == 0) {
 				rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
 				rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
 				ret = -ENOMEM; /* XXX ? */
 				goto out;
 			}
 		} else {
-			rm->m_count = 0;
+			rm->data.op_count = 0;
 		}

 		ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
@@ -590,10 +591,10 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,

 		/* If it has a RDMA op, tell the peer we did it. This is
 		 * used by the peer to release use-once RDMA MRs. */
-		if (rm->m_rdma_op) {
+		if (rm->rdma.op_active) {
 			struct rds_ext_header_rdma ext_hdr;

-			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
+			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
 			rds_message_add_extension(&rm->m_inc.i_hdr,
 					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 		}
@@ -621,7 +622,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	send = &ic->i_sends[pos];
 	first = send;
 	prev = NULL;
-	scat = &rm->m_sg[sg];
+	scat = &rm->data.op_sg[sg];
 	sent = 0;
 	i = 0;

@@ -631,7 +632,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
+	if (rm->rdma.op_active && rm->rdma.op_fence)
 		send_flags = IB_SEND_FENCE;

 	/*
@@ -650,7 +651,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	}

 	/* if there's data reference it with a chain of work reqs */
-	for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
+	for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) {
 		unsigned int len;

 		send = &ic->i_sends[pos];
@@ -728,7 +729,7 @@ add_header:
 	sent += sizeof(struct rds_header);

 	/* if we finished the message then send completion owns it */
-	if (scat == &rm->m_sg[rm->m_count]) {
+	if (scat == &rm->data.op_sg[rm->data.op_count]) {
 		prev->s_rm = ic->i_rm;
 		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
 		ic->i_rm = NULL;
@@ -784,7 +785,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
 	ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
 }

-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 {
 	struct rds_iw_connection *ic = conn->c_transport_data;
 	struct rds_iw_send_work *send = NULL;
@@ -794,7 +795,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	struct rds_iw_device *rds_iwdev;
 	struct scatterlist *scat;
 	unsigned long len;
-	u64 remote_addr = op->r_remote_addr;
+	u64 remote_addr = op->op_remote_addr;
 	u32 pos, fr_pos;
 	u32 work_alloc;
 	u32 i;
@@ -806,21 +807,21 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);

 	/* map the message the first time we see it */
-	if (!op->r_mapped) {
-		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
-					op->r_sg, op->r_nents, (op->r_write) ?
-					DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
-		if (op->r_count == 0) {
+	if (!op->op_mapped) {
+		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+					op->op_sg, op->op_nents, (op->op_write) ?
+					DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+		if (op->op_count == 0) {
 			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}

-		op->r_mapped = 1;
+		op->op_mapped = 1;
 	}

-	if (!op->r_write) {
+	if (!op->op_write) {
 		/* Alloc space on the send queue for the fastreg */
 		work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
 		if (work_alloc != 1) {
@@ -835,7 +836,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * Instead of knowing how to return a partial rdma read/write we insist that there
 	 * be enough work requests to send the entire message.
 	 */
-	i = ceil(op->r_count, rds_iwdev->max_sge);
+	i = ceil(op->op_count, rds_iwdev->max_sge);

 	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
 	if (work_alloc != i) {
@@ -846,17 +847,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}

 	send = &ic->i_sends[pos];
-	if (!op->r_write) {
+	if (!op->op_write) {
 		first = prev = &ic->i_sends[fr_pos];
 	} else {
 		first = send;
 		prev = NULL;
 	}
-	scat = &op->r_sg[0];
+	scat = &op->op_sg[0];
 	sent = 0;
-	num_sge = op->r_count;
+	num_sge = op->op_count;

-	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 		send->s_wr.send_flags = 0;
 		send->s_queued = jiffies;

@@ -873,13 +874,13 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		 * for local access after RDS is finished with it, using
 		 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
 		 */
-		if (op->r_write)
+		if (op->op_write)
 			send->s_wr.opcode = IB_WR_RDMA_WRITE;
 		else
 			send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;

 		send->s_wr.wr.rdma.remote_addr = remote_addr;
-		send->s_wr.wr.rdma.rkey = op->r_key;
+		send->s_wr.wr.rdma.rkey = op->op_rkey;
 		send->s_op = op;

 		if (num_sge > rds_iwdev->max_sge) {
@@ -893,7 +894,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		if (prev)
 			prev->s_wr.next = &send->s_wr;

-		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);

 			if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
@@ -927,7 +928,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}

 	/* if we finished the message then send completion owns it */
-	if (scat == &op->r_sg[op->r_count])
+	if (scat == &op->op_sg[op->op_count])
 		first->s_wr.send_flags = IB_SEND_SIGNALED;

 	if (i < work_alloc) {
@@ -941,9 +942,9 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * adapters do not allow using the lkey for this at all. To bypass this use a
 	 * fastreg_mr (or possibly a dma_mr)
 	 */
-	if (!op->r_write) {
+	if (!op->op_write) {
 		rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
-			op->r_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
+			op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
 		work_alloc++;
 	}

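The r_* to op_* renames above track a broader restructuring: the RDMA op is no longer a separately kmalloc'ed rds_rdma_op hung off rm->m_rdma_op, but a rm_rdma_op embedded in struct rds_message next to the data op, with op_active saying whether it is in use. The shape implied by these hunks, reconstructed only from the fields the diff touches (the authoritative definition lives in net/rds/rds.h, which is not part of this diff):

	struct rm_rdma_op {
		u32			op_rkey;
		u64			op_remote_addr;
		unsigned int		op_write:1;
		unsigned int		op_fence:1;
		unsigned int		op_notify:1;
		unsigned int		op_recverr:1;
		unsigned int		op_mapped:1;
		unsigned int		op_silent:1;
		unsigned int		op_active:1;
		struct rds_notifier	*op_notifier;
		unsigned int		op_bytes;
		unsigned int		op_nents;
		unsigned int		op_count;
		struct scatterlist	*op_sg;
	};

Note op_sg is now a pointer into the message's shared scatterlist pool (see rds_message_alloc_sgs() in the message.c hunks below) rather than the old trailing r_sg[0] array.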
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
index 1c4428a61a02..23e3a9a26aaf 100644
--- a/net/rds/iw_sysctl.c
+++ b/net/rds/iw_sysctl.c
@@ -122,10 +122,10 @@ void rds_iw_sysctl_exit(void)
 	unregister_sysctl_table(rds_iw_sysctl_hdr);
 }

-int __init rds_iw_sysctl_init(void)
+int rds_iw_sysctl_init(void)
 {
 	rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table);
-	if (rds_iw_sysctl_hdr == NULL)
+	if (!rds_iw_sysctl_hdr)
 		return -ENOMEM;
 	return 0;
 }
diff --git a/net/rds/loop.c b/net/rds/loop.c
index dd9879379457..c390156b426f 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -61,10 +61,17 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
 			 unsigned int hdr_off, unsigned int sg,
 			 unsigned int off)
 {
+	/* Do not send cong updates to loopback */
+	if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
+		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
+		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+	}
+
 	BUG_ON(hdr_off || sg || off);

 	rds_inc_init(&rm->m_inc, conn, conn->c_laddr);
-	rds_message_addref(rm); /* for the inc */
+	/* For the embedded inc. Matching put is in loop_inc_free() */
+	rds_message_addref(rm);

 	rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,
 			  GFP_KERNEL, KM_USER0);
@@ -77,16 +84,14 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
 	return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
 }

-static int rds_loop_xmit_cong_map(struct rds_connection *conn,
-				  struct rds_cong_map *map,
-				  unsigned long offset)
+/*
+ * See rds_loop_xmit(). Since our inc is embedded in the rm, we
+ * make sure the rm lives at least until the inc is done.
+ */
+static void rds_loop_inc_free(struct rds_incoming *inc)
 {
-	BUG_ON(offset);
-	BUG_ON(map != conn->c_lcong);
-
-	rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-
-	return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+	struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
+	rds_message_put(rm);
 }

 /* we need to at least give the thread something to succeed */
@@ -112,7 +117,7 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 	unsigned long flags;

 	lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL);
-	if (lc == NULL)
+	if (!lc)
 		return -ENOMEM;

 	INIT_LIST_HEAD(&lc->loop_node);
@@ -169,14 +174,12 @@ void rds_loop_exit(void)
  */
 struct rds_transport rds_loop_transport = {
 	.xmit			= rds_loop_xmit,
-	.xmit_cong_map		= rds_loop_xmit_cong_map,
 	.recv			= rds_loop_recv,
 	.conn_alloc		= rds_loop_conn_alloc,
 	.conn_free		= rds_loop_conn_free,
 	.conn_connect		= rds_loop_conn_connect,
 	.conn_shutdown		= rds_loop_conn_shutdown,
 	.inc_copy_to_user	= rds_message_inc_copy_to_user,
-	.inc_purge		= rds_message_inc_purge,
-	.inc_free		= rds_message_inc_free,
+	.inc_free		= rds_loop_inc_free,
 	.t_name			= "loopback",
 };
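The loopback transport now keeps the message alive for as long as its embedded inc is referenced: rds_loop_xmit() takes an extra reference before handing rm->m_inc to rds_recv_incoming(), and the new rds_loop_inc_free() drops it once the receive side is done. The container_of() step works because m_inc is embedded inside rds_message. A stand-alone illustration of that embedded-member pattern, with hypothetical types:

	#include <stddef.h>

	struct inner { int stand_in; };
	struct outer { int x; struct inner in; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct outer *outer_from_inner(struct inner *ip)
	{
		/* Recover the enclosing object from a pointer to its member. */
		return container_of(ip, struct outer, in);
	}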
diff --git a/net/rds/message.c b/net/rds/message.c
index 9a1d67e001ba..84f937f11d47 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -34,9 +34,6 @@
 #include <linux/slab.h>

 #include "rds.h"
-#include "rdma.h"
-
-static DECLARE_WAIT_QUEUE_HEAD(rds_message_flush_waitq);

 static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = {
 [RDS_EXTHDR_NONE]	= 0,
@@ -63,29 +60,31 @@ static void rds_message_purge(struct rds_message *rm)
 	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
 		return;

-	for (i = 0; i < rm->m_nents; i++) {
-		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->m_sg[i]));
+	for (i = 0; i < rm->data.op_nents; i++) {
+		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i]));
 		/* XXX will have to put_page for page refs */
-		__free_page(sg_page(&rm->m_sg[i]));
+		__free_page(sg_page(&rm->data.op_sg[i]));
 	}
-	rm->m_nents = 0;
+	rm->data.op_nents = 0;

-	if (rm->m_rdma_op)
-		rds_rdma_free_op(rm->m_rdma_op);
-	if (rm->m_rdma_mr)
-		rds_mr_put(rm->m_rdma_mr);
-}
+	if (rm->rdma.op_active)
+		rds_rdma_free_op(&rm->rdma);
+	if (rm->rdma.op_rdma_mr)
+		rds_mr_put(rm->rdma.op_rdma_mr);

-void rds_message_inc_purge(struct rds_incoming *inc)
-{
-	struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
-	rds_message_purge(rm);
+	if (rm->atomic.op_active)
+		rds_atomic_free_op(&rm->atomic);
+	if (rm->atomic.op_rdma_mr)
+		rds_mr_put(rm->atomic.op_rdma_mr);
 }

 void rds_message_put(struct rds_message *rm)
 {
 	rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
-
+	if (atomic_read(&rm->m_refcount) == 0) {
+printk(KERN_CRIT "danger refcount zero on %p\n", rm);
+WARN_ON(1);
+	}
 	if (atomic_dec_and_test(&rm->m_refcount)) {
 		BUG_ON(!list_empty(&rm->m_sock_item));
 		BUG_ON(!list_empty(&rm->m_conn_item));
@@ -96,12 +95,6 @@ void rds_message_put(struct rds_message *rm)
 }
 EXPORT_SYMBOL_GPL(rds_message_put);

-void rds_message_inc_free(struct rds_incoming *inc)
-{
-	struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
-	rds_message_put(rm);
-}
-
 void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
 				 __be16 dport, u64 seq)
 {
@@ -214,41 +207,68 @@ int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 o
 }
 EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);

-struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp)
+/*
+ * Each rds_message is allocated with extra space for the scatterlist entries
+ * rds ops will need. This is to minimize memory allocation count. Then, each rds op
+ * can grab SGs when initializing its part of the rds_message.
+ */
+struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
 {
 	struct rds_message *rm;

-	rm = kzalloc(sizeof(struct rds_message) +
-		     (nents * sizeof(struct scatterlist)), gfp);
+	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
 	if (!rm)
 		goto out;

-	if (nents)
-		sg_init_table(rm->m_sg, nents);
+	rm->m_used_sgs = 0;
+	rm->m_total_sgs = extra_len / sizeof(struct scatterlist);
+
 	atomic_set(&rm->m_refcount, 1);
 	INIT_LIST_HEAD(&rm->m_sock_item);
 	INIT_LIST_HEAD(&rm->m_conn_item);
 	spin_lock_init(&rm->m_rs_lock);
+	init_waitqueue_head(&rm->m_flush_wait);

 out:
 	return rm;
 }

+/*
+ * RDS ops use this to grab SG entries from the rm's sg pool.
+ */
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
+{
+	struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
+	struct scatterlist *sg_ret;
+
+	WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
+	WARN_ON(!nents);
+
+	sg_ret = &sg_first[rm->m_used_sgs];
+	sg_init_table(sg_ret, nents);
+	rm->m_used_sgs += nents;
+
+	return sg_ret;
+}
+
 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
 {
 	struct rds_message *rm;
 	unsigned int i;
+	int num_sgs = ceil(total_len, PAGE_SIZE);
+	int extra_bytes = num_sgs * sizeof(struct scatterlist);

-	rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL);
-	if (rm == NULL)
+	rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
+	if (!rm)
 		return ERR_PTR(-ENOMEM);

 	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
 	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
-	rm->m_nents = ceil(total_len, PAGE_SIZE);
+	rm->data.op_nents = ceil(total_len, PAGE_SIZE);
+	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);

-	for (i = 0; i < rm->m_nents; ++i) {
-		sg_set_page(&rm->m_sg[i],
+	for (i = 0; i < rm->data.op_nents; ++i) {
+		sg_set_page(&rm->data.op_sg[i],
 			    virt_to_page(page_addrs[i]),
 			    PAGE_SIZE, 0);
 	}
@@ -256,40 +276,33 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
 	return rm;
 }

-struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
-					       size_t total_len)
+int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
+			       size_t total_len)
 {
 	unsigned long to_copy;
 	unsigned long iov_off;
 	unsigned long sg_off;
-	struct rds_message *rm;
 	struct iovec *iov;
 	struct scatterlist *sg;
-	int ret;
-
-	rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL);
-	if (rm == NULL) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	int ret = 0;

 	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);

 	/*
 	 * now allocate and copy in the data payload.
 	 */
-	sg = rm->m_sg;
+	sg = rm->data.op_sg;
 	iov = first_iov;
 	iov_off = 0;
 	sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

 	while (total_len) {
-		if (sg_page(sg) == NULL) {
+		if (!sg_page(sg)) {
 			ret = rds_page_remainder_alloc(sg, total_len,
 						       GFP_HIGHUSER);
 			if (ret)
 				goto out;
-			rm->m_nents++;
+			rm->data.op_nents++;
 			sg_off = 0;
 		}

@@ -320,14 +333,8 @@ struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
 		sg++;
 	}

-	ret = 0;
 out:
-	if (ret) {
-		if (rm)
-			rds_message_put(rm);
-		rm = ERR_PTR(ret);
-	}
-	return rm;
+	return ret;
 }

 int rds_message_inc_copy_to_user(struct rds_incoming *inc,
@@ -348,7 +355,7 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,

 	iov = first_iov;
 	iov_off = 0;
-	sg = rm->m_sg;
+	sg = rm->data.op_sg;
 	vec_off = 0;
 	copied = 0;

@@ -394,15 +401,14 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
  */
 void rds_message_wait(struct rds_message *rm)
 {
-	wait_event(rds_message_flush_waitq,
+	wait_event_interruptible(rm->m_flush_wait,
 		   !test_bit(RDS_MSG_MAPPED, &rm->m_flags));
 }

 void rds_message_unmapped(struct rds_message *rm)
 {
 	clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
-	if (waitqueue_active(&rds_message_flush_waitq))
-		wake_up(&rds_message_flush_waitq);
+	wake_up_interruptible(&rm->m_flush_wait);
 }
 EXPORT_SYMBOL_GPL(rds_message_unmapped);

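The new allocation scheme does a single kzalloc per message: callers size the message up front (extra_len bytes of scatterlist storage appended to the struct) and each op then carves its entries out of that pool with rds_message_alloc_sgs(). A usage sketch built only from the functions in the hunk above, error handling trimmed:

	/* Build a message whose data op needs num_sgs scatterlist entries. */
	int num_sgs = ceil(total_len, PAGE_SIZE);
	struct rds_message *rm;

	rm = rds_message_alloc(num_sgs * sizeof(struct scatterlist), GFP_KERNEL);
	if (!rm)
		return -ENOMEM;

	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); /* carve from the pool */
	rm->data.op_nents = num_sgs;

This also explains why rds_message_copy_from_user() stops allocating the rm itself: the sendmsg path has to know the RDMA and atomic cmsg sizes (see rds_rdma_extra_size() below) before the message can be sized, so allocation moves up to the caller.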
diff --git a/net/rds/page.c b/net/rds/page.c
index 595a952d4b17..5e44f5ae7898 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -116,7 +116,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
 	/* jump straight to allocation if we're trying for a huge page */
 	if (bytes >= PAGE_SIZE) {
 		page = alloc_page(gfp);
-		if (page == NULL) {
+		if (!page) {
 			ret = -ENOMEM;
 		} else {
 			sg_set_page(scat, page, PAGE_SIZE, 0);
@@ -162,7 +162,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
 		rem = &per_cpu(rds_page_remainders, get_cpu());
 		local_irq_save(flags);

-		if (page == NULL) {
+		if (!page) {
 			ret = -ENOMEM;
 			break;
 		}
@@ -186,6 +186,7 @@ out:
 			       ret ? 0 : scat->length);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(rds_page_remainder_alloc);

 static int rds_page_remainder_cpu_notify(struct notifier_block *self,
 					 unsigned long action, void *hcpu)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 75fd13bb631b..1a41debca1ce 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -35,7 +35,7 @@
 #include <linux/rbtree.h>
 #include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

-#include "rdma.h"
+#include "rds.h"

 /*
  * XXX
@@ -130,14 +130,22 @@ void rds_rdma_drop_keys(struct rds_sock *rs)
 {
 	struct rds_mr *mr;
 	struct rb_node *node;
+	unsigned long flags;

 	/* Release any MRs associated with this socket */
+	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 	while ((node = rb_first(&rs->rs_rdma_keys))) {
 		mr = container_of(node, struct rds_mr, r_rb_node);
 		if (mr->r_trans == rs->rs_transport)
 			mr->r_invalidate = 0;
+		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
+		RB_CLEAR_NODE(&mr->r_rb_node);
+		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
+		rds_destroy_mr(mr);
 		rds_mr_put(mr);
+		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 	}
+	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

 	if (rs->rs_transport && rs->rs_transport->flush_mrs)
 		rs->rs_transport->flush_mrs();
@@ -181,7 +189,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
 		goto out;
 	}

-	if (rs->rs_transport->get_mr == NULL) {
+	if (!rs->rs_transport->get_mr) {
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
@@ -197,13 +205,13 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,

 	/* XXX clamp nr_pages to limit the size of this alloc? */
 	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
-	if (pages == NULL) {
+	if (!pages) {
 		ret = -ENOMEM;
 		goto out;
 	}

 	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
-	if (mr == NULL) {
+	if (!mr) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -230,13 +238,13 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
 	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
 	 * the zero page.
 	 */
-	ret = rds_pin_pages(args->vec.addr & PAGE_MASK, nr_pages, pages, 1);
+	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
 	if (ret < 0)
 		goto out;

 	nents = ret;
 	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
-	if (sg == NULL) {
+	if (!sg) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -406,68 +414,127 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)

 	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
-	if (mr && (mr->r_use_once || force)) {
+	if (!mr) {
+		printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key);
+		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
+		return;
+	}
+
+	if (mr->r_use_once || force) {
 		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
 		RB_CLEAR_NODE(&mr->r_rb_node);
 		zot_me = 1;
-	} else if (mr)
-		atomic_inc(&mr->r_refcount);
+	}
 	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

 	/* May have to issue a dma_sync on this memory region.
 	 * Note we could avoid this if the operation was a RDMA READ,
 	 * but at this point we can't tell. */
-	if (mr != NULL) {
-		if (mr->r_trans->sync_mr)
-			mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
-
-		/* If the MR was marked as invalidate, this will
-		 * trigger an async flush. */
-		if (zot_me)
-			rds_destroy_mr(mr);
-		rds_mr_put(mr);
-	}
+	if (mr->r_trans->sync_mr)
+		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
+
+	/* If the MR was marked as invalidate, this will
+	 * trigger an async flush. */
+	if (zot_me)
+		rds_destroy_mr(mr);
+	rds_mr_put(mr);
 }

-void rds_rdma_free_op(struct rds_rdma_op *ro)
+void rds_rdma_free_op(struct rm_rdma_op *ro)
 {
 	unsigned int i;

-	for (i = 0; i < ro->r_nents; i++) {
-		struct page *page = sg_page(&ro->r_sg[i]);
+	for (i = 0; i < ro->op_nents; i++) {
+		struct page *page = sg_page(&ro->op_sg[i]);

 		/* Mark page dirty if it was possibly modified, which
 		 * is the case for a RDMA_READ which copies from remote
 		 * to local memory */
-		if (!ro->r_write) {
-			BUG_ON(in_interrupt());
+		if (!ro->op_write) {
+			BUG_ON(irqs_disabled());
 			set_page_dirty(page);
 		}
 		put_page(page);
 	}

-	kfree(ro->r_notifier);
-	kfree(ro);
+	kfree(ro->op_notifier);
+	ro->op_notifier = NULL;
+	ro->op_active = 0;
+}
+
+void rds_atomic_free_op(struct rm_atomic_op *ao)
+{
+	struct page *page = sg_page(ao->op_sg);
+
+	/* Mark page dirty if it was possibly modified, which
+	 * is the case for a RDMA_READ which copies from remote
+	 * to local memory */
+	set_page_dirty(page);
+	put_page(page);
+
+	kfree(ao->op_notifier);
+	ao->op_notifier = NULL;
+	ao->op_active = 0;
 }

+
 /*
- * args is a pointer to an in-kernel copy in the sendmsg cmsg.
+ * Count the number of pages needed to describe an incoming iovec.
  */
-static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
-					    struct rds_rdma_args *args)
+static int rds_rdma_pages(struct rds_rdma_args *args)
 {
 	struct rds_iovec vec;
-	struct rds_rdma_op *op = NULL;
+	struct rds_iovec __user *local_vec;
+	unsigned int tot_pages = 0;
 	unsigned int nr_pages;
-	unsigned int max_pages;
+	unsigned int i;
+
+	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
+
+	/* figure out the number of pages in the vector */
+	for (i = 0; i < args->nr_local; i++) {
+		if (copy_from_user(&vec, &local_vec[i],
+				   sizeof(struct rds_iovec)))
+			return -EFAULT;
+
+		nr_pages = rds_pages_in_vec(&vec);
+		if (nr_pages == 0)
+			return -EINVAL;
+
+		tot_pages += nr_pages;
+	}
+
+	return tot_pages;
+}
+
+int rds_rdma_extra_size(struct rds_rdma_args *args)
+{
+	return rds_rdma_pages(args) * sizeof(struct scatterlist);
+}
+
+/*
+ * The application asks for a RDMA transfer.
+ * Extract all arguments and set up the rdma_op
+ */
+int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+		       struct cmsghdr *cmsg)
+{
+	struct rds_rdma_args *args;
+	struct rds_iovec vec;
+	struct rm_rdma_op *op = &rm->rdma;
+	int nr_pages;
 	unsigned int nr_bytes;
 	struct page **pages = NULL;
 	struct rds_iovec __user *local_vec;
-	struct scatterlist *sg;
 	unsigned int nr;
 	unsigned int i, j;
-	int ret;
+	int ret = 0;
+
+	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
+	    || rm->rdma.op_active)
+		return -EINVAL;
+
+	args = CMSG_DATA(cmsg);

 	if (rs->rs_bound_addr == 0) {
 		ret = -ENOTCONN; /* XXX not a great errno */
@@ -479,61 +546,38 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
 		goto out;
 	}

-	nr_pages = 0;
-	max_pages = 0;
-
-	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
-
-	/* figure out the number of pages in the vector */
-	for (i = 0; i < args->nr_local; i++) {
-		if (copy_from_user(&vec, &local_vec[i],
-				   sizeof(struct rds_iovec))) {
-			ret = -EFAULT;
-			goto out;
-		}
-
-		nr = rds_pages_in_vec(&vec);
-		if (nr == 0) {
-			ret = -EINVAL;
-			goto out;
-		}
-
-		max_pages = max(nr, max_pages);
-		nr_pages += nr;
-	}
-
-	pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
-	if (pages == NULL) {
-		ret = -ENOMEM;
+	nr_pages = rds_rdma_pages(args);
+	if (nr_pages < 0)
 		goto out;
-	}

-	op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL);
-	if (op == NULL) {
+	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+	if (!pages) {
 		ret = -ENOMEM;
 		goto out;
 	}

-	op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
-	op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
-	op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
-	op->r_recverr = rs->rs_recverr;
+	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
+	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
+	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
+	op->op_active = 1;
+	op->op_recverr = rs->rs_recverr;
 	WARN_ON(!nr_pages);
-	sg_init_table(op->r_sg, nr_pages);
+	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);

-	if (op->r_notify || op->r_recverr) {
+	if (op->op_notify || op->op_recverr) {
 		/* We allocate an uninitialized notifier here, because
 		 * we don't want to do that in the completion handler. We
 		 * would have to use GFP_ATOMIC there, and don't want to deal
 		 * with failed allocations.
 		 */
-		op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
-		if (!op->r_notifier) {
+		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
+		if (!op->op_notifier) {
 			ret = -ENOMEM;
 			goto out;
 		}
-		op->r_notifier->n_user_token = args->user_token;
-		op->r_notifier->n_status = RDS_RDMA_SUCCESS;
+		op->op_notifier->n_user_token = args->user_token;
+		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
 	}

 	/* The cookie contains the R_Key of the remote memory region, and
@@ -543,15 +587,17 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
 	 * destination address (which is really an offset into the MR)
 	 * FIXME: We may want to move this into ib_rdma.c
 	 */
-	op->r_key = rds_rdma_cookie_key(args->cookie);
-	op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
+	op->op_rkey = rds_rdma_cookie_key(args->cookie);
+	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

 	nr_bytes = 0;

 	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
 		 (unsigned long long)args->nr_local,
 		 (unsigned long long)args->remote_vec.addr,
-		 op->r_key);
+		 op->op_rkey);
+
+	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

 	for (i = 0; i < args->nr_local; i++) {
 		if (copy_from_user(&vec, &local_vec[i],
@@ -569,15 +615,10 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
 		rs->rs_user_addr = vec.addr;
 		rs->rs_user_bytes = vec.bytes;

-		/* did the user change the vec under us? */
-		if (nr > max_pages || op->r_nents + nr > nr_pages) {
-			ret = -EINVAL;
-			goto out;
-		}
 		/* If it's a WRITE operation, we want to pin the pages for reading.
 		 * If it's a READ operation, we need to pin the pages for writing.
 		 */
-		ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, !op->r_write);
+		ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write);
 		if (ret < 0)
 			goto out;

@@ -588,8 +629,9 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,

 		for (j = 0; j < nr; j++) {
 			unsigned int offset = vec.addr & ~PAGE_MASK;
+			struct scatterlist *sg;

-			sg = &op->r_sg[op->r_nents + j];
+			sg = &op->op_sg[op->op_nents + j];
 			sg_set_page(sg, pages[j],
 				    min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
 				    offset);
@@ -601,10 +643,9 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
 			vec.bytes -= sg->length;
 		}

-		op->r_nents += nr;
+		op->op_nents += nr;
 	}

-
 	if (nr_bytes > args->remote_vec.bytes) {
 		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
 			 nr_bytes,
@@ -612,38 +653,17 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
 		ret = -EINVAL;
 		goto out;
 	}
-	op->r_bytes = nr_bytes;
+	op->op_bytes = nr_bytes;

 	ret = 0;
 out:
 	kfree(pages);
-	if (ret) {
-		if (op)
-			rds_rdma_free_op(op);
-		op = ERR_PTR(ret);
-	}
-	return op;
-}
-
-/*
- * The application asks for a RDMA transfer.
- * Extract all arguments and set up the rdma_op
- */
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-		       struct cmsghdr *cmsg)
-{
-	struct rds_rdma_op *op;
-
-	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
-	    rm->m_rdma_op != NULL)
-		return -EINVAL;
+	if (ret)
+		rds_rdma_free_op(op);

-	op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
-	if (IS_ERR(op))
-		return PTR_ERR(op);
 	rds_stats_inc(s_send_rdma);
-	rm->m_rdma_op = op;
-	return 0;
+
+	return ret;
 }

 /*
@@ -673,7 +693,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,

 	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
 	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
-	if (mr == NULL)
+	if (!mr)
 		err = -EINVAL;	/* invalid r_key */
 	else
 		atomic_inc(&mr->r_refcount);
@@ -681,7 +701,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,

 	if (mr) {
 		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
-		rm->m_rdma_mr = mr;
+		rm->rdma.op_rdma_mr = mr;
 	}
 	return err;
 }
@@ -699,5 +719,98 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
 	    rm->m_rdma_cookie != 0)
 		return -EINVAL;

-	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
+	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
+}
+
+/*
+ * Fill in rds_message for an atomic request.
+ */
+int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
+		    struct cmsghdr *cmsg)
+{
+	struct page *page = NULL;
+	struct rds_atomic_args *args;
+	int ret = 0;
+
+	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
+	    || rm->atomic.op_active)
+		return -EINVAL;
+
+	args = CMSG_DATA(cmsg);
+
+	/* Nonmasked & masked cmsg ops converted to masked hw ops */
+	switch (cmsg->cmsg_type) {
+	case RDS_CMSG_ATOMIC_FADD:
+		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
+		rm->atomic.op_m_fadd.add = args->fadd.add;
+		rm->atomic.op_m_fadd.nocarry_mask = 0;
+		break;
+	case RDS_CMSG_MASKED_ATOMIC_FADD:
+		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
+		rm->atomic.op_m_fadd.add = args->m_fadd.add;
+		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
+		break;
+	case RDS_CMSG_ATOMIC_CSWP:
+		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
+		rm->atomic.op_m_cswp.compare = args->cswp.compare;
+		rm->atomic.op_m_cswp.swap = args->cswp.swap;
+		rm->atomic.op_m_cswp.compare_mask = ~0;
+		rm->atomic.op_m_cswp.swap_mask = ~0;
+		break;
+	case RDS_CMSG_MASKED_ATOMIC_CSWP:
+		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
+		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
+		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
+		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
+		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
+		break;
+	default:
+		BUG(); /* should never happen */
+	}
+
+	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
+	rm->atomic.op_active = 1;
+	rm->atomic.op_recverr = rs->rs_recverr;
+	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
+
+	/* verify 8 byte-aligned */
+	if (args->local_addr & 0x7) {
+		ret = -EFAULT;
+		goto err;
+	}
+
+	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
+	if (ret != 1)
+		goto err;
+	ret = 0;
+
+	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
+
+	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
+		/* We allocate an uninitialized notifier here, because
+		 * we don't want to do that in the completion handler. We
+		 * would have to use GFP_ATOMIC there, and don't want to deal
+		 * with failed allocations.
+		 */
+		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
+		if (!rm->atomic.op_notifier) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		rm->atomic.op_notifier->n_user_token = args->user_token;
+		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
+	}
+
+	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
+	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
+
+	return ret;
+err:
+	if (page)
+		put_page(page);
+	kfree(rm->atomic.op_notifier);
+
+	return ret;
 }
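rds_cmsg_atomic() is the sendmsg() side of the new atomic ops: userspace passes a struct rds_atomic_args in a control message, and the four cmsg types all collapse onto the two masked hardware operations (a plain CSWP becomes a masked CSWP with all-ones masks, a plain FADD a masked FADD with a zero nocarry mask). A hedged userspace sketch; the constants and args layout are taken from the kernel code above, while rds_fd, mr_cookie, result, remote_off and my_token are illustrative placeholders:

	/* Needs <sys/socket.h>, <string.h>, <stdint.h> and <linux/rds.h>. */
	struct rds_atomic_args args = {0};
	struct msghdr msg = {0};
	char cbuf[CMSG_SPACE(sizeof(args))];
	struct cmsghdr *cmsg;

	args.cookie = mr_cookie;		/* from an earlier RDS_CMSG_RDMA_MAP */
	args.fadd.add = 1;			/* fetch-and-add by 1 */
	args.local_addr = (uint64_t)&result;	/* must be 8-byte aligned */
	args.remote_addr = remote_off;
	args.flags = RDS_RDMA_NOTIFY_ME;
	args.user_token = my_token;

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type = RDS_CMSG_ATOMIC_FADD;
	cmsg->cmsg_len = CMSG_LEN(sizeof(args));
	memcpy(CMSG_DATA(cmsg), &args, sizeof(args));

	sendmsg(rds_fd, &msg, 0);	/* kernel path lands in rds_cmsg_atomic() */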
diff --git a/net/rds/rdma.h b/net/rds/rdma.h
deleted file mode 100644
index 909c39835a5d..000000000000
--- a/net/rds/rdma.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef _RDS_RDMA_H
-#define _RDS_RDMA_H
-
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/scatterlist.h>
-
-#include "rds.h"
-
-struct rds_mr {
-	struct rb_node		r_rb_node;
-	atomic_t		r_refcount;
-	u32			r_key;
-
-	/* A copy of the creation flags */
-	unsigned int		r_use_once:1;
-	unsigned int		r_invalidate:1;
-	unsigned int		r_write:1;
-
-	/* This is for RDS_MR_DEAD.
-	 * It would be nice & consistent to make this part of the above
-	 * bit field here, but we need to use test_and_set_bit.
-	 */
-	unsigned long		r_state;
-	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
-	struct rds_transport	*r_trans;
-	void			*r_trans_private;
-};
-
-/* Flags for mr->r_state */
-#define RDS_MR_DEAD		0
-
-struct rds_rdma_op {
-	u32			r_key;
-	u64			r_remote_addr;
-	unsigned int		r_write:1;
-	unsigned int		r_fence:1;
-	unsigned int		r_notify:1;
-	unsigned int		r_recverr:1;
-	unsigned int		r_mapped:1;
-	struct rds_notifier	*r_notifier;
-	unsigned int		r_bytes;
-	unsigned int		r_nents;
-	unsigned int		r_count;
-	struct scatterlist	r_sg[0];
-};
-
-static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
-{
-	return r_key | (((u64) offset) << 32);
-}
-
-static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
-{
-	return cookie;
-}
-
-static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
-{
-	return cookie >> 32;
-}
-
-int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
-int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
-int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
-void rds_rdma_drop_keys(struct rds_sock *rs);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-		       struct cmsghdr *cmsg);
-int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
-		       struct cmsghdr *cmsg);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
-		       struct cmsghdr *cmsg);
-int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
-		      struct cmsghdr *cmsg);
-void rds_rdma_free_op(struct rds_rdma_op *ro);
-void rds_rdma_send_complete(struct rds_message *rm, int);
-
-extern void __rds_put_mr_final(struct rds_mr *mr);
-static inline void rds_mr_put(struct rds_mr *mr)
-{
-	if (atomic_dec_and_test(&mr->r_refcount))
-		__rds_put_mr_final(mr);
-}
-
-#endif
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index e599ba2f950d..e6ed10aee190 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -36,6 +36,34 @@
36 36
37static struct rdma_cm_id *rds_rdma_listen_id; 37static struct rdma_cm_id *rds_rdma_listen_id;
38 38
39static char *rds_cm_event_strings[] = {
40#define RDS_CM_EVENT_STRING(foo) \
41 [RDMA_CM_EVENT_##foo] = __stringify(RDMA_CM_EVENT_##foo)
42 RDS_CM_EVENT_STRING(ADDR_RESOLVED),
43 RDS_CM_EVENT_STRING(ADDR_ERROR),
44 RDS_CM_EVENT_STRING(ROUTE_RESOLVED),
45 RDS_CM_EVENT_STRING(ROUTE_ERROR),
46 RDS_CM_EVENT_STRING(CONNECT_REQUEST),
47 RDS_CM_EVENT_STRING(CONNECT_RESPONSE),
48 RDS_CM_EVENT_STRING(CONNECT_ERROR),
49 RDS_CM_EVENT_STRING(UNREACHABLE),
50 RDS_CM_EVENT_STRING(REJECTED),
51 RDS_CM_EVENT_STRING(ESTABLISHED),
52 RDS_CM_EVENT_STRING(DISCONNECTED),
53 RDS_CM_EVENT_STRING(DEVICE_REMOVAL),
54 RDS_CM_EVENT_STRING(MULTICAST_JOIN),
55 RDS_CM_EVENT_STRING(MULTICAST_ERROR),
56 RDS_CM_EVENT_STRING(ADDR_CHANGE),
57 RDS_CM_EVENT_STRING(TIMEWAIT_EXIT),
58#undef RDS_CM_EVENT_STRING
59};
60
61static char *rds_cm_event_str(enum rdma_cm_event_type type)
62{
63 return rds_str_array(rds_cm_event_strings,
64 ARRAY_SIZE(rds_cm_event_strings), type);
65}
66
39int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, 67int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
40 struct rdma_cm_event *event) 68 struct rdma_cm_event *event)
41{ 69{
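rds_cm_event_str() defers to rds_str_array(), which this series declares in rds.h further down but implements in af_rds.c, outside this diff. It is presumably a bounds-checked table lookup along these lines (a sketch of the contract, not the actual body):

    /* Assumed shape of the af_rds.c helper: return the table entry if
     * the index is in range and populated, else a fallback string. */
    char *rds_str_array(char **array, size_t elements, size_t index)
    {
            if (index < elements && array[index])
                    return array[index];
            return "unknown";
    }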
@@ -44,8 +72,8 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
44 struct rds_transport *trans; 72 struct rds_transport *trans;
45 int ret = 0; 73 int ret = 0;
46 74
47 rdsdebug("conn %p id %p handling event %u\n", conn, cm_id, 75 rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,
48 event->event); 76 event->event, rds_cm_event_str(event->event));
49 77
50 if (cm_id->device->node_type == RDMA_NODE_RNIC) 78 if (cm_id->device->node_type == RDMA_NODE_RNIC)
51 trans = &rds_iw_transport; 79 trans = &rds_iw_transport;
@@ -109,7 +137,8 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
109 137
110 default: 138 default:
111 /* things like device disconnect? */ 139 /* things like device disconnect? */
112 printk(KERN_ERR "RDS: unknown event %u!\n", event->event); 140 printk(KERN_ERR "RDS: unknown event %u (%s)!\n",
141 event->event, rds_cm_event_str(event->event));
113 break; 142 break;
114 } 143 }
115 144
@@ -117,12 +146,13 @@ out:
117 if (conn) 146 if (conn)
118 mutex_unlock(&conn->c_cm_lock); 147 mutex_unlock(&conn->c_cm_lock);
119 148
120 rdsdebug("id %p event %u handling ret %d\n", cm_id, event->event, ret); 149 rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event,
150 rds_cm_event_str(event->event), ret);
121 151
122 return ret; 152 return ret;
123} 153}
124 154
125static int __init rds_rdma_listen_init(void) 155static int rds_rdma_listen_init(void)
126{ 156{
127 struct sockaddr_in sin; 157 struct sockaddr_in sin;
128 struct rdma_cm_id *cm_id; 158 struct rdma_cm_id *cm_id;
@@ -177,7 +207,7 @@ static void rds_rdma_listen_stop(void)
177 } 207 }
178} 208}
179 209
180int __init rds_rdma_init(void) 210int rds_rdma_init(void)
181{ 211{
182 int ret; 212 int ret;
183 213
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c224b5bb3ba9..8103dcf8b976 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -80,6 +80,7 @@ enum {
80/* Bits for c_flags */ 80/* Bits for c_flags */
81#define RDS_LL_SEND_FULL 0 81#define RDS_LL_SEND_FULL 0
82#define RDS_RECONNECT_PENDING 1 82#define RDS_RECONNECT_PENDING 1
83#define RDS_IN_XMIT 2
83 84
84struct rds_connection { 85struct rds_connection {
85 struct hlist_node c_hash_node; 86 struct hlist_node c_hash_node;
@@ -91,12 +92,13 @@ struct rds_connection {
91 struct rds_cong_map *c_lcong; 92 struct rds_cong_map *c_lcong;
92 struct rds_cong_map *c_fcong; 93 struct rds_cong_map *c_fcong;
93 94
94 struct mutex c_send_lock; /* protect send ring */
95 struct rds_message *c_xmit_rm; 95 struct rds_message *c_xmit_rm;
96 unsigned long c_xmit_sg; 96 unsigned long c_xmit_sg;
97 unsigned int c_xmit_hdr_off; 97 unsigned int c_xmit_hdr_off;
98 unsigned int c_xmit_data_off; 98 unsigned int c_xmit_data_off;
99 unsigned int c_xmit_atomic_sent;
99 unsigned int c_xmit_rdma_sent; 100 unsigned int c_xmit_rdma_sent;
101 unsigned int c_xmit_data_sent;
100 102
101 spinlock_t c_lock; /* protect msg queues */ 103 spinlock_t c_lock; /* protect msg queues */
102 u64 c_next_tx_seq; 104 u64 c_next_tx_seq;
@@ -116,11 +118,10 @@ struct rds_connection {
116 struct delayed_work c_conn_w; 118 struct delayed_work c_conn_w;
117 struct work_struct c_down_w; 119 struct work_struct c_down_w;
118 struct mutex c_cm_lock; /* protect conn state & cm */ 120 struct mutex c_cm_lock; /* protect conn state & cm */
121 wait_queue_head_t c_waitq;
119 122
120 struct list_head c_map_item; 123 struct list_head c_map_item;
121 unsigned long c_map_queued; 124 unsigned long c_map_queued;
122 unsigned long c_map_offset;
123 unsigned long c_map_bytes;
124 125
125 unsigned int c_unacked_packets; 126 unsigned int c_unacked_packets;
126 unsigned int c_unacked_bytes; 127 unsigned int c_unacked_bytes;
@@ -206,6 +207,48 @@ struct rds_incoming {
206 rds_rdma_cookie_t i_rdma_cookie; 207 rds_rdma_cookie_t i_rdma_cookie;
207}; 208};
208 209
210struct rds_mr {
211 struct rb_node r_rb_node;
212 atomic_t r_refcount;
213 u32 r_key;
214
215 /* A copy of the creation flags */
216 unsigned int r_use_once:1;
217 unsigned int r_invalidate:1;
218 unsigned int r_write:1;
219
220 /* This is for RDS_MR_DEAD.
221 * It would be nice & consistent to make this part of the above
222 * bit field here, but we need to use test_and_set_bit.
223 */
224 unsigned long r_state;
225 struct rds_sock *r_sock; /* back pointer to the socket that owns us */
226 struct rds_transport *r_trans;
227 void *r_trans_private;
228};
229
230/* Flags for mr->r_state */
231#define RDS_MR_DEAD 0
232
233static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
234{
235 return r_key | (((u64) offset) << 32);
236}
237
238static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
239{
240 return cookie;
241}
242
243static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
244{
245 return cookie >> 32;
246}
247
248/* atomic operation types */
249#define RDS_ATOMIC_TYPE_CSWP 0
250#define RDS_ATOMIC_TYPE_FADD 1
251
209/* 252/*
210 * m_sock_item and m_conn_item are on lists that are serialized under 253 * m_sock_item and m_conn_item are on lists that are serialized under
211 * conn->c_lock. m_sock_item has additional meaning in that once it is empty 254 * conn->c_lock. m_sock_item has additional meaning in that once it is empty
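The cookie helpers moved into rds.h here pack an RDMA r_key and a byte offset into a single 64-bit value: the key occupies the low 32 bits and the offset the high 32 bits. A quick round-trip example:

    /* Worked example of the packing above. */
    rds_rdma_cookie_t cookie = rds_rdma_make_cookie(0x1234abcd, 0x100);
    u32 key    = rds_rdma_cookie_key(cookie);    /* 0x1234abcd */
    u32 offset = rds_rdma_cookie_offset(cookie); /* 0x100 */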
@@ -258,13 +301,71 @@ struct rds_message {
258 * -> rs->rs_lock 301 * -> rs->rs_lock
259 */ 302 */
260 spinlock_t m_rs_lock; 303 spinlock_t m_rs_lock;
304 wait_queue_head_t m_flush_wait;
305
261 struct rds_sock *m_rs; 306 struct rds_sock *m_rs;
262 struct rds_rdma_op *m_rdma_op; 307
308 /* cookie to send to remote, in rds header */
263 rds_rdma_cookie_t m_rdma_cookie; 309 rds_rdma_cookie_t m_rdma_cookie;
264 struct rds_mr *m_rdma_mr; 310
265 unsigned int m_nents; 311 unsigned int m_used_sgs;
266 unsigned int m_count; 312 unsigned int m_total_sgs;
267 struct scatterlist m_sg[0]; 313
314 void *m_final_op;
315
316 struct {
317 struct rm_atomic_op {
318 int op_type;
319 union {
320 struct {
321 uint64_t compare;
322 uint64_t swap;
323 uint64_t compare_mask;
324 uint64_t swap_mask;
325 } op_m_cswp;
326 struct {
327 uint64_t add;
328 uint64_t nocarry_mask;
329 } op_m_fadd;
330 };
331
332 u32 op_rkey;
333 u64 op_remote_addr;
334 unsigned int op_notify:1;
335 unsigned int op_recverr:1;
336 unsigned int op_mapped:1;
337 unsigned int op_silent:1;
338 unsigned int op_active:1;
339 struct scatterlist *op_sg;
340 struct rds_notifier *op_notifier;
341
342 struct rds_mr *op_rdma_mr;
343 } atomic;
344 struct rm_rdma_op {
345 u32 op_rkey;
346 u64 op_remote_addr;
347 unsigned int op_write:1;
348 unsigned int op_fence:1;
349 unsigned int op_notify:1;
350 unsigned int op_recverr:1;
351 unsigned int op_mapped:1;
352 unsigned int op_silent:1;
353 unsigned int op_active:1;
354 unsigned int op_bytes;
355 unsigned int op_nents;
356 unsigned int op_count;
357 struct scatterlist *op_sg;
358 struct rds_notifier *op_notifier;
359
360 struct rds_mr *op_rdma_mr;
361 } rdma;
362 struct rm_data_op {
363 unsigned int op_active:1;
364 unsigned int op_nents;
365 unsigned int op_count;
366 struct scatterlist *op_sg;
367 } data;
368 };
268}; 369};
269 370
270/* 371/*
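After this change a single rds_message can carry up to three operations at once: an atomic op, an RDMA op, and a data op, each gated by its op_active bit. A consumer inspects them roughly as follows (illustrative only; it mirrors the op_active checks rds_send_xmit() makes later in this patch):

    static void rm_describe(const struct rds_message *rm)
    {
            if (rm->atomic.op_active)
                    pr_debug("atomic op, type %d\n", rm->atomic.op_type);
            if (rm->rdma.op_active)
                    pr_debug("rdma op, %u bytes in %u sg entries\n",
                             rm->rdma.op_bytes, rm->rdma.op_nents);
            if (rm->data.op_active)
                    pr_debug("data op, %u sg entries\n", rm->data.op_nents);
    }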
@@ -305,10 +406,6 @@ struct rds_notifier {
305 * transport is responsible for other serialization, including 406 * transport is responsible for other serialization, including
306 * rds_recv_incoming(). This is called in process context but 407 * rds_recv_incoming(). This is called in process context but
307 * should try hard not to block. 408 * should try hard not to block.
308 *
309 * @xmit_cong_map: This asks the transport to send the local bitmap down the
310 * given connection. XXX get a better story about the bitmap
311 * flag and header.
312 */ 409 */
313 410
314#define RDS_TRANS_IB 0 411#define RDS_TRANS_IB 0
@@ -332,13 +429,11 @@ struct rds_transport {
332 void (*xmit_complete)(struct rds_connection *conn); 429 void (*xmit_complete)(struct rds_connection *conn);
333 int (*xmit)(struct rds_connection *conn, struct rds_message *rm, 430 int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
334 unsigned int hdr_off, unsigned int sg, unsigned int off); 431 unsigned int hdr_off, unsigned int sg, unsigned int off);
335 int (*xmit_cong_map)(struct rds_connection *conn, 432 int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
336 struct rds_cong_map *map, unsigned long offset); 433 int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
337 int (*xmit_rdma)(struct rds_connection *conn, struct rds_rdma_op *op);
338 int (*recv)(struct rds_connection *conn); 434 int (*recv)(struct rds_connection *conn);
339 int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov, 435 int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
340 size_t size); 436 size_t size);
341 void (*inc_purge)(struct rds_incoming *inc);
342 void (*inc_free)(struct rds_incoming *inc); 437 void (*inc_free)(struct rds_incoming *inc);
343 438
344 int (*cm_handle_connect)(struct rdma_cm_id *cm_id, 439 int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
@@ -367,17 +462,11 @@ struct rds_sock {
367 * bound_addr used for both incoming and outgoing, no INADDR_ANY 462 * bound_addr used for both incoming and outgoing, no INADDR_ANY
368 * support. 463 * support.
369 */ 464 */
370 struct rb_node rs_bound_node; 465 struct hlist_node rs_bound_node;
371 __be32 rs_bound_addr; 466 __be32 rs_bound_addr;
372 __be32 rs_conn_addr; 467 __be32 rs_conn_addr;
373 __be16 rs_bound_port; 468 __be16 rs_bound_port;
374 __be16 rs_conn_port; 469 __be16 rs_conn_port;
375
376 /*
377 * This is only used to communicate the transport between bind and
378 * initiating connections. All other trans use is referenced through
379 * the connection.
380 */
381 struct rds_transport *rs_transport; 470 struct rds_transport *rs_transport;
382 471
383 /* 472 /*
@@ -466,8 +555,8 @@ struct rds_statistics {
466 uint64_t s_recv_ping; 555 uint64_t s_recv_ping;
467 uint64_t s_send_queue_empty; 556 uint64_t s_send_queue_empty;
468 uint64_t s_send_queue_full; 557 uint64_t s_send_queue_full;
469 uint64_t s_send_sem_contention; 558 uint64_t s_send_lock_contention;
470 uint64_t s_send_sem_queue_raced; 559 uint64_t s_send_lock_queue_raced;
471 uint64_t s_send_immediate_retry; 560 uint64_t s_send_immediate_retry;
472 uint64_t s_send_delayed_retry; 561 uint64_t s_send_delayed_retry;
473 uint64_t s_send_drop_acked; 562 uint64_t s_send_drop_acked;
@@ -487,6 +576,7 @@ struct rds_statistics {
487}; 576};
488 577
489/* af_rds.c */ 578/* af_rds.c */
579char *rds_str_array(char **array, size_t elements, size_t index);
490void rds_sock_addref(struct rds_sock *rs); 580void rds_sock_addref(struct rds_sock *rs);
491void rds_sock_put(struct rds_sock *rs); 581void rds_sock_put(struct rds_sock *rs);
492void rds_wake_sk_sleep(struct rds_sock *rs); 582void rds_wake_sk_sleep(struct rds_sock *rs);
@@ -521,15 +611,17 @@ void rds_cong_exit(void);
521struct rds_message *rds_cong_update_alloc(struct rds_connection *conn); 611struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
522 612
523/* conn.c */ 613/* conn.c */
524int __init rds_conn_init(void); 614int rds_conn_init(void);
525void rds_conn_exit(void); 615void rds_conn_exit(void);
526struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr, 616struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
527 struct rds_transport *trans, gfp_t gfp); 617 struct rds_transport *trans, gfp_t gfp);
528struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr, 618struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
529 struct rds_transport *trans, gfp_t gfp); 619 struct rds_transport *trans, gfp_t gfp);
620void rds_conn_shutdown(struct rds_connection *conn);
530void rds_conn_destroy(struct rds_connection *conn); 621void rds_conn_destroy(struct rds_connection *conn);
531void rds_conn_reset(struct rds_connection *conn); 622void rds_conn_reset(struct rds_connection *conn);
532void rds_conn_drop(struct rds_connection *conn); 623void rds_conn_drop(struct rds_connection *conn);
624void rds_conn_connect_if_down(struct rds_connection *conn);
533void rds_for_each_conn_info(struct socket *sock, unsigned int len, 625void rds_for_each_conn_info(struct socket *sock, unsigned int len,
534 struct rds_info_iterator *iter, 626 struct rds_info_iterator *iter,
535 struct rds_info_lengths *lens, 627 struct rds_info_lengths *lens,
@@ -566,7 +658,8 @@ rds_conn_connecting(struct rds_connection *conn)
566 658
567/* message.c */ 659/* message.c */
568struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp); 660struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
569struct rds_message *rds_message_copy_from_user(struct iovec *first_iov, 661struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
662int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
570 size_t total_len); 663 size_t total_len);
571struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len); 664struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
572void rds_message_populate_header(struct rds_header *hdr, __be16 sport, 665void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
@@ -580,7 +673,6 @@ int rds_message_get_version_extension(struct rds_header *hdr, unsigned int *vers
580int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset); 673int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
581int rds_message_inc_copy_to_user(struct rds_incoming *inc, 674int rds_message_inc_copy_to_user(struct rds_incoming *inc,
582 struct iovec *first_iov, size_t size); 675 struct iovec *first_iov, size_t size);
583void rds_message_inc_purge(struct rds_incoming *inc);
584void rds_message_inc_free(struct rds_incoming *inc); 676void rds_message_inc_free(struct rds_incoming *inc);
585void rds_message_addref(struct rds_message *rm); 677void rds_message_addref(struct rds_message *rm);
586void rds_message_put(struct rds_message *rm); 678void rds_message_put(struct rds_message *rm);
@@ -636,14 +728,39 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
636typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack); 728typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
637void rds_send_drop_acked(struct rds_connection *conn, u64 ack, 729void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
638 is_acked_func is_acked); 730 is_acked_func is_acked);
639int rds_send_acked_before(struct rds_connection *conn, u64 seq);
640void rds_send_remove_from_sock(struct list_head *messages, int status); 731void rds_send_remove_from_sock(struct list_head *messages, int status);
641int rds_send_pong(struct rds_connection *conn, __be16 dport); 732int rds_send_pong(struct rds_connection *conn, __be16 dport);
642struct rds_message *rds_send_get_message(struct rds_connection *, 733struct rds_message *rds_send_get_message(struct rds_connection *,
643 struct rds_rdma_op *); 734 struct rm_rdma_op *);
644 735
645/* rdma.c */ 736/* rdma.c */
646void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force); 737void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
738int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
739int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
740int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
741void rds_rdma_drop_keys(struct rds_sock *rs);
742int rds_rdma_extra_size(struct rds_rdma_args *args);
743int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
744 struct cmsghdr *cmsg);
745int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
746 struct cmsghdr *cmsg);
747int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
748 struct cmsghdr *cmsg);
749int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
750 struct cmsghdr *cmsg);
751void rds_rdma_free_op(struct rm_rdma_op *ro);
752void rds_atomic_free_op(struct rm_atomic_op *ao);
753void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
754void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
755int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
756 struct cmsghdr *cmsg);
757
758extern void __rds_put_mr_final(struct rds_mr *mr);
759static inline void rds_mr_put(struct rds_mr *mr)
760{
761 if (atomic_dec_and_test(&mr->r_refcount))
762 __rds_put_mr_final(mr);
763}
647 764
648/* stats.c */ 765/* stats.c */
649DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats); 766DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
@@ -657,14 +774,14 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
657 put_cpu(); \ 774 put_cpu(); \
658} while (0) 775} while (0)
659#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count) 776#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
660int __init rds_stats_init(void); 777int rds_stats_init(void);
661void rds_stats_exit(void); 778void rds_stats_exit(void);
662void rds_stats_info_copy(struct rds_info_iterator *iter, 779void rds_stats_info_copy(struct rds_info_iterator *iter,
663 uint64_t *values, const char *const *names, 780 uint64_t *values, const char *const *names,
664 size_t nr); 781 size_t nr);
665 782
666/* sysctl.c */ 783/* sysctl.c */
667int __init rds_sysctl_init(void); 784int rds_sysctl_init(void);
668void rds_sysctl_exit(void); 785void rds_sysctl_exit(void);
669extern unsigned long rds_sysctl_sndbuf_min; 786extern unsigned long rds_sysctl_sndbuf_min;
670extern unsigned long rds_sysctl_sndbuf_default; 787extern unsigned long rds_sysctl_sndbuf_default;
@@ -678,9 +795,10 @@ extern unsigned long rds_sysctl_trace_flags;
678extern unsigned int rds_sysctl_trace_level; 795extern unsigned int rds_sysctl_trace_level;
679 796
680/* threads.c */ 797/* threads.c */
681int __init rds_threads_init(void); 798int rds_threads_init(void);
682void rds_threads_exit(void); 799void rds_threads_exit(void);
683extern struct workqueue_struct *rds_wq; 800extern struct workqueue_struct *rds_wq;
801void rds_queue_reconnect(struct rds_connection *conn);
684void rds_connect_worker(struct work_struct *); 802void rds_connect_worker(struct work_struct *);
685void rds_shutdown_worker(struct work_struct *); 803void rds_shutdown_worker(struct work_struct *);
686void rds_send_worker(struct work_struct *); 804void rds_send_worker(struct work_struct *);
@@ -691,9 +809,10 @@ void rds_connect_complete(struct rds_connection *conn);
691int rds_trans_register(struct rds_transport *trans); 809int rds_trans_register(struct rds_transport *trans);
692void rds_trans_unregister(struct rds_transport *trans); 810void rds_trans_unregister(struct rds_transport *trans);
693struct rds_transport *rds_trans_get_preferred(__be32 addr); 811struct rds_transport *rds_trans_get_preferred(__be32 addr);
812void rds_trans_put(struct rds_transport *trans);
694unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, 813unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
695 unsigned int avail); 814 unsigned int avail);
696int __init rds_trans_init(void); 815int rds_trans_init(void);
697void rds_trans_exit(void); 816void rds_trans_exit(void);
698 817
699#endif 818#endif
diff --git a/net/rds/recv.c b/net/rds/recv.c
index c93588c2d553..68800f02aa30 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -36,7 +36,6 @@
36#include <linux/in.h> 36#include <linux/in.h>
37 37
38#include "rds.h" 38#include "rds.h"
39#include "rdma.h"
40 39
41void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, 40void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
42 __be32 saddr) 41 __be32 saddr)
@@ -210,7 +209,7 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
210 } 209 }
211 210
212 rs = rds_find_bound(daddr, inc->i_hdr.h_dport); 211 rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
213 if (rs == NULL) { 212 if (!rs) {
214 rds_stats_inc(s_recv_drop_no_sock); 213 rds_stats_inc(s_recv_drop_no_sock);
215 goto out; 214 goto out;
216 } 215 }
@@ -251,7 +250,7 @@ static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
251{ 250{
252 unsigned long flags; 251 unsigned long flags;
253 252
254 if (*inc == NULL) { 253 if (!*inc) {
255 read_lock_irqsave(&rs->rs_recv_lock, flags); 254 read_lock_irqsave(&rs->rs_recv_lock, flags);
256 if (!list_empty(&rs->rs_recv_queue)) { 255 if (!list_empty(&rs->rs_recv_queue)) {
257 *inc = list_entry(rs->rs_recv_queue.next, 256 *inc = list_entry(rs->rs_recv_queue.next,
@@ -334,10 +333,10 @@ int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
334 333
335 if (msghdr) { 334 if (msghdr) {
336 cmsg.user_token = notifier->n_user_token; 335 cmsg.user_token = notifier->n_user_token;
337 cmsg.status = notifier->n_status; 336 cmsg.status = notifier->n_status;
338 337
339 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS, 338 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
340 sizeof(cmsg), &cmsg); 339 sizeof(cmsg), &cmsg);
341 if (err) 340 if (err)
342 break; 341 break;
343 } 342 }
diff --git a/net/rds/send.c b/net/rds/send.c
index 9c1c6bcaa6c9..9b951a0ab6b7 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -37,7 +37,6 @@
37#include <linux/list.h> 37#include <linux/list.h>
38 38
39#include "rds.h" 39#include "rds.h"
40#include "rdma.h"
41 40
42/* When transmitting messages in rds_send_xmit, we need to emerge from 41/* When transmitting messages in rds_send_xmit, we need to emerge from
43 * time to time and briefly release the CPU. Otherwise the soft lockup watchdog 42 * time to time and briefly release the CPU. Otherwise the soft lockup watchdog
@@ -54,7 +53,8 @@ module_param(send_batch_count, int, 0444);
54MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue"); 53MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
55 54
56/* 55/*
57 * Reset the send state. Caller must hold c_send_lock when calling here. 56 * Reset the send state. Callers must ensure that this doesn't race with
57 * rds_send_xmit().
58 */ 58 */
59void rds_send_reset(struct rds_connection *conn) 59void rds_send_reset(struct rds_connection *conn)
60{ 60{
@@ -62,18 +62,22 @@ void rds_send_reset(struct rds_connection *conn)
62 unsigned long flags; 62 unsigned long flags;
63 63
64 if (conn->c_xmit_rm) { 64 if (conn->c_xmit_rm) {
65 rm = conn->c_xmit_rm;
66 conn->c_xmit_rm = NULL;
65 /* Tell the user the RDMA op is no longer mapped by the 67 /* Tell the user the RDMA op is no longer mapped by the
66 * transport. This isn't entirely true (it's flushed out 68 * transport. This isn't entirely true (it's flushed out
67 * independently) but as the connection is down, there's 69 * independently) but as the connection is down, there's
68 * no ongoing RDMA to/from that memory */ 70 * no ongoing RDMA to/from that memory */
69 rds_message_unmapped(conn->c_xmit_rm); 71 rds_message_unmapped(rm);
70 rds_message_put(conn->c_xmit_rm); 72 rds_message_put(rm);
71 conn->c_xmit_rm = NULL;
72 } 73 }
74
73 conn->c_xmit_sg = 0; 75 conn->c_xmit_sg = 0;
74 conn->c_xmit_hdr_off = 0; 76 conn->c_xmit_hdr_off = 0;
75 conn->c_xmit_data_off = 0; 77 conn->c_xmit_data_off = 0;
78 conn->c_xmit_atomic_sent = 0;
76 conn->c_xmit_rdma_sent = 0; 79 conn->c_xmit_rdma_sent = 0;
80 conn->c_xmit_data_sent = 0;
77 81
78 conn->c_map_queued = 0; 82 conn->c_map_queued = 0;
79 83
@@ -90,6 +94,25 @@ void rds_send_reset(struct rds_connection *conn)
90 spin_unlock_irqrestore(&conn->c_lock, flags); 94 spin_unlock_irqrestore(&conn->c_lock, flags);
91} 95}
92 96
97static int acquire_in_xmit(struct rds_connection *conn)
98{
99 return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
100}
101
102static void release_in_xmit(struct rds_connection *conn)
103{
104 clear_bit(RDS_IN_XMIT, &conn->c_flags);
105 smp_mb__after_clear_bit();
106 /*
107 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
108 * hot path and finding waiters is very rare. We don't want to walk
109 * the system-wide hashed waitqueue buckets in the fast path only to
110 * almost never find waiters.
111 */
112 if (waitqueue_active(&conn->c_waitq))
113 wake_up_all(&conn->c_waitq);
114}
115
93/* 116/*
94 * We're making the conscious trade-off here to only send one message 117 * We're making the conscious trade-off here to only send one message
95 * down the connection at a time. 118 * down the connection at a time.
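acquire_in_xmit()/release_in_xmit() replace the old c_send_lock mutex with a single flag bit: contending senders back off instead of sleeping, and the shutdown path can wait on c_waitq for the bit to drop. Both halves of the handshake, sketched (the waiter side lives in rds_conn_shutdown() in conn.c, which this diff section does not show, so its exact form is an assumption):

    /* transmit side (rds_send_xmit) */
    if (!acquire_in_xmit(conn))
            return -ENOMEM;         /* another task is feeding the conn */
    /* ... walk the send queue ... */
    release_in_xmit(conn);          /* clear_bit, barrier, wake c_waitq */

    /* shutdown side: assumed shape of the conn.c waiter */
    wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));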
@@ -109,102 +132,69 @@ int rds_send_xmit(struct rds_connection *conn)
109 struct rds_message *rm; 132 struct rds_message *rm;
110 unsigned long flags; 133 unsigned long flags;
111 unsigned int tmp; 134 unsigned int tmp;
112 unsigned int send_quota = send_batch_count;
113 struct scatterlist *sg; 135 struct scatterlist *sg;
114 int ret = 0; 136 int ret = 0;
115 int was_empty = 0;
116 LIST_HEAD(to_be_dropped); 137 LIST_HEAD(to_be_dropped);
117 138
139restart:
140
118 /* 141 /*
119 * sendmsg calls here after having queued its message on the send 142 * sendmsg calls here after having queued its message on the send
120 * queue. We only have one task feeding the connection at a time. If 143 * queue. We only have one task feeding the connection at a time. If
121 * another thread is already feeding the queue then we back off. This 144 * another thread is already feeding the queue then we back off. This
122 * avoids blocking the caller and trading per-connection data between 145 * avoids blocking the caller and trading per-connection data between
123 * caches per message. 146 * caches per message.
124 *
125 * The sem holder will issue a retry if they notice that someone queued
126 * a message after they stopped walking the send queue but before they
127 * dropped the sem.
128 */ 147 */
129 if (!mutex_trylock(&conn->c_send_lock)) { 148 if (!acquire_in_xmit(conn)) {
130 rds_stats_inc(s_send_sem_contention); 149 rds_stats_inc(s_send_lock_contention);
131 ret = -ENOMEM; 150 ret = -ENOMEM;
132 goto out; 151 goto out;
133 } 152 }
134 153
154 /*
155 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT;
156 * we do the opposite to avoid races.
157 */
158 if (!rds_conn_up(conn)) {
159 release_in_xmit(conn);
160 ret = 0;
161 goto out;
162 }
163
135 if (conn->c_trans->xmit_prepare) 164 if (conn->c_trans->xmit_prepare)
136 conn->c_trans->xmit_prepare(conn); 165 conn->c_trans->xmit_prepare(conn);
137 166
138 /* 167 /*
139 * spin trying to push headers and data down the connection until 168 * spin trying to push headers and data down the connection until
140 * the connection doens't make forward progress. 169 * the connection doesn't make forward progress.
141 */ 170 */
142 while (--send_quota) { 171 while (1) {
143 /*
144 * See if need to send a congestion map update if we're
145 * between sending messages. The send_sem protects our sole
146 * use of c_map_offset and _bytes.
147 * Note this is used only by transports that define a special
148 * xmit_cong_map function. For all others, we allocate
149 * a cong_map message and treat it just like any other send.
150 */
151 if (conn->c_map_bytes) {
152 ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
153 conn->c_map_offset);
154 if (ret <= 0)
155 break;
156 172
157 conn->c_map_offset += ret;
158 conn->c_map_bytes -= ret;
159 if (conn->c_map_bytes)
160 continue;
161 }
162
163 /* If we're done sending the current message, clear the
164 * offset and S/G temporaries.
165 */
166 rm = conn->c_xmit_rm; 173 rm = conn->c_xmit_rm;
167 if (rm != NULL &&
168 conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
169 conn->c_xmit_sg == rm->m_nents) {
170 conn->c_xmit_rm = NULL;
171 conn->c_xmit_sg = 0;
172 conn->c_xmit_hdr_off = 0;
173 conn->c_xmit_data_off = 0;
174 conn->c_xmit_rdma_sent = 0;
175 174
176 /* Release the reference to the previous message. */ 175 /*
177 rds_message_put(rm); 176 * If between sending messages, we can send a pending congestion
178 rm = NULL; 177 * map update.
179 }
180
181 /* If we're asked to send a cong map update, do so.
182 */ 178 */
183 if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) { 179 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
184 if (conn->c_trans->xmit_cong_map != NULL) {
185 conn->c_map_offset = 0;
186 conn->c_map_bytes = sizeof(struct rds_header) +
187 RDS_CONG_MAP_BYTES;
188 continue;
189 }
190
191 rm = rds_cong_update_alloc(conn); 180 rm = rds_cong_update_alloc(conn);
192 if (IS_ERR(rm)) { 181 if (IS_ERR(rm)) {
193 ret = PTR_ERR(rm); 182 ret = PTR_ERR(rm);
194 break; 183 break;
195 } 184 }
185 rm->data.op_active = 1;
196 186
197 conn->c_xmit_rm = rm; 187 conn->c_xmit_rm = rm;
198 } 188 }
199 189
200 /* 190 /*
201 * Grab the next message from the send queue, if there is one. 191 * If not already working on one, grab the next message.
202 * 192 *
203 * c_xmit_rm holds a ref while we're sending this message down 193 * c_xmit_rm holds a ref while we're sending this message down
204 * the connection. We can use this ref while holding the 194 * the connection. We can use this ref while holding the
205 * send_sem. rds_send_reset() is serialized with it. 195 * send_sem. rds_send_reset() is serialized with it.
206 */ 196 */
207 if (rm == NULL) { 197 if (!rm) {
208 unsigned int len; 198 unsigned int len;
209 199
210 spin_lock_irqsave(&conn->c_lock, flags); 200 spin_lock_irqsave(&conn->c_lock, flags);
@@ -224,10 +214,8 @@ int rds_send_xmit(struct rds_connection *conn)
224 214
225 spin_unlock_irqrestore(&conn->c_lock, flags); 215 spin_unlock_irqrestore(&conn->c_lock, flags);
226 216
227 if (rm == NULL) { 217 if (!rm)
228 was_empty = 1;
229 break; 218 break;
230 }
231 219
232 /* Unfortunately, the way Infiniband deals with 220 /* Unfortunately, the way Infiniband deals with
233 * RDMA to a bad MR key is by moving the entire 221 * RDMA to a bad MR key is by moving the entire
@@ -236,13 +224,12 @@ int rds_send_xmit(struct rds_connection *conn)
236 * connection. 224 * connection.
237 * Therefore, we never retransmit messages with RDMA ops. 225 * Therefore, we never retransmit messages with RDMA ops.
238 */ 226 */
239 if (rm->m_rdma_op && 227 if (rm->rdma.op_active &&
240 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) { 228 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
241 spin_lock_irqsave(&conn->c_lock, flags); 229 spin_lock_irqsave(&conn->c_lock, flags);
242 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) 230 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
243 list_move(&rm->m_conn_item, &to_be_dropped); 231 list_move(&rm->m_conn_item, &to_be_dropped);
244 spin_unlock_irqrestore(&conn->c_lock, flags); 232 spin_unlock_irqrestore(&conn->c_lock, flags);
245 rds_message_put(rm);
246 continue; 233 continue;
247 } 234 }
248 235
@@ -263,23 +250,55 @@ int rds_send_xmit(struct rds_connection *conn)
263 conn->c_xmit_rm = rm; 250 conn->c_xmit_rm = rm;
264 } 251 }
265 252
266 /* 253 /* The transport either sends the whole rdma or none of it */
267 * Try and send an rdma message. Let's see if we can 254 if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
268 * keep this simple and require that the transport either 255 rm->m_final_op = &rm->rdma;
269 * send the whole rdma or none of it. 256 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
270 */
271 if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
272 ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
273 if (ret) 257 if (ret)
274 break; 258 break;
275 conn->c_xmit_rdma_sent = 1; 259 conn->c_xmit_rdma_sent = 1;
260
276 /* The transport owns the mapped memory for now. 261 /* The transport owns the mapped memory for now.
277 * You can't unmap it while it's on the send queue */ 262 * You can't unmap it while it's on the send queue */
278 set_bit(RDS_MSG_MAPPED, &rm->m_flags); 263 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
279 } 264 }
280 265
281 if (conn->c_xmit_hdr_off < sizeof(struct rds_header) || 266 if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
282 conn->c_xmit_sg < rm->m_nents) { 267 rm->m_final_op = &rm->atomic;
268 ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
269 if (ret)
270 break;
271 conn->c_xmit_atomic_sent = 1;
272
273 /* The transport owns the mapped memory for now.
274 * You can't unmap it while it's on the send queue */
275 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
276 }
277
278 /*
279 * A number of cases require an RDS header to be sent
280 * even if there is no data.
281 * We permit 0-byte sends; rds-ping depends on this.
282 * However, if there are exclusively attached silent ops,
283 * we skip the hdr/data send, to enable silent operation.
284 */
285 if (rm->data.op_nents == 0) {
286 int ops_present;
287 int all_ops_are_silent = 1;
288
289 ops_present = (rm->atomic.op_active || rm->rdma.op_active);
290 if (rm->atomic.op_active && !rm->atomic.op_silent)
291 all_ops_are_silent = 0;
292 if (rm->rdma.op_active && !rm->rdma.op_silent)
293 all_ops_are_silent = 0;
294
295 if (ops_present && all_ops_are_silent
296 && !rm->m_rdma_cookie)
297 rm->data.op_active = 0;
298 }
299
300 if (rm->data.op_active && !conn->c_xmit_data_sent) {
301 rm->m_final_op = &rm->data;
283 ret = conn->c_trans->xmit(conn, rm, 302 ret = conn->c_trans->xmit(conn, rm,
284 conn->c_xmit_hdr_off, 303 conn->c_xmit_hdr_off,
285 conn->c_xmit_sg, 304 conn->c_xmit_sg,
@@ -295,7 +314,7 @@ int rds_send_xmit(struct rds_connection *conn)
295 ret -= tmp; 314 ret -= tmp;
296 } 315 }
297 316
298 sg = &rm->m_sg[conn->c_xmit_sg]; 317 sg = &rm->data.op_sg[conn->c_xmit_sg];
299 while (ret) { 318 while (ret) {
300 tmp = min_t(int, ret, sg->length - 319 tmp = min_t(int, ret, sg->length -
301 conn->c_xmit_data_off); 320 conn->c_xmit_data_off);
@@ -306,49 +325,63 @@ int rds_send_xmit(struct rds_connection *conn)
306 sg++; 325 sg++;
307 conn->c_xmit_sg++; 326 conn->c_xmit_sg++;
308 BUG_ON(ret != 0 && 327 BUG_ON(ret != 0 &&
309 conn->c_xmit_sg == rm->m_nents); 328 conn->c_xmit_sg == rm->data.op_nents);
310 } 329 }
311 } 330 }
331
332 if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
333 (conn->c_xmit_sg == rm->data.op_nents))
334 conn->c_xmit_data_sent = 1;
312 } 335 }
313 }
314 336
315 /* Nuke any messages we decided not to retransmit. */ 337 /*
316 if (!list_empty(&to_be_dropped)) 338 * A rm will only take multiple trips through this loop
317 rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED); 339 * if there is a data op. Thus, if the data is sent (or there was
340 * none), then we're done with the rm.
341 */
342 if (!rm->data.op_active || conn->c_xmit_data_sent) {
343 conn->c_xmit_rm = NULL;
344 conn->c_xmit_sg = 0;
345 conn->c_xmit_hdr_off = 0;
346 conn->c_xmit_data_off = 0;
347 conn->c_xmit_rdma_sent = 0;
348 conn->c_xmit_atomic_sent = 0;
349 conn->c_xmit_data_sent = 0;
350
351 rds_message_put(rm);
352 }
353 }
318 354
319 if (conn->c_trans->xmit_complete) 355 if (conn->c_trans->xmit_complete)
320 conn->c_trans->xmit_complete(conn); 356 conn->c_trans->xmit_complete(conn);
321 357
322 /* 358 release_in_xmit(conn);
323 * We might be racing with another sender who queued a message but
324 * backed off on noticing that we held the c_send_lock. If we check
325 * for queued messages after dropping the sem then either we'll
326 * see the queued message or the queuer will get the sem. If we
327 * notice the queued message then we trigger an immediate retry.
328 *
329 * We need to be careful only to do this when we stopped processing
330 * the send queue because it was empty. It's the only way we
331 * stop processing the loop when the transport hasn't taken
332 * responsibility for forward progress.
333 */
334 mutex_unlock(&conn->c_send_lock);
335 359
336 if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) { 360 /* Nuke any messages we decided not to retransmit. */
337 /* We exhausted the send quota, but there's work left to 361 if (!list_empty(&to_be_dropped)) {
338 * do. Return and (re-)schedule the send worker. 362 /* irqs on here, so we can put(), unlike above */
339 */ 363 list_for_each_entry(rm, &to_be_dropped, m_conn_item)
340 ret = -EAGAIN; 364 rds_message_put(rm);
365 rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
341 } 366 }
342 367
343 if (ret == 0 && was_empty) { 368 /*
344 /* A simple bit test would be way faster than taking the 369 * Other senders can queue a message after we last test the send queue
345 * spin lock */ 370 * but before we clear RDS_IN_XMIT. In that case they'd back off and
346 spin_lock_irqsave(&conn->c_lock, flags); 371 * not try to send their newly queued message. We need to check the
372 * send queue after having cleared RDS_IN_XMIT so that their message
373 * doesn't get stuck on the send queue.
374 *
375 * If the transport cannot continue (i.e ret != 0), then it must
376 * call us when more room is available, such as from the tx
377 * completion handler.
378 */
379 if (ret == 0) {
380 smp_mb();
347 if (!list_empty(&conn->c_send_queue)) { 381 if (!list_empty(&conn->c_send_queue)) {
348 rds_stats_inc(s_send_sem_queue_raced); 382 rds_stats_inc(s_send_lock_queue_raced);
349 ret = -EAGAIN; 383 goto restart;
350 } 384 }
351 spin_unlock_irqrestore(&conn->c_lock, flags);
352 } 385 }
353out: 386out:
354 return ret; 387 return ret;
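The rewritten tail of rds_send_xmit() closes a lost-wakeup race: a sender that backs off after seeing RDS_IN_XMIT set must not strand its message, so the transmitting task clears the bit first and only then re-tests the send queue, with a full barrier ordering the two. The pattern in miniature (illustrative fragments, ignoring the locking and RDS_LL_SEND_FULL handling of the real paths):

    /* producer: queue first, then check whether anyone is transmitting */
    list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
    if (!test_bit(RDS_IN_XMIT, &conn->c_flags))
            rds_send_xmit(conn);    /* nobody was: transmit it ourselves */

    /* consumer: clear the bit first, then re-check the queue */
    release_in_xmit(conn);          /* clear_bit + smp_mb */
    smp_mb();
    if (!list_empty(&conn->c_send_queue))
            goto restart;           /* a message raced in: go around again */

Whichever side's store lands second, at least one of them observes the other's update, so a message queued during the race either gets picked up on the restart or triggers a fresh call into rds_send_xmit().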
@@ -376,52 +409,60 @@ static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
376} 409}
377 410
378/* 411/*
379 * Returns true if there are no messages on the send and retransmit queues 412 * This is pretty similar to what happens below in the ACK
380 * which have a sequence number greater than or equal to the given sequence 413 * handling code - except that we call here as soon as we get
381 * number. 414 * the IB send completion on the RDMA op and the accompanying
415 * message.
382 */ 416 */
383int rds_send_acked_before(struct rds_connection *conn, u64 seq) 417void rds_rdma_send_complete(struct rds_message *rm, int status)
384{ 418{
385 struct rds_message *rm, *tmp; 419 struct rds_sock *rs = NULL;
386 int ret = 1; 420 struct rm_rdma_op *ro;
421 struct rds_notifier *notifier;
422 unsigned long flags;
387 423
388 spin_lock(&conn->c_lock); 424 spin_lock_irqsave(&rm->m_rs_lock, flags);
389 425
390 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { 426 ro = &rm->rdma;
391 if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq) 427 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
392 ret = 0; 428 ro->op_active && ro->op_notify && ro->op_notifier) {
393 break; 429 notifier = ro->op_notifier;
394 } 430 rs = rm->m_rs;
431 sock_hold(rds_rs_to_sk(rs));
395 432
396 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) { 433 notifier->n_status = status;
397 if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq) 434 spin_lock(&rs->rs_lock);
398 ret = 0; 435 list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
399 break; 436 spin_unlock(&rs->rs_lock);
437
438 ro->op_notifier = NULL;
400 } 439 }
401 440
402 spin_unlock(&conn->c_lock); 441 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
403 442
404 return ret; 443 if (rs) {
444 rds_wake_sk_sleep(rs);
445 sock_put(rds_rs_to_sk(rs));
446 }
405} 447}
448EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
406 449
407/* 450/*
408 * This is pretty similar to what happens below in the ACK 451 * Just like above, except looks at atomic op
409 * handling code - except that we call here as soon as we get
410 * the IB send completion on the RDMA op and the accompanying
411 * message.
412 */ 452 */
413void rds_rdma_send_complete(struct rds_message *rm, int status) 453void rds_atomic_send_complete(struct rds_message *rm, int status)
414{ 454{
415 struct rds_sock *rs = NULL; 455 struct rds_sock *rs = NULL;
416 struct rds_rdma_op *ro; 456 struct rm_atomic_op *ao;
417 struct rds_notifier *notifier; 457 struct rds_notifier *notifier;
458 unsigned long flags;
418 459
419 spin_lock(&rm->m_rs_lock); 460 spin_lock_irqsave(&rm->m_rs_lock, flags);
420 461
421 ro = rm->m_rdma_op; 462 ao = &rm->atomic;
422 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) && 463 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
423 ro && ro->r_notify && ro->r_notifier) { 464 && ao->op_active && ao->op_notify && ao->op_notifier) {
424 notifier = ro->r_notifier; 465 notifier = ao->op_notifier;
425 rs = rm->m_rs; 466 rs = rm->m_rs;
426 sock_hold(rds_rs_to_sk(rs)); 467 sock_hold(rds_rs_to_sk(rs));
427 468
@@ -430,17 +471,17 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
430 list_add_tail(&notifier->n_list, &rs->rs_notify_queue); 471 list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
431 spin_unlock(&rs->rs_lock); 472 spin_unlock(&rs->rs_lock);
432 473
433 ro->r_notifier = NULL; 474 ao->op_notifier = NULL;
434 } 475 }
435 476
436 spin_unlock(&rm->m_rs_lock); 477 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
437 478
438 if (rs) { 479 if (rs) {
439 rds_wake_sk_sleep(rs); 480 rds_wake_sk_sleep(rs);
440 sock_put(rds_rs_to_sk(rs)); 481 sock_put(rds_rs_to_sk(rs));
441 } 482 }
442} 483}
443EXPORT_SYMBOL_GPL(rds_rdma_send_complete); 484EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
444 485
445/* 486/*
446 * This is the same as rds_rdma_send_complete except we 487 * This is the same as rds_rdma_send_complete except we
@@ -448,15 +489,23 @@ EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
448 * socket, socket lock) and can just move the notifier. 489 * socket, socket lock) and can just move the notifier.
449 */ 490 */
450static inline void 491static inline void
451__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status) 492__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
452{ 493{
453 struct rds_rdma_op *ro; 494 struct rm_rdma_op *ro;
495 struct rm_atomic_op *ao;
496
497 ro = &rm->rdma;
498 if (ro->op_active && ro->op_notify && ro->op_notifier) {
499 ro->op_notifier->n_status = status;
500 list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
501 ro->op_notifier = NULL;
502 }
454 503
455 ro = rm->m_rdma_op; 504 ao = &rm->atomic;
456 if (ro && ro->r_notify && ro->r_notifier) { 505 if (ao->op_active && ao->op_notify && ao->op_notifier) {
457 ro->r_notifier->n_status = status; 506 ao->op_notifier->n_status = status;
458 list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue); 507 list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
459 ro->r_notifier = NULL; 508 ao->op_notifier = NULL;
460 } 509 }
461 510
462 /* No need to wake the app - caller does this */ 511 /* No need to wake the app - caller does this */
@@ -468,7 +517,7 @@ __rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status
468 * So speed is not an issue here. 517 * So speed is not an issue here.
469 */ 518 */
470struct rds_message *rds_send_get_message(struct rds_connection *conn, 519struct rds_message *rds_send_get_message(struct rds_connection *conn,
471 struct rds_rdma_op *op) 520 struct rm_rdma_op *op)
472{ 521{
473 struct rds_message *rm, *tmp, *found = NULL; 522 struct rds_message *rm, *tmp, *found = NULL;
474 unsigned long flags; 523 unsigned long flags;
@@ -476,7 +525,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
476 spin_lock_irqsave(&conn->c_lock, flags); 525 spin_lock_irqsave(&conn->c_lock, flags);
477 526
478 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { 527 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
479 if (rm->m_rdma_op == op) { 528 if (&rm->rdma == op) {
480 atomic_inc(&rm->m_refcount); 529 atomic_inc(&rm->m_refcount);
481 found = rm; 530 found = rm;
482 goto out; 531 goto out;
@@ -484,7 +533,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
484 } 533 }
485 534
486 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) { 535 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
487 if (rm->m_rdma_op == op) { 536 if (&rm->rdma == op) {
488 atomic_inc(&rm->m_refcount); 537 atomic_inc(&rm->m_refcount);
489 found = rm; 538 found = rm;
490 break; 539 break;
@@ -544,19 +593,20 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
544 spin_lock(&rs->rs_lock); 593 spin_lock(&rs->rs_lock);
545 594
546 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { 595 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
547 struct rds_rdma_op *ro = rm->m_rdma_op; 596 struct rm_rdma_op *ro = &rm->rdma;
548 struct rds_notifier *notifier; 597 struct rds_notifier *notifier;
549 598
550 list_del_init(&rm->m_sock_item); 599 list_del_init(&rm->m_sock_item);
551 rds_send_sndbuf_remove(rs, rm); 600 rds_send_sndbuf_remove(rs, rm);
552 601
553 if (ro && ro->r_notifier && (status || ro->r_notify)) { 602 if (ro->op_active && ro->op_notifier &&
554 notifier = ro->r_notifier; 603 (ro->op_notify || (ro->op_recverr && status))) {
604 notifier = ro->op_notifier;
555 list_add_tail(&notifier->n_list, 605 list_add_tail(&notifier->n_list,
556 &rs->rs_notify_queue); 606 &rs->rs_notify_queue);
557 if (!notifier->n_status) 607 if (!notifier->n_status)
558 notifier->n_status = status; 608 notifier->n_status = status;
559 rm->m_rdma_op->r_notifier = NULL; 609 rm->rdma.op_notifier = NULL;
560 } 610 }
561 was_on_sock = 1; 611 was_on_sock = 1;
562 rm->m_rs = NULL; 612 rm->m_rs = NULL;
@@ -619,9 +669,8 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
619{ 669{
620 struct rds_message *rm, *tmp; 670 struct rds_message *rm, *tmp;
621 struct rds_connection *conn; 671 struct rds_connection *conn;
622 unsigned long flags, flags2; 672 unsigned long flags;
623 LIST_HEAD(list); 673 LIST_HEAD(list);
624 int wake = 0;
625 674
626 /* get all the messages we're dropping under the rs lock */ 675 /* get all the messages we're dropping under the rs lock */
627 spin_lock_irqsave(&rs->rs_lock, flags); 676 spin_lock_irqsave(&rs->rs_lock, flags);
@@ -631,59 +680,54 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
631 dest->sin_port != rm->m_inc.i_hdr.h_dport)) 680 dest->sin_port != rm->m_inc.i_hdr.h_dport))
632 continue; 681 continue;
633 682
634 wake = 1;
635 list_move(&rm->m_sock_item, &list); 683 list_move(&rm->m_sock_item, &list);
636 rds_send_sndbuf_remove(rs, rm); 684 rds_send_sndbuf_remove(rs, rm);
637 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); 685 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
638 } 686 }
639 687
640 /* order flag updates with the rs lock */ 688 /* order flag updates with the rs lock */
641 if (wake) 689 smp_mb__after_clear_bit();
642 smp_mb__after_clear_bit();
643 690
644 spin_unlock_irqrestore(&rs->rs_lock, flags); 691 spin_unlock_irqrestore(&rs->rs_lock, flags);
645 692
646 conn = NULL; 693 if (list_empty(&list))
694 return;
647 695
648 /* now remove the messages from the conn list as needed */ 696 /* Remove the messages from the conn */
649 list_for_each_entry(rm, &list, m_sock_item) { 697 list_for_each_entry(rm, &list, m_sock_item) {
650 /* We do this here rather than in the loop above, so that
651 * we don't have to nest m_rs_lock under rs->rs_lock */
652 spin_lock_irqsave(&rm->m_rs_lock, flags2);
653 /* If this is a RDMA operation, notify the app. */
654 spin_lock(&rs->rs_lock);
655 __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
656 spin_unlock(&rs->rs_lock);
657 rm->m_rs = NULL;
658 spin_unlock_irqrestore(&rm->m_rs_lock, flags2);
659 698
699 conn = rm->m_inc.i_conn;
700
701 spin_lock_irqsave(&conn->c_lock, flags);
660 /* 702 /*
661 * If we see this flag cleared then we're *sure* that someone 703 * Maybe someone else beat us to removing rm from the conn.
662 * else beat us to removing it from the conn. If we race 704 * If we race with their flag update we'll get the lock and
663 * with their flag update we'll get the lock and then really 705 * then really see that the flag has been cleared.
664 * see that the flag has been cleared.
665 */ 706 */
666 if (!test_bit(RDS_MSG_ON_CONN, &rm->m_flags)) 707 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
708 spin_unlock_irqrestore(&conn->c_lock, flags);
667 continue; 709 continue;
668
669 if (conn != rm->m_inc.i_conn) {
670 if (conn)
671 spin_unlock_irqrestore(&conn->c_lock, flags);
672 conn = rm->m_inc.i_conn;
673 spin_lock_irqsave(&conn->c_lock, flags);
674 } 710 }
711 list_del_init(&rm->m_conn_item);
712 spin_unlock_irqrestore(&conn->c_lock, flags);
675 713
676 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { 714 /*
677 list_del_init(&rm->m_conn_item); 715 * Couldn't grab m_rs_lock in top loop (lock ordering),
678 rds_message_put(rm); 716 * but we can now.
679 } 717 */
680 } 718 spin_lock_irqsave(&rm->m_rs_lock, flags);
681 719
682 if (conn) 720 spin_lock(&rs->rs_lock);
683 spin_unlock_irqrestore(&conn->c_lock, flags); 721 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
722 spin_unlock(&rs->rs_lock);
684 723
685 if (wake) 724 rm->m_rs = NULL;
686 rds_wake_sk_sleep(rs); 725 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
726
727 rds_message_put(rm);
728 }
729
730 rds_wake_sk_sleep(rs);
687 731
688 while (!list_empty(&list)) { 732 while (!list_empty(&list)) {
689 rm = list_entry(list.next, struct rds_message, m_sock_item); 733 rm = list_entry(list.next, struct rds_message, m_sock_item);
@@ -763,6 +807,63 @@ out:
763 return *queued; 807 return *queued;
764} 808}
765 809
810/*
811 * rds_message is getting to be quite complicated, and we'd like to allocate
812 * it all in one go. This figures out how big it needs to be up front.
813 */
814static int rds_rm_size(struct msghdr *msg, int data_len)
815{
816 struct cmsghdr *cmsg;
817 int size = 0;
818 int cmsg_groups = 0;
819 int retval;
820
821 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
822 if (!CMSG_OK(msg, cmsg))
823 return -EINVAL;
824
825 if (cmsg->cmsg_level != SOL_RDS)
826 continue;
827
828 switch (cmsg->cmsg_type) {
829 case RDS_CMSG_RDMA_ARGS:
830 cmsg_groups |= 1;
831 retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
832 if (retval < 0)
833 return retval;
834 size += retval;
835
836 break;
837
838 case RDS_CMSG_RDMA_DEST:
839 case RDS_CMSG_RDMA_MAP:
840 cmsg_groups |= 2;
841 /* these are valid but do not add any size */
842 break;
843
844 case RDS_CMSG_ATOMIC_CSWP:
845 case RDS_CMSG_ATOMIC_FADD:
846 case RDS_CMSG_MASKED_ATOMIC_CSWP:
847 case RDS_CMSG_MASKED_ATOMIC_FADD:
848 cmsg_groups |= 1;
849 size += sizeof(struct scatterlist);
850 break;
851
852 default:
853 return -EINVAL;
854 }
855
856 }
857
858 size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
859
860 /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
861 if (cmsg_groups == 3)
862 return -EINVAL;
863
864 return size;
865}
866
766static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm, 867static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
767 struct msghdr *msg, int *allocated_mr) 868 struct msghdr *msg, int *allocated_mr)
768{ 869{
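rds_rm_size() is what lets rds_sendmsg() allocate the message header and every scatterlist entry in a single allocation. A worked example, assuming 4 KiB pages: a sendmsg() carrying a 5000-byte payload plus one RDS_CMSG_ATOMIC_CSWP control message sizes out as

    data:   ceil(5000, 4096) = 2 scatterlist entries
    atomic: 1 scatterlist entry
    total:  3 * sizeof(struct scatterlist)

and rds_sendmsg() below hands that total to rds_message_alloc() as the extra space to reserve.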
@@ -777,7 +878,7 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
777 continue; 878 continue;
778 879
779 /* As a side effect, RDMA_DEST and RDMA_MAP will set 880 /* As a side effect, RDMA_DEST and RDMA_MAP will set
780 * rm->m_rdma_cookie and rm->m_rdma_mr. 881 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
781 */ 882 */
782 switch (cmsg->cmsg_type) { 883 switch (cmsg->cmsg_type) {
783 case RDS_CMSG_RDMA_ARGS: 884 case RDS_CMSG_RDMA_ARGS:
@@ -793,6 +894,12 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
793 if (!ret) 894 if (!ret)
794 *allocated_mr = 1; 895 *allocated_mr = 1;
795 break; 896 break;
897 case RDS_CMSG_ATOMIC_CSWP:
898 case RDS_CMSG_ATOMIC_FADD:
899 case RDS_CMSG_MASKED_ATOMIC_CSWP:
900 case RDS_CMSG_MASKED_ATOMIC_FADD:
901 ret = rds_cmsg_atomic(rs, rm, cmsg);
902 break;
796 903
797 default: 904 default:
798 return -EINVAL; 905 return -EINVAL;
@@ -850,13 +957,26 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
850 goto out; 957 goto out;
851 } 958 }
852 959
853 rm = rds_message_copy_from_user(msg->msg_iov, payload_len); 960 /* size of rm including all sgs */
854 if (IS_ERR(rm)) { 961 ret = rds_rm_size(msg, payload_len);
855 ret = PTR_ERR(rm); 962 if (ret < 0)
856 rm = NULL; 963 goto out;
964
965 rm = rds_message_alloc(ret, GFP_KERNEL);
966 if (!rm) {
967 ret = -ENOMEM;
857 goto out; 968 goto out;
858 } 969 }
859 970
971 /* Attach data to the rm */
972 if (payload_len) {
973 rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
974 ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
975 if (ret)
976 goto out;
977 }
978 rm->data.op_active = 1;
979
860 rm->m_daddr = daddr; 980 rm->m_daddr = daddr;
861 981
862 /* rds_conn_create has a spinlock that runs with IRQ off. 982 /* rds_conn_create has a spinlock that runs with IRQ off.
@@ -879,22 +999,23 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
879 if (ret) 999 if (ret)
880 goto out; 1000 goto out;
881 1001
882 if ((rm->m_rdma_cookie || rm->m_rdma_op) && 1002 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
883 conn->c_trans->xmit_rdma == NULL) {
884 if (printk_ratelimit()) 1003 if (printk_ratelimit())
885 printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", 1004 printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
886 rm->m_rdma_op, conn->c_trans->xmit_rdma); 1005 &rm->rdma, conn->c_trans->xmit_rdma);
887 ret = -EOPNOTSUPP; 1006 ret = -EOPNOTSUPP;
888 goto out; 1007 goto out;
889 } 1008 }
890 1009
891 /* If the connection is down, trigger a connect. We may 1010 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
892 * have scheduled a delayed reconnect however - in this case 1011 if (printk_ratelimit())
893 * we should not interfere. 1012 printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
894 */ 1013 &rm->atomic, conn->c_trans->xmit_atomic);
895 if (rds_conn_state(conn) == RDS_CONN_DOWN && 1014 ret = -EOPNOTSUPP;
896 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags)) 1015 goto out;
897 queue_delayed_work(rds_wq, &conn->c_conn_w, 0); 1016 }
1017
1018 rds_conn_connect_if_down(conn);
898 1019
899 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); 1020 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
900 if (ret) { 1021 if (ret) {
@@ -938,7 +1059,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
938 rds_stats_inc(s_send_queued); 1059 rds_stats_inc(s_send_queued);
939 1060
940 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) 1061 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
941 rds_send_worker(&conn->c_send_w.work); 1062 rds_send_xmit(conn);
942 1063
943 rds_message_put(rm); 1064 rds_message_put(rm);
944 return payload_len; 1065 return payload_len;
@@ -966,20 +1087,15 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
966 int ret = 0; 1087 int ret = 0;
967 1088
968 rm = rds_message_alloc(0, GFP_ATOMIC); 1089 rm = rds_message_alloc(0, GFP_ATOMIC);
969 if (rm == NULL) { 1090 if (!rm) {
970 ret = -ENOMEM; 1091 ret = -ENOMEM;
971 goto out; 1092 goto out;
972 } 1093 }
973 1094
974 rm->m_daddr = conn->c_faddr; 1095 rm->m_daddr = conn->c_faddr;
1096 rm->data.op_active = 1;
975 1097
976 /* If the connection is down, trigger a connect. We may 1098 rds_conn_connect_if_down(conn);
977 * have scheduled a delayed reconnect however - in this case
978 * we should not interfere.
979 */
980 if (rds_conn_state(conn) == RDS_CONN_DOWN &&
981 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
982 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
983 1099
984 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL); 1100 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
985 if (ret) 1101 if (ret)
@@ -999,7 +1115,9 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
999 rds_stats_inc(s_send_queued); 1115 rds_stats_inc(s_send_queued);
1000 rds_stats_inc(s_send_pong); 1116 rds_stats_inc(s_send_pong);
1001 1117
1002 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 1118 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1119 rds_send_xmit(conn);
1120
1003 rds_message_put(rm); 1121 rds_message_put(rm);
1004 return 0; 1122 return 0;
1005 1123
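For reference, the new RDS_CMSG_ATOMIC_* cases dispatched to rds_cmsg_atomic() above are driven from userspace by attaching ancillary data to sendmsg(). A minimal sketch follows, assuming SOL_RDS, RDS_CMSG_ATOMIC_FADD and struct rds_atomic_args as exposed by <linux/rds.h> of this era; the exact argument layout should be checked against the installed header, and rds_queue_fadd() itself is illustrative, not kernel API.

/* Hedged sketch: queue a fetch-and-add atomic op on an RDS socket. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/rds.h>

static ssize_t rds_queue_fadd(int fd, struct sockaddr_in *dst,
			      struct rds_atomic_args *args)
{
	char cbuf[CMSG_SPACE(sizeof(*args))];
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = dst;			/* destination RDS endpoint */
	msg.msg_namelen = sizeof(*dst);
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type = RDS_CMSG_ATOMIC_FADD;	/* lands in the switch above */
	cmsg->cmsg_len = CMSG_LEN(sizeof(*args));
	memcpy(CMSG_DATA(cmsg), args, sizeof(*args));

	return sendmsg(fd, &msg, 0);
}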
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 7598eb07cfb1..10c759ccac0c 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -57,8 +57,8 @@ static const char *const rds_stat_names[] = {
57 "recv_ping", 57 "recv_ping",
58 "send_queue_empty", 58 "send_queue_empty",
59 "send_queue_full", 59 "send_queue_full",
60 "send_sem_contention", 60 "send_lock_contention",
61 "send_sem_queue_raced", 61 "send_lock_queue_raced",
62 "send_immediate_retry", 62 "send_immediate_retry",
63 "send_delayed_retry", 63 "send_delayed_retry",
64 "send_drop_acked", 64 "send_drop_acked",
@@ -143,7 +143,7 @@ void rds_stats_exit(void)
143 rds_info_deregister_func(RDS_INFO_COUNTERS, rds_stats_info); 143 rds_info_deregister_func(RDS_INFO_COUNTERS, rds_stats_info);
144} 144}
145 145
146int __init rds_stats_init(void) 146int rds_stats_init(void)
147{ 147{
148 rds_info_register_func(RDS_INFO_COUNTERS, rds_stats_info); 148 rds_info_register_func(RDS_INFO_COUNTERS, rds_stats_info);
149 return 0; 149 return 0;
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index 7829a20325d3..25ad0c77a26c 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -105,13 +105,13 @@ void rds_sysctl_exit(void)
105 unregister_sysctl_table(rds_sysctl_reg_table); 105 unregister_sysctl_table(rds_sysctl_reg_table);
106} 106}
107 107
108int __init rds_sysctl_init(void) 108int rds_sysctl_init(void)
109{ 109{
110 rds_sysctl_reconnect_min = msecs_to_jiffies(1); 110 rds_sysctl_reconnect_min = msecs_to_jiffies(1);
111 rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; 111 rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
112 112
113 rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table); 113 rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table);
114 if (rds_sysctl_reg_table == NULL) 114 if (!rds_sysctl_reg_table)
115 return -ENOMEM; 115 return -ENOMEM;
116 return 0; 116 return 0;
117} 117}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index babf4577ff7d..eeb08e6ab96b 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -200,7 +200,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
200 struct rds_tcp_connection *tc; 200 struct rds_tcp_connection *tc;
201 201
202 tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp); 202 tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
203 if (tc == NULL) 203 if (!tc)
204 return -ENOMEM; 204 return -ENOMEM;
205 205
206 tc->t_sock = NULL; 206 tc->t_sock = NULL;
@@ -258,7 +258,6 @@ struct rds_transport rds_tcp_transport = {
258 .laddr_check = rds_tcp_laddr_check, 258 .laddr_check = rds_tcp_laddr_check,
259 .xmit_prepare = rds_tcp_xmit_prepare, 259 .xmit_prepare = rds_tcp_xmit_prepare,
260 .xmit_complete = rds_tcp_xmit_complete, 260 .xmit_complete = rds_tcp_xmit_complete,
261 .xmit_cong_map = rds_tcp_xmit_cong_map,
262 .xmit = rds_tcp_xmit, 261 .xmit = rds_tcp_xmit,
263 .recv = rds_tcp_recv, 262 .recv = rds_tcp_recv,
264 .conn_alloc = rds_tcp_conn_alloc, 263 .conn_alloc = rds_tcp_conn_alloc,
@@ -266,7 +265,6 @@ struct rds_transport rds_tcp_transport = {
266 .conn_connect = rds_tcp_conn_connect, 265 .conn_connect = rds_tcp_conn_connect,
267 .conn_shutdown = rds_tcp_conn_shutdown, 266 .conn_shutdown = rds_tcp_conn_shutdown,
268 .inc_copy_to_user = rds_tcp_inc_copy_to_user, 267 .inc_copy_to_user = rds_tcp_inc_copy_to_user,
269 .inc_purge = rds_tcp_inc_purge,
270 .inc_free = rds_tcp_inc_free, 268 .inc_free = rds_tcp_inc_free,
271 .stats_info_copy = rds_tcp_stats_info_copy, 269 .stats_info_copy = rds_tcp_stats_info_copy,
272 .exit = rds_tcp_exit, 270 .exit = rds_tcp_exit,
@@ -276,14 +274,14 @@ struct rds_transport rds_tcp_transport = {
276 .t_prefer_loopback = 1, 274 .t_prefer_loopback = 1,
277}; 275};
278 276
279int __init rds_tcp_init(void) 277int rds_tcp_init(void)
280{ 278{
281 int ret; 279 int ret;
282 280
283 rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection", 281 rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
284 sizeof(struct rds_tcp_connection), 282 sizeof(struct rds_tcp_connection),
285 0, 0, NULL); 283 0, 0, NULL);
286 if (rds_tcp_conn_slab == NULL) { 284 if (!rds_tcp_conn_slab) {
287 ret = -ENOMEM; 285 ret = -ENOMEM;
288 goto out; 286 goto out;
289 } 287 }
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 844fa6b9cf5a..f5e6f7bebb50 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -43,7 +43,7 @@ struct rds_tcp_statistics {
43}; 43};
44 44
45/* tcp.c */ 45/* tcp.c */
46int __init rds_tcp_init(void); 46int rds_tcp_init(void);
47void rds_tcp_exit(void); 47void rds_tcp_exit(void);
48void rds_tcp_tune(struct socket *sock); 48void rds_tcp_tune(struct socket *sock);
49void rds_tcp_nonagle(struct socket *sock); 49void rds_tcp_nonagle(struct socket *sock);
@@ -61,16 +61,15 @@ void rds_tcp_conn_shutdown(struct rds_connection *conn);
61void rds_tcp_state_change(struct sock *sk); 61void rds_tcp_state_change(struct sock *sk);
62 62
63/* tcp_listen.c */ 63/* tcp_listen.c */
64int __init rds_tcp_listen_init(void); 64int rds_tcp_listen_init(void);
65void rds_tcp_listen_stop(void); 65void rds_tcp_listen_stop(void);
66void rds_tcp_listen_data_ready(struct sock *sk, int bytes); 66void rds_tcp_listen_data_ready(struct sock *sk, int bytes);
67 67
68/* tcp_recv.c */ 68/* tcp_recv.c */
69int __init rds_tcp_recv_init(void); 69int rds_tcp_recv_init(void);
70void rds_tcp_recv_exit(void); 70void rds_tcp_recv_exit(void);
71void rds_tcp_data_ready(struct sock *sk, int bytes); 71void rds_tcp_data_ready(struct sock *sk, int bytes);
72int rds_tcp_recv(struct rds_connection *conn); 72int rds_tcp_recv(struct rds_connection *conn);
73void rds_tcp_inc_purge(struct rds_incoming *inc);
74void rds_tcp_inc_free(struct rds_incoming *inc); 73void rds_tcp_inc_free(struct rds_incoming *inc);
75int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov, 74int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
76 size_t size); 75 size_t size);
@@ -81,8 +80,6 @@ void rds_tcp_xmit_complete(struct rds_connection *conn);
81int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, 80int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
82 unsigned int hdr_off, unsigned int sg, unsigned int off); 81 unsigned int hdr_off, unsigned int sg, unsigned int off);
83void rds_tcp_write_space(struct sock *sk); 82void rds_tcp_write_space(struct sock *sk);
84int rds_tcp_xmit_cong_map(struct rds_connection *conn,
85 struct rds_cong_map *map, unsigned long offset);
86 83
87/* tcp_stats.c */ 84/* tcp_stats.c */
88DECLARE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats); 85DECLARE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index c519939e8da9..af95c8e058fc 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -45,7 +45,7 @@ void rds_tcp_state_change(struct sock *sk)
45 45
46 read_lock_bh(&sk->sk_callback_lock); 46 read_lock_bh(&sk->sk_callback_lock);
47 conn = sk->sk_user_data; 47 conn = sk->sk_user_data;
48 if (conn == NULL) { 48 if (!conn) {
49 state_change = sk->sk_state_change; 49 state_change = sk->sk_state_change;
50 goto out; 50 goto out;
51 } 51 }
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 27844f231d10..8b5cc4aa8868 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -116,7 +116,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
116 116
117 read_lock_bh(&sk->sk_callback_lock); 117 read_lock_bh(&sk->sk_callback_lock);
118 ready = sk->sk_user_data; 118 ready = sk->sk_user_data;
119 if (ready == NULL) { /* check for teardown race */ 119 if (!ready) { /* check for teardown race */
120 ready = sk->sk_data_ready; 120 ready = sk->sk_data_ready;
121 goto out; 121 goto out;
122 } 122 }
@@ -135,7 +135,7 @@ out:
135 ready(sk, bytes); 135 ready(sk, bytes);
136} 136}
137 137
138int __init rds_tcp_listen_init(void) 138int rds_tcp_listen_init(void)
139{ 139{
140 struct sockaddr_in sin; 140 struct sockaddr_in sin;
141 struct socket *sock = NULL; 141 struct socket *sock = NULL;
@@ -178,7 +178,7 @@ void rds_tcp_listen_stop(void)
178 struct socket *sock = rds_tcp_listen_sock; 178 struct socket *sock = rds_tcp_listen_sock;
179 struct sock *sk; 179 struct sock *sk;
180 180
181 if (sock == NULL) 181 if (!sock)
182 return; 182 return;
183 183
184 sk = sock->sk; 184 sk = sock->sk;
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index e43797404102..67263fbee623 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -39,7 +39,7 @@
39 39
40static struct kmem_cache *rds_tcp_incoming_slab; 40static struct kmem_cache *rds_tcp_incoming_slab;
41 41
42void rds_tcp_inc_purge(struct rds_incoming *inc) 42static void rds_tcp_inc_purge(struct rds_incoming *inc)
43{ 43{
44 struct rds_tcp_incoming *tinc; 44 struct rds_tcp_incoming *tinc;
45 tinc = container_of(inc, struct rds_tcp_incoming, ti_inc); 45 tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
@@ -190,10 +190,10 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
190 * processing. 190 * processing.
191 */ 191 */
192 while (left) { 192 while (left) {
193 if (tinc == NULL) { 193 if (!tinc) {
194 tinc = kmem_cache_alloc(rds_tcp_incoming_slab, 194 tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
195 arg->gfp); 195 arg->gfp);
196 if (tinc == NULL) { 196 if (!tinc) {
197 desc->error = -ENOMEM; 197 desc->error = -ENOMEM;
198 goto out; 198 goto out;
199 } 199 }
@@ -229,7 +229,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
229 229
230 if (left && tc->t_tinc_data_rem) { 230 if (left && tc->t_tinc_data_rem) {
231 clone = skb_clone(skb, arg->gfp); 231 clone = skb_clone(skb, arg->gfp);
232 if (clone == NULL) { 232 if (!clone) {
233 desc->error = -ENOMEM; 233 desc->error = -ENOMEM;
234 goto out; 234 goto out;
235 } 235 }
@@ -326,7 +326,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
326 326
327 read_lock_bh(&sk->sk_callback_lock); 327 read_lock_bh(&sk->sk_callback_lock);
328 conn = sk->sk_user_data; 328 conn = sk->sk_user_data;
329 if (conn == NULL) { /* check for teardown race */ 329 if (!conn) { /* check for teardown race */
330 ready = sk->sk_data_ready; 330 ready = sk->sk_data_ready;
331 goto out; 331 goto out;
332 } 332 }
@@ -342,12 +342,12 @@ out:
342 ready(sk, bytes); 342 ready(sk, bytes);
343} 343}
344 344
345int __init rds_tcp_recv_init(void) 345int rds_tcp_recv_init(void)
346{ 346{
347 rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming", 347 rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
348 sizeof(struct rds_tcp_incoming), 348 sizeof(struct rds_tcp_incoming),
349 0, 0, NULL); 349 0, 0, NULL);
350 if (rds_tcp_incoming_slab == NULL) 350 if (!rds_tcp_incoming_slab)
351 return -ENOMEM; 351 return -ENOMEM;
352 return 0; 352 return 0;
353} 353}
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 2f012a07d94d..aa16841afbdf 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -77,56 +77,6 @@ int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
77} 77}
78 78
79/* the core send_sem serializes this with other xmit and shutdown */ 79/* the core send_sem serializes this with other xmit and shutdown */
80int rds_tcp_xmit_cong_map(struct rds_connection *conn,
81 struct rds_cong_map *map, unsigned long offset)
82{
83 static struct rds_header rds_tcp_map_header = {
84 .h_flags = RDS_FLAG_CONG_BITMAP,
85 };
86 struct rds_tcp_connection *tc = conn->c_transport_data;
87 unsigned long i;
88 int ret;
89 int copied = 0;
90
91 /* Some problem claims cpu_to_be32(constant) isn't a constant. */
92 rds_tcp_map_header.h_len = cpu_to_be32(RDS_CONG_MAP_BYTES);
93
94 if (offset < sizeof(struct rds_header)) {
95 ret = rds_tcp_sendmsg(tc->t_sock,
96 (void *)&rds_tcp_map_header + offset,
97 sizeof(struct rds_header) - offset);
98 if (ret <= 0)
99 return ret;
100 offset += ret;
101 copied = ret;
102 if (offset < sizeof(struct rds_header))
103 return ret;
104 }
105
106 offset -= sizeof(struct rds_header);
107 i = offset / PAGE_SIZE;
108 offset = offset % PAGE_SIZE;
109 BUG_ON(i >= RDS_CONG_MAP_PAGES);
110
111 do {
112 ret = tc->t_sock->ops->sendpage(tc->t_sock,
113 virt_to_page(map->m_page_addrs[i]),
114 offset, PAGE_SIZE - offset,
115 MSG_DONTWAIT);
116 if (ret <= 0)
117 break;
118 copied += ret;
119 offset += ret;
120 if (offset == PAGE_SIZE) {
121 offset = 0;
122 i++;
123 }
124 } while (i < RDS_CONG_MAP_PAGES);
125
126 return copied ? copied : ret;
127}
128
129/* the core send_sem serializes this with other xmit and shutdown */
130int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, 80int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
131 unsigned int hdr_off, unsigned int sg, unsigned int off) 81 unsigned int hdr_off, unsigned int sg, unsigned int off)
132{ 82{
@@ -166,21 +116,21 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
166 goto out; 116 goto out;
167 } 117 }
168 118
169 while (sg < rm->m_nents) { 119 while (sg < rm->data.op_nents) {
170 ret = tc->t_sock->ops->sendpage(tc->t_sock, 120 ret = tc->t_sock->ops->sendpage(tc->t_sock,
171 sg_page(&rm->m_sg[sg]), 121 sg_page(&rm->data.op_sg[sg]),
172 rm->m_sg[sg].offset + off, 122 rm->data.op_sg[sg].offset + off,
173 rm->m_sg[sg].length - off, 123 rm->data.op_sg[sg].length - off,
174 MSG_DONTWAIT|MSG_NOSIGNAL); 124 MSG_DONTWAIT|MSG_NOSIGNAL);
175 rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->m_sg[sg]), 125 rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
176 rm->m_sg[sg].offset + off, rm->m_sg[sg].length - off, 126 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
177 ret); 127 ret);
178 if (ret <= 0) 128 if (ret <= 0)
179 break; 129 break;
180 130
181 off += ret; 131 off += ret;
182 done += ret; 132 done += ret;
183 if (off == rm->m_sg[sg].length) { 133 if (off == rm->data.op_sg[sg].length) {
184 off = 0; 134 off = 0;
185 sg++; 135 sg++;
186 } 136 }
@@ -226,7 +176,7 @@ void rds_tcp_write_space(struct sock *sk)
226 176
227 read_lock_bh(&sk->sk_callback_lock); 177 read_lock_bh(&sk->sk_callback_lock);
228 conn = sk->sk_user_data; 178 conn = sk->sk_user_data;
229 if (conn == NULL) { 179 if (!conn) {
230 write_space = sk->sk_write_space; 180 write_space = sk->sk_write_space;
231 goto out; 181 goto out;
232 } 182 }
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 786c20eaaf5e..0fd90f8c5f59 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -61,7 +61,7 @@
61 * 61 *
62 * Transition to state DISCONNECTING/DOWN: 62 * Transition to state DISCONNECTING/DOWN:
63 * - Inside the shutdown worker; synchronizes with xmit path 63 * - Inside the shutdown worker; synchronizes with xmit path
64 * through c_send_lock, and with connection management callbacks 64 * through RDS_IN_XMIT, and with connection management callbacks
65 * via c_cm_lock. 65 * via c_cm_lock.
66 * 66 *
67 * For receive callbacks, we rely on the underlying transport 67 * For receive callbacks, we rely on the underlying transport
@@ -110,7 +110,7 @@ EXPORT_SYMBOL_GPL(rds_connect_complete);
110 * We should *always* start with a random backoff; otherwise a broken connection 110 * We should *always* start with a random backoff; otherwise a broken connection
111 * will always take several iterations to be re-established. 111 * will always take several iterations to be re-established.
112 */ 112 */
113static void rds_queue_reconnect(struct rds_connection *conn) 113void rds_queue_reconnect(struct rds_connection *conn)
114{ 114{
115 unsigned long rand; 115 unsigned long rand;
116 116
@@ -156,58 +156,6 @@ void rds_connect_worker(struct work_struct *work)
156 } 156 }
157} 157}
158 158
159void rds_shutdown_worker(struct work_struct *work)
160{
161 struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
162
163 /* shut it down unless it's down already */
164 if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
165 /*
166 * Quiesce the connection mgmt handlers before we start tearing
167 * things down. We don't hold the mutex for the entire
168 * duration of the shutdown operation, else we may be
169 * deadlocking with the CM handler. Instead, the CM event
170 * handler is supposed to check for state DISCONNECTING
171 */
172 mutex_lock(&conn->c_cm_lock);
173 if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
174 !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
175 rds_conn_error(conn, "shutdown called in state %d\n",
176 atomic_read(&conn->c_state));
177 mutex_unlock(&conn->c_cm_lock);
178 return;
179 }
180 mutex_unlock(&conn->c_cm_lock);
181
182 mutex_lock(&conn->c_send_lock);
183 conn->c_trans->conn_shutdown(conn);
184 rds_conn_reset(conn);
185 mutex_unlock(&conn->c_send_lock);
186
187 if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
188 /* This can happen - eg when we're in the middle of tearing
189 * down the connection, and someone unloads the rds module.
 190 * Quite reproducible with loopback connections.
191 * Mostly harmless.
192 */
193 rds_conn_error(conn,
194 "%s: failed to transition to state DOWN, "
195 "current state is %d\n",
196 __func__,
197 atomic_read(&conn->c_state));
198 return;
199 }
200 }
201
202 /* Then reconnect if it's still live.
203 * The passive side of an IB loopback connection is never added
204 * to the conn hash, so we never trigger a reconnect on this
205 * conn - the reconnect is always triggered by the active peer. */
206 cancel_delayed_work(&conn->c_conn_w);
207 if (!hlist_unhashed(&conn->c_hash_node))
208 rds_queue_reconnect(conn);
209}
210
211void rds_send_worker(struct work_struct *work) 159void rds_send_worker(struct work_struct *work)
212{ 160{
213 struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work); 161 struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work);
@@ -252,15 +200,22 @@ void rds_recv_worker(struct work_struct *work)
252 } 200 }
253} 201}
254 202
203void rds_shutdown_worker(struct work_struct *work)
204{
205 struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
206
207 rds_conn_shutdown(conn);
208}
209
255void rds_threads_exit(void) 210void rds_threads_exit(void)
256{ 211{
257 destroy_workqueue(rds_wq); 212 destroy_workqueue(rds_wq);
258} 213}
259 214
260int __init rds_threads_init(void) 215int rds_threads_init(void)
261{ 216{
262 rds_wq = create_workqueue("krdsd"); 217 rds_wq = create_singlethread_workqueue("krdsd");
263 if (rds_wq == NULL) 218 if (!rds_wq)
264 return -ENOMEM; 219 return -ENOMEM;
265 220
266 return 0; 221 return 0;
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 7e1067901353..7f2ac4fec367 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -71,19 +71,28 @@ void rds_trans_unregister(struct rds_transport *trans)
71} 71}
72EXPORT_SYMBOL_GPL(rds_trans_unregister); 72EXPORT_SYMBOL_GPL(rds_trans_unregister);
73 73
74void rds_trans_put(struct rds_transport *trans)
75{
76 if (trans && trans->t_owner)
77 module_put(trans->t_owner);
78}
79
74struct rds_transport *rds_trans_get_preferred(__be32 addr) 80struct rds_transport *rds_trans_get_preferred(__be32 addr)
75{ 81{
76 struct rds_transport *ret = NULL; 82 struct rds_transport *ret = NULL;
77 int i; 83 struct rds_transport *trans;
84 unsigned int i;
78 85
79 if (IN_LOOPBACK(ntohl(addr))) 86 if (IN_LOOPBACK(ntohl(addr)))
80 return &rds_loop_transport; 87 return &rds_loop_transport;
81 88
82 down_read(&rds_trans_sem); 89 down_read(&rds_trans_sem);
83 for (i = 0; i < RDS_TRANS_COUNT; i++) 90 for (i = 0; i < RDS_TRANS_COUNT; i++) {
84 { 91 trans = transports[i];
85 if (transports[i] && (transports[i]->laddr_check(addr) == 0)) { 92
86 ret = transports[i]; 93 if (trans && (trans->laddr_check(addr) == 0) &&
94 (!trans->t_owner || try_module_get(trans->t_owner))) {
95 ret = trans;
87 break; 96 break;
88 } 97 }
89 } 98 }
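The new rds_trans_put() above pairs with the module reference that rds_trans_get_preferred() now takes via try_module_get(). A hedged caller sketch, with the error value and field names assumed from the surrounding RDS code rather than quoted from it:

	trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
	if (!trans) {
		ret = -EADDRNOTAVAIL;	/* assumed error choice */
		goto out;
	}
	rs->rs_transport = trans;
	/* ... socket lives, uses trans ... */
	rds_trans_put(rs->rs_transport);	/* balances try_module_get() */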
diff --git a/net/rds/xlist.h b/net/rds/xlist.h
new file mode 100644
index 000000000000..e6b5190daddd
--- /dev/null
+++ b/net/rds/xlist.h
@@ -0,0 +1,80 @@
1#ifndef _LINUX_XLIST_H
2#define _LINUX_XLIST_H
3
4#include <linux/stddef.h>
5#include <linux/poison.h>
6#include <linux/prefetch.h>
7#include <asm/system.h>
8
9struct xlist_head {
10 struct xlist_head *next;
11};
12
13static inline void INIT_XLIST_HEAD(struct xlist_head *list)
14{
15 list->next = NULL;
16}
17
18static inline int xlist_empty(struct xlist_head *head)
19{
20 return head->next == NULL;
21}
22
23static inline void xlist_add(struct xlist_head *new, struct xlist_head *tail,
24 struct xlist_head *head)
25{
26 struct xlist_head *cur;
27 struct xlist_head *check;
28
29 while (1) {
30 cur = head->next;
31 tail->next = cur;
32 check = cmpxchg(&head->next, cur, new);
33 if (check == cur)
34 break;
35 }
36}
37
38static inline struct xlist_head *xlist_del_head(struct xlist_head *head)
39{
40 struct xlist_head *cur;
41 struct xlist_head *check;
42 struct xlist_head *next;
43
44 while (1) {
45 cur = head->next;
46 if (!cur)
47 goto out;
48
49 next = cur->next;
50 check = cmpxchg(&head->next, cur, next);
51 if (check == cur)
52 goto out;
53 }
54out:
55 return cur;
56}
57
58static inline struct xlist_head *xlist_del_head_fast(struct xlist_head *head)
59{
60 struct xlist_head *cur;
61
62 cur = head->next;
63 if (!cur)
64 return NULL;
65
66 head->next = cur->next;
67 return cur;
68}
69
70static inline void xlist_splice(struct xlist_head *list,
71 struct xlist_head *head)
72{
73 struct xlist_head *cur;
74
75 WARN_ON(head->next);
76 cur = xchg(&list->next, NULL);
77 head->next = cur;
78}
79
80#endif
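The xlist above is a minimal lock-free LIFO: xlist_add() pushes the chain [new..tail] with a single cmpxchg() (pass new == tail for one node), xlist_del_head() pops safely under concurrency, and xlist_del_head_fast() is only valid once the caller owns the list exclusively, e.g. after xlist_splice(). A hedged kernel-style usage sketch follows, with the item type and pool name hypothetical; note that cmpxchg stacks like this carry the usual ABA caveats, so items must not be freed while other CPUs may still dereference them.

/* Hedged usage sketch; struct frag_item and frag_pool are illustrative. */
struct frag_item {
	struct xlist_head x;		/* embedded list linkage */
	void *payload;
};

static struct xlist_head frag_pool;	/* INIT_XLIST_HEAD() at init time */

static void frag_push(struct frag_item *it)
{
	xlist_add(&it->x, &it->x, &frag_pool);	/* single node: new == tail */
}

static struct frag_item *frag_pop(void)
{
	struct xlist_head *x = xlist_del_head(&frag_pool);

	return x ? container_of(x, struct frag_item, x) : NULL;
}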
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index 3713d7ecab96..1bca6d49ec96 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -142,7 +142,7 @@ static unsigned long rfkill_last_scheduled;
142static unsigned long rfkill_ratelimit(const unsigned long last) 142static unsigned long rfkill_ratelimit(const unsigned long last)
143{ 143{
144 const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY); 144 const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
145 return (time_after(jiffies, last + delay)) ? 0 : delay; 145 return time_after(jiffies, last + delay) ? 0 : delay;
146} 146}
147 147
148static void rfkill_schedule_ratelimited(void) 148static void rfkill_schedule_ratelimited(void)
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index a750a28e0221..fa5f5641a2c2 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -114,7 +114,7 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
114 if (ax25s) 114 if (ax25s)
115 ax25_cb_put(ax25s); 115 ax25_cb_put(ax25s);
116 116
117 return (neigh->ax25 != NULL); 117 return neigh->ax25 != NULL;
118} 118}
119 119
120/* 120/*
@@ -137,7 +137,7 @@ static int rose_link_up(struct rose_neigh *neigh)
137 if (ax25s) 137 if (ax25s)
138 ax25_cb_put(ax25s); 138 ax25_cb_put(ax25s);
139 139
140 return (neigh->ax25 != NULL); 140 return neigh->ax25 != NULL;
141} 141}
142 142
143/* 143/*
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2f691fb180d1..a36270a994d7 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -518,6 +518,16 @@ config NET_ACT_SKBEDIT
518 To compile this code as a module, choose M here: the 518 To compile this code as a module, choose M here: the
519 module will be called act_skbedit. 519 module will be called act_skbedit.
520 520
521config NET_ACT_CSUM
522 tristate "Checksum Updating"
523 depends on NET_CLS_ACT && INET
524 ---help---
 525 Say Y here to update common checksums after direct
 526 packet alterations.
527
528 To compile this code as a module, choose M here: the
529 module will be called act_csum.
530
521config NET_CLS_IND 531config NET_CLS_IND
522 bool "Incoming device classification" 532 bool "Incoming device classification"
523 depends on NET_CLS_U32 || NET_CLS_FW 533 depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index f14e71bfa58f..960f5dba6304 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_ACT_NAT) += act_nat.o
15obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o 15obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o
16obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o 16obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
17obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o 17obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
18obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
18obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o 19obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
19obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o 20obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
20obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o 21obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
new file mode 100644
index 000000000000..67dc7ce9b63a
--- /dev/null
+++ b/net/sched/act_csum.c
@@ -0,0 +1,595 @@
1/*
2 * Checksum updating actions
3 *
4 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <linux/types.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/spinlock.h>
18
19#include <linux/netlink.h>
20#include <net/netlink.h>
21#include <linux/rtnetlink.h>
22
23#include <linux/skbuff.h>
24
25#include <net/ip.h>
26#include <net/ipv6.h>
27#include <net/icmp.h>
28#include <linux/icmpv6.h>
29#include <linux/igmp.h>
30#include <net/tcp.h>
31#include <net/udp.h>
32#include <net/ip6_checksum.h>
33
34#include <net/act_api.h>
35
36#include <linux/tc_act/tc_csum.h>
37#include <net/tc_act/tc_csum.h>
38
39#define CSUM_TAB_MASK 15
40static struct tcf_common *tcf_csum_ht[CSUM_TAB_MASK + 1];
41static u32 csum_idx_gen;
42static DEFINE_RWLOCK(csum_lock);
43
44static struct tcf_hashinfo csum_hash_info = {
45 .htab = tcf_csum_ht,
46 .hmask = CSUM_TAB_MASK,
47 .lock = &csum_lock,
48};
49
50static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
51 [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
52};
53
54static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
55 struct tc_action *a, int ovr, int bind)
56{
57 struct nlattr *tb[TCA_CSUM_MAX + 1];
58 struct tc_csum *parm;
59 struct tcf_common *pc;
60 struct tcf_csum *p;
61 int ret = 0, err;
62
63 if (nla == NULL)
64 return -EINVAL;
65
 66 err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
67 if (err < 0)
68 return err;
69
70 if (tb[TCA_CSUM_PARMS] == NULL)
71 return -EINVAL;
72 parm = nla_data(tb[TCA_CSUM_PARMS]);
73
74 pc = tcf_hash_check(parm->index, a, bind, &csum_hash_info);
75 if (!pc) {
76 pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
77 &csum_idx_gen, &csum_hash_info);
78 if (IS_ERR(pc))
79 return PTR_ERR(pc);
80 p = to_tcf_csum(pc);
81 ret = ACT_P_CREATED;
82 } else {
83 p = to_tcf_csum(pc);
84 if (!ovr) {
85 tcf_hash_release(pc, bind, &csum_hash_info);
86 return -EEXIST;
87 }
88 }
89
90 spin_lock_bh(&p->tcf_lock);
91 p->tcf_action = parm->action;
92 p->update_flags = parm->update_flags;
93 spin_unlock_bh(&p->tcf_lock);
94
95 if (ret == ACT_P_CREATED)
96 tcf_hash_insert(pc, &csum_hash_info);
97
98 return ret;
99}
100
101static int tcf_csum_cleanup(struct tc_action *a, int bind)
102{
103 struct tcf_csum *p = a->priv;
104 return tcf_hash_release(&p->common, bind, &csum_hash_info);
105}
106
107/**
108 * tcf_csum_skb_nextlayer - Get next layer pointer
109 * @skb: sk_buff to use
 110 * @ihl: length of the headers already summed
111 * @ipl: complete packet length
112 * @jhl: next header length
113 *
 114 * Check that the expected next layer is available in the specified sk_buff.
 115 * Return a pointer to the next layer if it is, NULL otherwise.
116 */
117static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
118 unsigned int ihl, unsigned int ipl,
119 unsigned int jhl)
120{
121 int ntkoff = skb_network_offset(skb);
122 int hl = ihl + jhl;
123
124 if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
125 (skb_cloned(skb) &&
126 !skb_clone_writable(skb, hl + ntkoff) &&
127 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
128 return NULL;
129 else
130 return (void *)(skb_network_header(skb) + ihl);
131}
132
133static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
134 unsigned int ihl, unsigned int ipl)
135{
136 struct icmphdr *icmph;
137
138 icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
139 if (icmph == NULL)
140 return 0;
141
142 icmph->checksum = 0;
143 skb->csum = csum_partial(icmph, ipl - ihl, 0);
144 icmph->checksum = csum_fold(skb->csum);
145
146 skb->ip_summed = CHECKSUM_NONE;
147
148 return 1;
149}
150
151static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
152 unsigned int ihl, unsigned int ipl)
153{
154 struct igmphdr *igmph;
155
156 igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
157 if (igmph == NULL)
158 return 0;
159
160 igmph->csum = 0;
161 skb->csum = csum_partial(igmph, ipl - ihl, 0);
162 igmph->csum = csum_fold(skb->csum);
163
164 skb->ip_summed = CHECKSUM_NONE;
165
166 return 1;
167}
168
169static int tcf_csum_ipv6_icmp(struct sk_buff *skb, struct ipv6hdr *ip6h,
170 unsigned int ihl, unsigned int ipl)
171{
172 struct icmp6hdr *icmp6h;
173
174 icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
175 if (icmp6h == NULL)
176 return 0;
177
178 icmp6h->icmp6_cksum = 0;
179 skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
180 icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
181 ipl - ihl, IPPROTO_ICMPV6,
182 skb->csum);
183
184 skb->ip_summed = CHECKSUM_NONE;
185
186 return 1;
187}
188
189static int tcf_csum_ipv4_tcp(struct sk_buff *skb, struct iphdr *iph,
190 unsigned int ihl, unsigned int ipl)
191{
192 struct tcphdr *tcph;
193
194 tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
195 if (tcph == NULL)
196 return 0;
197
198 tcph->check = 0;
199 skb->csum = csum_partial(tcph, ipl - ihl, 0);
200 tcph->check = tcp_v4_check(ipl - ihl,
201 iph->saddr, iph->daddr, skb->csum);
202
203 skb->ip_summed = CHECKSUM_NONE;
204
205 return 1;
206}
207
208static int tcf_csum_ipv6_tcp(struct sk_buff *skb, struct ipv6hdr *ip6h,
209 unsigned int ihl, unsigned int ipl)
210{
211 struct tcphdr *tcph;
212
213 tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
214 if (tcph == NULL)
215 return 0;
216
217 tcph->check = 0;
218 skb->csum = csum_partial(tcph, ipl - ihl, 0);
219 tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
220 ipl - ihl, IPPROTO_TCP,
221 skb->csum);
222
223 skb->ip_summed = CHECKSUM_NONE;
224
225 return 1;
226}
227
228static int tcf_csum_ipv4_udp(struct sk_buff *skb, struct iphdr *iph,
229 unsigned int ihl, unsigned int ipl, int udplite)
230{
231 struct udphdr *udph;
232 u16 ul;
233
234 /*
 235 * Support both the UDP and UDP-Lite checksum algorithms. Don't use
 236 * udph->len to get the real length without a protocol check:
 237 * UDP-Lite uses udph->len as the checksum coverage, not the length.
 238 * Use iph->tot_len, or just ipl, instead.
239 */
240
241 udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
242 if (udph == NULL)
243 return 0;
244
245 ul = ntohs(udph->len);
246
247 if (udplite || udph->check) {
248
249 udph->check = 0;
250
251 if (udplite) {
252 if (ul == 0)
253 skb->csum = csum_partial(udph, ipl - ihl, 0);
254 else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
255 skb->csum = csum_partial(udph, ul, 0);
256 else
257 goto ignore_obscure_skb;
258 } else {
259 if (ul != ipl - ihl)
260 goto ignore_obscure_skb;
261
262 skb->csum = csum_partial(udph, ul, 0);
263 }
264
265 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
266 ul, iph->protocol,
267 skb->csum);
268
269 if (!udph->check)
270 udph->check = CSUM_MANGLED_0;
271 }
272
273 skb->ip_summed = CHECKSUM_NONE;
274
275ignore_obscure_skb:
276 return 1;
277}
278
279static int tcf_csum_ipv6_udp(struct sk_buff *skb, struct ipv6hdr *ip6h,
280 unsigned int ihl, unsigned int ipl, int udplite)
281{
282 struct udphdr *udph;
283 u16 ul;
284
285 /*
 286 * Support both the UDP and UDP-Lite checksum algorithms. Don't use
 287 * udph->len to get the real length without a protocol check:
 288 * UDP-Lite uses udph->len as the checksum coverage, not the length.
 289 * Use ip6h->payload_len + sizeof(*ip6h) ..., or just ipl, instead.
290 */
291
292 udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
293 if (udph == NULL)
294 return 0;
295
296 ul = ntohs(udph->len);
297
298 udph->check = 0;
299
300 if (udplite) {
301 if (ul == 0)
302 skb->csum = csum_partial(udph, ipl - ihl, 0);
303
304 else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
305 skb->csum = csum_partial(udph, ul, 0);
306
307 else
308 goto ignore_obscure_skb;
309 } else {
310 if (ul != ipl - ihl)
311 goto ignore_obscure_skb;
312
313 skb->csum = csum_partial(udph, ul, 0);
314 }
315
316 udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
317 udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
318 skb->csum);
319
320 if (!udph->check)
321 udph->check = CSUM_MANGLED_0;
322
323 skb->ip_summed = CHECKSUM_NONE;
324
325ignore_obscure_skb:
326 return 1;
327}
328
329static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
330{
331 struct iphdr *iph;
332 int ntkoff;
333
334 ntkoff = skb_network_offset(skb);
335
336 if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
337 goto fail;
338
339 iph = ip_hdr(skb);
340
341 switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
342 case IPPROTO_ICMP:
343 if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
344 if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
345 ntohs(iph->tot_len)))
346 goto fail;
347 break;
348 case IPPROTO_IGMP:
349 if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
350 if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
351 ntohs(iph->tot_len)))
352 goto fail;
353 break;
354 case IPPROTO_TCP:
355 if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
356 if (!tcf_csum_ipv4_tcp(skb, iph, iph->ihl * 4,
357 ntohs(iph->tot_len)))
358 goto fail;
359 break;
360 case IPPROTO_UDP:
361 if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
362 if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
363 ntohs(iph->tot_len), 0))
364 goto fail;
365 break;
366 case IPPROTO_UDPLITE:
367 if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
368 if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
369 ntohs(iph->tot_len), 1))
370 goto fail;
371 break;
372 }
373
374 if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
375 if (skb_cloned(skb) &&
376 !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
377 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
378 goto fail;
379
380 ip_send_check(iph);
381 }
382
383 return 1;
384
385fail:
386 return 0;
387}
388
389static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
390 unsigned int ixhl, unsigned int *pl)
391{
392 int off, len, optlen;
393 unsigned char *xh = (void *)ip6xh;
394
395 off = sizeof(*ip6xh);
396 len = ixhl - off;
397
398 while (len > 1) {
399 switch (xh[off]) {
400 case IPV6_TLV_PAD0:
401 optlen = 1;
402 break;
403 case IPV6_TLV_JUMBO:
404 optlen = xh[off + 1] + 2;
405 if (optlen != 6 || len < 6 || (off & 3) != 2)
406 /* wrong jumbo option length/alignment */
407 return 0;
408 *pl = ntohl(*(__be32 *)(xh + off + 2));
409 goto done;
410 default:
411 optlen = xh[off + 1] + 2;
412 if (optlen > len)
413 /* ignore obscure options */
414 goto done;
415 break;
416 }
417 off += optlen;
418 len -= optlen;
419 }
420
421done:
422 return 1;
423}
424
425static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
426{
427 struct ipv6hdr *ip6h;
428 struct ipv6_opt_hdr *ip6xh;
429 unsigned int hl, ixhl;
430 unsigned int pl;
431 int ntkoff;
432 u8 nexthdr;
433
434 ntkoff = skb_network_offset(skb);
435
436 hl = sizeof(*ip6h);
437
438 if (!pskb_may_pull(skb, hl + ntkoff))
439 goto fail;
440
441 ip6h = ipv6_hdr(skb);
442
443 pl = ntohs(ip6h->payload_len);
444 nexthdr = ip6h->nexthdr;
445
446 do {
447 switch (nexthdr) {
448 case NEXTHDR_FRAGMENT:
449 goto ignore_skb;
450 case NEXTHDR_ROUTING:
451 case NEXTHDR_HOP:
452 case NEXTHDR_DEST:
453 if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
454 goto fail;
455 ip6xh = (void *)(skb_network_header(skb) + hl);
456 ixhl = ipv6_optlen(ip6xh);
457 if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
458 goto fail;
459 if ((nexthdr == NEXTHDR_HOP) &&
460 !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
461 goto fail;
462 nexthdr = ip6xh->nexthdr;
463 hl += ixhl;
464 break;
465 case IPPROTO_ICMPV6:
466 if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
467 if (!tcf_csum_ipv6_icmp(skb, ip6h,
468 hl, pl + sizeof(*ip6h)))
469 goto fail;
470 goto done;
471 case IPPROTO_TCP:
472 if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
473 if (!tcf_csum_ipv6_tcp(skb, ip6h,
474 hl, pl + sizeof(*ip6h)))
475 goto fail;
476 goto done;
477 case IPPROTO_UDP:
478 if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
479 if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
480 pl + sizeof(*ip6h), 0))
481 goto fail;
482 goto done;
483 case IPPROTO_UDPLITE:
484 if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
485 if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
486 pl + sizeof(*ip6h), 1))
487 goto fail;
488 goto done;
489 default:
490 goto ignore_skb;
491 }
492 } while (pskb_may_pull(skb, hl + 1 + ntkoff));
493
494done:
495ignore_skb:
496 return 1;
497
498fail:
499 return 0;
500}
501
502static int tcf_csum(struct sk_buff *skb,
503 struct tc_action *a, struct tcf_result *res)
504{
505 struct tcf_csum *p = a->priv;
506 int action;
507 u32 update_flags;
508
509 spin_lock(&p->tcf_lock);
510 p->tcf_tm.lastuse = jiffies;
511 p->tcf_bstats.bytes += qdisc_pkt_len(skb);
512 p->tcf_bstats.packets++;
513 action = p->tcf_action;
514 update_flags = p->update_flags;
515 spin_unlock(&p->tcf_lock);
516
517 if (unlikely(action == TC_ACT_SHOT))
518 goto drop;
519
520 switch (skb->protocol) {
521 case cpu_to_be16(ETH_P_IP):
522 if (!tcf_csum_ipv4(skb, update_flags))
523 goto drop;
524 break;
525 case cpu_to_be16(ETH_P_IPV6):
526 if (!tcf_csum_ipv6(skb, update_flags))
527 goto drop;
528 break;
529 }
530
531 return action;
532
533drop:
534 spin_lock(&p->tcf_lock);
535 p->tcf_qstats.drops++;
536 spin_unlock(&p->tcf_lock);
537 return TC_ACT_SHOT;
538}
539
540static int tcf_csum_dump(struct sk_buff *skb,
541 struct tc_action *a, int bind, int ref)
542{
543 unsigned char *b = skb_tail_pointer(skb);
544 struct tcf_csum *p = a->priv;
545 struct tc_csum opt = {
546 .update_flags = p->update_flags,
547 .index = p->tcf_index,
548 .action = p->tcf_action,
549 .refcnt = p->tcf_refcnt - ref,
550 .bindcnt = p->tcf_bindcnt - bind,
551 };
552 struct tcf_t t;
553
554 NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt);
555 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
556 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
557 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
558 NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t);
559
560 return skb->len;
561
562nla_put_failure:
563 nlmsg_trim(skb, b);
564 return -1;
565}
566
567static struct tc_action_ops act_csum_ops = {
568 .kind = "csum",
569 .hinfo = &csum_hash_info,
570 .type = TCA_ACT_CSUM,
571 .capab = TCA_CAP_NONE,
572 .owner = THIS_MODULE,
573 .act = tcf_csum,
574 .dump = tcf_csum_dump,
575 .cleanup = tcf_csum_cleanup,
576 .lookup = tcf_hash_search,
577 .init = tcf_csum_init,
578 .walk = tcf_generic_walker
579};
580
581MODULE_DESCRIPTION("Checksum updating actions");
582MODULE_LICENSE("GPL");
583
584static int __init csum_init_module(void)
585{
586 return tcf_register_action(&act_csum_ops);
587}
588
589static void __exit csum_cleanup_module(void)
590{
591 tcf_unregister_action(&act_csum_ops);
592}
593
594module_init(csum_init_module);
595module_exit(csum_cleanup_module);
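The helpers above lean on csum_partial()/csum_fold(), which implement the RFC 1071 one's-complement Internet checksum; the CSUM_MANGLED_0 substitution exists because a transmitted UDP checksum of zero means "no checksum", so a computed zero must be sent as 0xffff instead. A plain-C model of the checksum itself, as a hedged sketch rather than the kernel's optimized implementation:

/* Hedged model: 16-bit one's-complement sum over big-endian words. */
#include <stddef.h>
#include <stdint.h>

static uint16_t inet_checksum(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t sum = 0;

	while (len > 1) {		/* sum 16-bit words */
		sum += (uint32_t)((p[0] << 8) | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, zero-padded */
		sum += (uint32_t)(p[0] << 8);
	while (sum >> 16)		/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;		/* one's complement of the sum */
}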
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index e17096e3913c..5b271a18bc3a 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -111,44 +111,41 @@ static u32 flow_get_proto(struct sk_buff *skb)
111 } 111 }
112} 112}
113 113
114static int has_ports(u8 protocol)
115{
116 switch (protocol) {
117 case IPPROTO_TCP:
118 case IPPROTO_UDP:
119 case IPPROTO_UDPLITE:
120 case IPPROTO_SCTP:
121 case IPPROTO_DCCP:
122 case IPPROTO_ESP:
123 return 1;
124 default:
125 return 0;
126 }
127}
128
129static u32 flow_get_proto_src(struct sk_buff *skb) 114static u32 flow_get_proto_src(struct sk_buff *skb)
130{ 115{
131 switch (skb->protocol) { 116 switch (skb->protocol) {
132 case htons(ETH_P_IP): { 117 case htons(ETH_P_IP): {
133 struct iphdr *iph; 118 struct iphdr *iph;
119 int poff;
134 120
135 if (!pskb_network_may_pull(skb, sizeof(*iph))) 121 if (!pskb_network_may_pull(skb, sizeof(*iph)))
136 break; 122 break;
137 iph = ip_hdr(skb); 123 iph = ip_hdr(skb);
138 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 124 if (iph->frag_off & htons(IP_MF|IP_OFFSET))
139 has_ports(iph->protocol) && 125 break;
140 pskb_network_may_pull(skb, iph->ihl * 4 + 2)) 126 poff = proto_ports_offset(iph->protocol);
141 return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4)); 127 if (poff >= 0 &&
128 pskb_network_may_pull(skb, iph->ihl * 4 + 2 + poff)) {
129 iph = ip_hdr(skb);
130 return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
131 poff));
132 }
142 break; 133 break;
143 } 134 }
144 case htons(ETH_P_IPV6): { 135 case htons(ETH_P_IPV6): {
145 struct ipv6hdr *iph; 136 struct ipv6hdr *iph;
137 int poff;
146 138
147 if (!pskb_network_may_pull(skb, sizeof(*iph) + 2)) 139 if (!pskb_network_may_pull(skb, sizeof(*iph)))
148 break; 140 break;
149 iph = ipv6_hdr(skb); 141 iph = ipv6_hdr(skb);
150 if (has_ports(iph->nexthdr)) 142 poff = proto_ports_offset(iph->nexthdr);
151 return ntohs(*(__be16 *)&iph[1]); 143 if (poff >= 0 &&
144 pskb_network_may_pull(skb, sizeof(*iph) + poff + 2)) {
145 iph = ipv6_hdr(skb);
146 return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
147 poff));
148 }
152 break; 149 break;
153 } 150 }
154 } 151 }
@@ -161,24 +158,36 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
161 switch (skb->protocol) { 158 switch (skb->protocol) {
162 case htons(ETH_P_IP): { 159 case htons(ETH_P_IP): {
163 struct iphdr *iph; 160 struct iphdr *iph;
161 int poff;
164 162
165 if (!pskb_network_may_pull(skb, sizeof(*iph))) 163 if (!pskb_network_may_pull(skb, sizeof(*iph)))
166 break; 164 break;
167 iph = ip_hdr(skb); 165 iph = ip_hdr(skb);
168 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 166 if (iph->frag_off & htons(IP_MF|IP_OFFSET))
169 has_ports(iph->protocol) && 167 break;
170 pskb_network_may_pull(skb, iph->ihl * 4 + 4)) 168 poff = proto_ports_offset(iph->protocol);
171 return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2)); 169 if (poff >= 0 &&
170 pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
171 iph = ip_hdr(skb);
172 return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
173 2 + poff));
174 }
172 break; 175 break;
173 } 176 }
174 case htons(ETH_P_IPV6): { 177 case htons(ETH_P_IPV6): {
175 struct ipv6hdr *iph; 178 struct ipv6hdr *iph;
179 int poff;
176 180
177 if (!pskb_network_may_pull(skb, sizeof(*iph) + 4)) 181 if (!pskb_network_may_pull(skb, sizeof(*iph)))
178 break; 182 break;
179 iph = ipv6_hdr(skb); 183 iph = ipv6_hdr(skb);
180 if (has_ports(iph->nexthdr)) 184 poff = proto_ports_offset(iph->nexthdr);
181 return ntohs(*(__be16 *)((void *)&iph[1] + 2)); 185 if (poff >= 0 &&
186 pskb_network_may_pull(skb, sizeof(*iph) + poff + 4)) {
187 iph = ipv6_hdr(skb);
188 return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
189 poff + 2));
190 }
182 break; 191 break;
183 } 192 }
184 } 193 }
@@ -297,6 +306,11 @@ static u32 flow_get_vlan_tag(const struct sk_buff *skb)
297 return tag & VLAN_VID_MASK; 306 return tag & VLAN_VID_MASK;
298} 307}
299 308
309static u32 flow_get_rxhash(struct sk_buff *skb)
310{
311 return skb_get_rxhash(skb);
312}
313
300static u32 flow_key_get(struct sk_buff *skb, int key) 314static u32 flow_key_get(struct sk_buff *skb, int key)
301{ 315{
302 switch (key) { 316 switch (key) {
@@ -334,6 +348,8 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
334 return flow_get_skgid(skb); 348 return flow_get_skgid(skb);
335 case FLOW_KEY_VLAN_TAG: 349 case FLOW_KEY_VLAN_TAG:
336 return flow_get_vlan_tag(skb); 350 return flow_get_vlan_tag(skb);
351 case FLOW_KEY_RXHASH:
352 return flow_get_rxhash(skb);
337 default: 353 default:
338 WARN_ON(1); 354 WARN_ON(1);
339 return 0; 355 return 0;
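The open-coded has_ports() removed above is replaced by proto_ports_offset(), which returns the byte offset of the 16-bit port pair (or SPI) within the transport header, or -1 for portless protocols. A hedged sketch of the expected definition, to be checked against <linux/in.h> in this tree:

static inline int proto_ports_offset(int proto)
{
	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:	/* SPI */
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		return 0;
	case IPPROTO_AH:	/* SPI sits after the AH header words */
		return 4;
	default:
		return -1;
	}
}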
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 3bcac8aa333c..34da5e29ea1a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -223,6 +223,11 @@ META_COLLECTOR(int_maclen)
223 dst->value = skb->mac_len; 223 dst->value = skb->mac_len;
224} 224}
225 225
226META_COLLECTOR(int_rxhash)
227{
228 dst->value = skb_get_rxhash(skb);
229}
230
226/************************************************************************** 231/**************************************************************************
227 * Netfilter 232 * Netfilter
228 **************************************************************************/ 233 **************************************************************************/
@@ -541,6 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
541 [META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off), 546 [META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off),
542 [META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend), 547 [META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend),
543 [META_ID(VLAN_TAG)] = META_FUNC(int_vlan_tag), 548 [META_ID(VLAN_TAG)] = META_FUNC(int_vlan_tag),
549 [META_ID(RXHASH)] = META_FUNC(int_rxhash),
544 } 550 }
545}; 551};
546 552
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 408eea7086aa..6fb3d41c0e41 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -360,7 +360,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
360 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16); 360 tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
361 } 361 }
362 362
363 if (!s || tsize != s->tsize || (!tab && tsize > 0)) 363 if (tsize != s->tsize || (!tab && tsize > 0))
364 return ERR_PTR(-EINVAL); 364 return ERR_PTR(-EINVAL);
365 365
366 spin_lock(&qdisc_stab_lock); 366 spin_lock(&qdisc_stab_lock);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 201cbac2b32c..3cf478d012dd 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -123,40 +123,39 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
123 case htons(ETH_P_IP): 123 case htons(ETH_P_IP):
124 { 124 {
125 const struct iphdr *iph; 125 const struct iphdr *iph;
126 int poff;
126 127
127 if (!pskb_network_may_pull(skb, sizeof(*iph))) 128 if (!pskb_network_may_pull(skb, sizeof(*iph)))
128 goto err; 129 goto err;
129 iph = ip_hdr(skb); 130 iph = ip_hdr(skb);
130 h = (__force u32)iph->daddr; 131 h = (__force u32)iph->daddr;
131 h2 = (__force u32)iph->saddr ^ iph->protocol; 132 h2 = (__force u32)iph->saddr ^ iph->protocol;
132 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 133 if (iph->frag_off & htons(IP_MF|IP_OFFSET))
133 (iph->protocol == IPPROTO_TCP || 134 break;
134 iph->protocol == IPPROTO_UDP || 135 poff = proto_ports_offset(iph->protocol);
135 iph->protocol == IPPROTO_UDPLITE || 136 if (poff >= 0 &&
136 iph->protocol == IPPROTO_SCTP || 137 pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
137 iph->protocol == IPPROTO_DCCP || 138 iph = ip_hdr(skb);
138 iph->protocol == IPPROTO_ESP) && 139 h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
139 pskb_network_may_pull(skb, iph->ihl * 4 + 4)) 140 }
140 h2 ^= *(((u32*)iph) + iph->ihl);
141 break; 141 break;
142 } 142 }
143 case htons(ETH_P_IPV6): 143 case htons(ETH_P_IPV6):
144 { 144 {
145 struct ipv6hdr *iph; 145 struct ipv6hdr *iph;
146 int poff;
146 147
147 if (!pskb_network_may_pull(skb, sizeof(*iph))) 148 if (!pskb_network_may_pull(skb, sizeof(*iph)))
148 goto err; 149 goto err;
149 iph = ipv6_hdr(skb); 150 iph = ipv6_hdr(skb);
150 h = (__force u32)iph->daddr.s6_addr32[3]; 151 h = (__force u32)iph->daddr.s6_addr32[3];
151 h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr; 152 h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
152 if ((iph->nexthdr == IPPROTO_TCP || 153 poff = proto_ports_offset(iph->nexthdr);
153 iph->nexthdr == IPPROTO_UDP || 154 if (poff >= 0 &&
154 iph->nexthdr == IPPROTO_UDPLITE || 155 pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
155 iph->nexthdr == IPPROTO_SCTP || 156 iph = ipv6_hdr(skb);
156 iph->nexthdr == IPPROTO_DCCP || 157 h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
157 iph->nexthdr == IPPROTO_ESP) && 158 }
158 pskb_network_may_pull(skb, sizeof(*iph) + 4))
159 h2 ^= *(u32*)&iph[1];
160 break; 159 break;
161 } 160 }
162 default: 161 default:
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0b85e5256434..5f1fb8bd862d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -48,6 +48,8 @@
48 * be incorporated into the next SCTP release. 48 * be incorporated into the next SCTP release.
49 */ 49 */
50 50
51#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
52
51#include <linux/types.h> 53#include <linux/types.h>
52#include <linux/fcntl.h> 54#include <linux/fcntl.h>
53#include <linux/poll.h> 55#include <linux/poll.h>
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 476caaf100ed..6c8556459a75 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -37,6 +37,8 @@
37 * be incorporated into the next SCTP release. 37 * be incorporated into the next SCTP release.
38 */ 38 */
39 39
40#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
41
40#include <linux/types.h> 42#include <linux/types.h>
41#include <linux/kernel.h> 43#include <linux/kernel.h>
42#include <linux/net.h> 44#include <linux/net.h>
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index ccb6dc48d15b..397296fb156f 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -43,6 +43,8 @@
43 * be incorporated into the next SCTP release. 43 * be incorporated into the next SCTP release.
44 */ 44 */
45 45
46#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
47
46#include <net/sctp/sctp.h> 48#include <net/sctp/sctp.h>
47#include <net/sctp/sm.h> 49#include <net/sctp/sm.h>
48#include <linux/interrupt.h> 50#include <linux/interrupt.h>
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 732689140fb8..95e0c8eda1a0 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -47,6 +47,8 @@
47 * be incorporated into the next SCTP release. 47 * be incorporated into the next SCTP release.
48 */ 48 */
49 49
50#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
51
50#include <linux/module.h> 52#include <linux/module.h>
51#include <linux/errno.h> 53#include <linux/errno.h>
52#include <linux/types.h> 54#include <linux/types.h>
@@ -336,7 +338,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk,
336 memcpy(saddr, baddr, sizeof(union sctp_addr)); 338 memcpy(saddr, baddr, sizeof(union sctp_addr));
337 SCTP_DEBUG_PRINTK("saddr: %pI6\n", &saddr->v6.sin6_addr); 339 SCTP_DEBUG_PRINTK("saddr: %pI6\n", &saddr->v6.sin6_addr);
338 } else { 340 } else {
339 printk(KERN_ERR "%s: asoc:%p Could not find a valid source " 341 pr_err("%s: asoc:%p Could not find a valid source "
340 "address for the dest:%pI6\n", 342 "address for the dest:%pI6\n",
341 __func__, asoc, &daddr->v6.sin6_addr); 343 __func__, asoc, &daddr->v6.sin6_addr);
342 } 344 }
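The pr_fmt() define added to each SCTP file above is what lets the pr_err()/pr_warn() conversions drop the hand-written "SCTP: " prefixes: pr_err() pastes pr_fmt() around its format string at preprocessing time. An illustrative expansion, with the pr_err() definition quoted approximately from <linux/kernel.h>:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

/* With KBUILD_MODNAME "sctp":
 *	pr_err("Failed to create the SCTP control socket\n");
 * becomes
 *	printk(KERN_ERR "sctp: Failed to create the SCTP control socket\n");
 */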
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index f73ec0ea93ba..8ef8e7d9eb61 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -38,6 +38,8 @@
38 * be incorporated into the next SCTP release. 38 * be incorporated into the next SCTP release.
39 */ 39 */
40 40
41#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
42
41#include <linux/kernel.h> 43#include <linux/kernel.h>
42#include <net/sctp/sctp.h> 44#include <net/sctp/sctp.h>
43 45
@@ -134,8 +136,7 @@ void sctp_dbg_objcnt_init(void)
134 ent = proc_create("sctp_dbg_objcnt", 0, 136 ent = proc_create("sctp_dbg_objcnt", 0,
135 proc_net_sctp, &sctp_objcnt_ops); 137 proc_net_sctp, &sctp_objcnt_ops);
136 if (!ent) 138 if (!ent)
137 printk(KERN_WARNING 139 pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n");
138 "sctp_dbg_objcnt: Unable to create /proc entry.\n");
139} 140}
140 141
141/* Cleanup the objcount entry in the proc filesystem. */ 142/* Cleanup the objcount entry in the proc filesystem. */
diff --git a/net/sctp/output.c b/net/sctp/output.c
index bcc4590ccaf2..60600d337a3a 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -41,6 +41,8 @@
41 * be incorporated into the next SCTP release. 41 * be incorporated into the next SCTP release.
42 */ 42 */
43 43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
44#include <linux/types.h> 46#include <linux/types.h>
45#include <linux/kernel.h> 47#include <linux/kernel.h>
46#include <linux/wait.h> 48#include <linux/wait.h>
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c04b2eb59186..8c6d379b4bb6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -46,6 +46,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/list.h>   /* For struct list_head */
 #include <linux/socket.h>
@@ -1463,23 +1465,23 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 					/* Display the end of the
 					 * current range.
 					 */
-					SCTP_DEBUG_PRINTK("-%08x",
+					SCTP_DEBUG_PRINTK_CONT("-%08x",
 							  dbg_last_ack_tsn);
 				}
 
 				/* Start a new range.  */
-				SCTP_DEBUG_PRINTK(",%08x", tsn);
+				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
 				dbg_ack_tsn = tsn;
 				break;
 
 			case 1:	/* The last TSN was NOT ACKed. */
 				if (dbg_last_kept_tsn != dbg_kept_tsn) {
 					/* Display the end of current range. */
-					SCTP_DEBUG_PRINTK("-%08x",
+					SCTP_DEBUG_PRINTK_CONT("-%08x",
 							  dbg_last_kept_tsn);
 				}
 
-				SCTP_DEBUG_PRINTK("\n");
+				SCTP_DEBUG_PRINTK_CONT("\n");
 
 				/* FALL THROUGH... */
 			default:
@@ -1526,18 +1528,18 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 				break;
 
 			if (dbg_last_kept_tsn != dbg_kept_tsn)
-				SCTP_DEBUG_PRINTK("-%08x",
+				SCTP_DEBUG_PRINTK_CONT("-%08x",
 						  dbg_last_kept_tsn);
 
-			SCTP_DEBUG_PRINTK(",%08x", tsn);
+			SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
 			dbg_kept_tsn = tsn;
 			break;
 
 		case 0:
 			if (dbg_last_ack_tsn != dbg_ack_tsn)
-				SCTP_DEBUG_PRINTK("-%08x",
+				SCTP_DEBUG_PRINTK_CONT("-%08x",
 						  dbg_last_ack_tsn);
-			SCTP_DEBUG_PRINTK("\n");
+			SCTP_DEBUG_PRINTK_CONT("\n");
 
 			/* FALL THROUGH... */
 		default:
@@ -1556,17 +1558,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 	switch (dbg_prt_state) {
 	case 0:
 		if (dbg_last_ack_tsn != dbg_ack_tsn) {
-			SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_ack_tsn);
+			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn);
 		} else {
-			SCTP_DEBUG_PRINTK("\n");
+			SCTP_DEBUG_PRINTK_CONT("\n");
 		}
 		break;
 
 	case 1:
 		if (dbg_last_kept_tsn != dbg_kept_tsn) {
-			SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_kept_tsn);
+			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn);
 		} else {
-			SCTP_DEBUG_PRINTK("\n");
+			SCTP_DEBUG_PRINTK_CONT("\n");
 		}
 	}
 #endif /* SCTP_DEBUG */
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index db3a42b8b349..2e63e9dc010e 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -22,6 +22,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
 #include <linux/socket.h>
@@ -192,7 +194,7 @@ static __init int sctpprobe_init(void)
 	if (ret)
 		goto remove_proc;
 
-	pr_info("SCTP probe registered (port=%d)\n", port);
+	pr_info("probe registered (port=%d)\n", port);
 
 	return 0;
 
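
The pr_fmt() definitions added throughout this series work because printk.h expands pr_err()/pr_warn()/pr_info() through whatever pr_fmt(fmt) is visible in that file, so every message picks up the "sctp: " module prefix without repeating it in each call site. A minimal userspace sketch of the same idiom (the fprintf-based pr_* wrappers below are illustrative stand-ins for the kernel macros, and the port value is arbitrary):

	#include <stdio.h>

	/* Define pr_fmt before the pr_* helpers, mirroring how the kernel
	 * requires defining it before including <linux/printk.h>. */
	#define pr_fmt(fmt) "sctp" ": " fmt

	#define pr_info(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__)
	#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

	int main(void)
	{
		pr_info("probe registered (port=%d)\n", 9899);	/* "sctp: probe registered (port=9899)" */
		pr_err("Failed association hash alloc\n");	/* "sctp: Failed association hash alloc" */
		return 0;
	}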
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 5027b83f1cc0..1ef29c74d85e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -46,6 +46,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/netdevice.h>
@@ -707,8 +709,7 @@ static int sctp_ctl_sock_init(void)
 				   &init_net);
 
 	if (err < 0) {
-		printk(KERN_ERR
-		       "SCTP: Failed to create the SCTP control socket.\n");
+		pr_err("Failed to create the SCTP control socket\n");
 		return err;
 	}
 	return 0;
@@ -798,7 +799,7 @@ static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
 static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp)
 {
 	/* PF_INET only supports AF_INET addresses. */
-	return (AF_INET == family);
+	return AF_INET == family;
 }
 
 /* Address matching with wildcards allowed. */
@@ -1206,7 +1207,7 @@ SCTP_STATIC __init int sctp_init(void)
 			__get_free_pages(GFP_ATOMIC, order);
 	} while (!sctp_assoc_hashtable && --order > 0);
 	if (!sctp_assoc_hashtable) {
-		printk(KERN_ERR "SCTP: Failed association hash alloc.\n");
+		pr_err("Failed association hash alloc\n");
 		status = -ENOMEM;
 		goto err_ahash_alloc;
 	}
@@ -1220,7 +1221,7 @@ SCTP_STATIC __init int sctp_init(void)
 	sctp_ep_hashtable = (struct sctp_hashbucket *)
 		kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL);
 	if (!sctp_ep_hashtable) {
-		printk(KERN_ERR "SCTP: Failed endpoint_hash alloc.\n");
+		pr_err("Failed endpoint_hash alloc\n");
 		status = -ENOMEM;
 		goto err_ehash_alloc;
 	}
@@ -1239,7 +1240,7 @@ SCTP_STATIC __init int sctp_init(void)
 			__get_free_pages(GFP_ATOMIC, order);
 	} while (!sctp_port_hashtable && --order > 0);
 	if (!sctp_port_hashtable) {
-		printk(KERN_ERR "SCTP: Failed bind hash alloc.");
+		pr_err("Failed bind hash alloc\n");
 		status = -ENOMEM;
 		goto err_bhash_alloc;
 	}
@@ -1248,8 +1249,7 @@ SCTP_STATIC __init int sctp_init(void)
 		INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
 	}
 
-	printk(KERN_INFO "SCTP: Hash tables configured "
-	       "(established %d bind %d)\n",
+	pr_info("Hash tables configured (established %d bind %d)\n",
 		sctp_assoc_hashsize, sctp_port_hashsize);
 
 	/* Disable ADDIP by default. */
@@ -1290,8 +1290,7 @@ SCTP_STATIC __init int sctp_init(void)
 
 	/* Initialize the control inode/socket for handling OOTB packets.  */
 	if ((status = sctp_ctl_sock_init())) {
-		printk (KERN_ERR
-			"SCTP: Failed to initialize the SCTP control sock.\n");
+		pr_err("Failed to initialize the SCTP control sock\n");
 		goto err_ctl_sock_init;
 	}
 
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 246f92924658..2cc46f0962ca 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -50,6 +50,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/ip.h>
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index f5e5e27cac5e..b21b218d564f 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -47,6 +47,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/skbuff.h>
 #include <linux/types.h>
 #include <linux/socket.h>
@@ -1146,26 +1148,23 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
 
 	case SCTP_DISPOSITION_VIOLATION:
 		if (net_ratelimit())
-			printk(KERN_ERR "sctp protocol violation state %d "
-			       "chunkid %d\n", state, subtype.chunk);
+			pr_err("protocol violation state %d chunkid %d\n",
+			       state, subtype.chunk);
 		break;
 
 	case SCTP_DISPOSITION_NOT_IMPL:
-		printk(KERN_WARNING "sctp unimplemented feature in state %d, "
-		       "event_type %d, event_id %d\n",
-		       state, event_type, subtype.chunk);
+		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
+			state, event_type, subtype.chunk);
 		break;
 
 	case SCTP_DISPOSITION_BUG:
-		printk(KERN_ERR "sctp bug in state %d, "
-		       "event_type %d, event_id %d\n",
+		pr_err("bug in state %d, event_type %d, event_id %d\n",
 		       state, event_type, subtype.chunk);
 		BUG();
 		break;
 
 	default:
-		printk(KERN_ERR "sctp impossible disposition %d "
-		       "in state %d, event_type %d, event_id %d\n",
+		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
 		       status, state, event_type, subtype.chunk);
 		BUG();
 		break;
@@ -1679,8 +1678,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			sctp_cmd_send_asconf(asoc);
 			break;
 		default:
-			printk(KERN_WARNING "Impossible command: %u, %p\n",
-			       cmd->verb, cmd->obj.ptr);
+			pr_warn("Impossible command: %u, %p\n",
+				cmd->verb, cmd->obj.ptr);
 			break;
 		}
 
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index d344dc481ccc..4b4eb7c96bbd 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -50,6 +50,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/ip.h>
@@ -1138,18 +1140,16 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
 	if (unlikely(!link)) {
 		if (from_addr.sa.sa_family == AF_INET6) {
 			if (net_ratelimit())
-				printk(KERN_WARNING
-				       "%s association %p could not find address %pI6\n",
-				       __func__,
-				       asoc,
-				       &from_addr.v6.sin6_addr);
+				pr_warn("%s association %p could not find address %pI6\n",
+					__func__,
+					asoc,
+					&from_addr.v6.sin6_addr);
 		} else {
 			if (net_ratelimit())
-				printk(KERN_WARNING
-				       "%s association %p could not find address %pI4\n",
-				       __func__,
-				       asoc,
-				       &from_addr.v4.sin_addr.s_addr);
+				pr_warn("%s association %p could not find address %pI4\n",
+					__func__,
+					asoc,
+					&from_addr.v4.sin_addr.s_addr);
 		}
 		return SCTP_DISPOSITION_DISCARD;
 	}
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 6d9b3aafcc5d..546d4387fb3c 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -46,6 +46,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/skbuff.h>
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
@@ -66,15 +68,19 @@ static const sctp_sm_table_entry_t bug = {
 	.name = "sctp_sf_bug"
 };
 
-#define DO_LOOKUP(_max, _type, _table) \
-	if ((event_subtype._type > (_max))) { \
-		printk(KERN_WARNING \
-		       "sctp table %p possible attack:" \
-		       " event %d exceeds max %d\n", \
-		       _table, event_subtype._type, _max); \
-		return &bug; \
-	} \
-	return &_table[event_subtype._type][(int)state];
+#define DO_LOOKUP(_max, _type, _table)					\
+({									\
+	const sctp_sm_table_entry_t *rtn;				\
+									\
+	if ((event_subtype._type > (_max))) {				\
+		pr_warn("table %p possible attack: event %d exceeds max %d\n", \
+			_table, event_subtype._type, _max);		\
+		rtn = &bug;						\
+	} else								\
+		rtn = &_table[event_subtype._type][(int)state];		\
+									\
+	rtn;								\
+})
 
 const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 						  sctp_state_t state,
@@ -83,21 +89,15 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 	switch (event_type) {
 	case SCTP_EVENT_T_CHUNK:
 		return sctp_chunk_event_lookup(event_subtype.chunk, state);
-		break;
 	case SCTP_EVENT_T_TIMEOUT:
-		DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
-			  timeout_event_table);
-		break;
-
+		return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
+				 timeout_event_table);
 	case SCTP_EVENT_T_OTHER:
-		DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other, other_event_table);
-		break;
-
+		return DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other,
+				 other_event_table);
 	case SCTP_EVENT_T_PRIMITIVE:
-		DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive,
-			  primitive_event_table);
-		break;
-
+		return DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive,
+				 primitive_event_table);
 	default:
 		/* Yikes!  We got an illegal event type.  */
 		return &bug;
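
The reworked DO_LOOKUP() above is a GNU C statement expression: the ({ ... }) block evaluates to its last expression, so the macro yields a value and each case can simply return it instead of hiding a return inside the macro body. A standalone sketch of the construct, using a toy table rather than the SCTP state table:

	#include <stdio.h>

	/* A statement expression evaluates to its final expression, so
	 * LOOKUP() behaves like a call returning a bounds-checked entry. */
	#define LOOKUP(max, table, idx)					\
	({								\
		int rtn;						\
		if ((idx) > (max)) {					\
			fprintf(stderr, "index %d exceeds max %d\n",	\
				(idx), (max));				\
			rtn = -1;					\
		} else							\
			rtn = (table)[(idx)];				\
		rtn;							\
	})

	int main(void)
	{
		int table[] = { 10, 20, 30 };

		printf("%d\n", LOOKUP(2, table, 1));	/* 20 */
		printf("%d\n", LOOKUP(2, table, 7));	/* -1, with warning */
		return 0;
	}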
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ca44917872d2..535659fdbaa1 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -57,6 +57,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/wait.h>
@@ -2458,9 +2460,8 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
 		if (params.sack_delay == 0 && params.sack_freq == 0)
 			return 0;
 	} else if (optlen == sizeof(struct sctp_assoc_value)) {
-		printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
-		       "in delayed_ack socket option deprecated\n");
-		printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
+		pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
+		pr_warn("Use struct sctp_sack_info instead\n");
 		if (copy_from_user(&params, optval, optlen))
 			return -EFAULT;
 
@@ -2868,10 +2869,8 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
 	int val;
 
 	if (optlen == sizeof(int)) {
-		printk(KERN_WARNING
-		       "SCTP: Use of int in maxseg socket option deprecated\n");
-		printk(KERN_WARNING
-		       "SCTP: Use struct sctp_assoc_value instead\n");
+		pr_warn("Use of int in maxseg socket option deprecated\n");
+		pr_warn("Use struct sctp_assoc_value instead\n");
 		if (copy_from_user(&val, optval, optlen))
 			return -EFAULT;
 		params.assoc_id = 0;
@@ -3121,10 +3120,8 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
 	int assoc_id = 0;
 
 	if (optlen == sizeof(int)) {
-		printk(KERN_WARNING
-		       "SCTP: Use of int in max_burst socket option deprecated\n");
-		printk(KERN_WARNING
-		       "SCTP: Use struct sctp_assoc_value instead\n");
+		pr_warn("Use of int in max_burst socket option deprecated\n");
+		pr_warn("Use struct sctp_assoc_value instead\n");
 		if (copy_from_user(&val, optval, optlen))
 			return -EFAULT;
 	} else if (optlen == sizeof(struct sctp_assoc_value)) {
@@ -3595,7 +3592,40 @@ out:
 /* The SCTP ioctl handler. */
 SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
-	return -ENOIOCTLCMD;
+	int rc = -ENOTCONN;
+
+	sctp_lock_sock(sk);
+
+	/*
+	 * SEQPACKET-style sockets in LISTENING state are valid, for
+	 * SCTP, so only discard TCP-style sockets in LISTENING state.
+	 */
+	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
+		goto out;
+
+	switch (cmd) {
+	case SIOCINQ: {
+		struct sk_buff *skb;
+		unsigned int amount = 0;
+
+		skb = skb_peek(&sk->sk_receive_queue);
+		if (skb != NULL) {
+			/*
+			 * We will only return the amount of this packet since
+			 * that is all that will be read.
+			 */
+			amount = skb->len;
+		}
+		rc = put_user(amount, (int __user *)arg);
+	}
+	break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+out:
+	sctp_release_sock(sk);
+	return rc;
 }
 
 /* This is the function which gets called during socket creation to
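
With this hunk an application can query the size of the next unread SCTP message the same way it would on UDP. A minimal userspace sketch (error handling elided; sd is assumed to be an open SCTP socket descriptor):

	#include <sys/ioctl.h>
	#include <linux/sockios.h>	/* SIOCINQ */

	static int next_msg_len(int sd)
	{
		int amount;

		/* SIOCINQ returns the length of the packet at the head of
		 * the receive queue, i.e. what the next read will consume. */
		if (ioctl(sd, SIOCINQ, &amount) < 0)
			return -1;
		return amount;
	}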
@@ -3854,7 +3884,7 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
 	}
 
 out:
-	return (retval);
+	return retval;
 }
 
 
@@ -3910,7 +3940,7 @@ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
 	}
 
 out:
-	return (retval);
+	return retval;
 }
 
 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
@@ -4281,9 +4311,8 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
 		if (copy_from_user(&params, optval, len))
 			return -EFAULT;
 	} else if (len == sizeof(struct sctp_assoc_value)) {
-		printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
-		       "in delayed_ack socket option deprecated\n");
-		printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
+		pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
+		pr_warn("Use struct sctp_sack_info instead\n");
 		if (copy_from_user(&params, optval, len))
 			return -EFAULT;
 	} else
@@ -4929,10 +4958,8 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
 	struct sctp_association *asoc;
 
 	if (len == sizeof(int)) {
-		printk(KERN_WARNING
-		       "SCTP: Use of int in maxseg socket option deprecated\n");
-		printk(KERN_WARNING
-		       "SCTP: Use struct sctp_assoc_value instead\n");
+		pr_warn("Use of int in maxseg socket option deprecated\n");
+		pr_warn("Use struct sctp_assoc_value instead\n");
 		params.assoc_id = 0;
 	} else if (len >= sizeof(struct sctp_assoc_value)) {
 		len = sizeof(struct sctp_assoc_value);
@@ -5023,10 +5050,8 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 	struct sctp_association *asoc;
 
 	if (len == sizeof(int)) {
-		printk(KERN_WARNING
-		       "SCTP: Use of int in max_burst socket option deprecated\n");
-		printk(KERN_WARNING
-		       "SCTP: Use struct sctp_assoc_value instead\n");
+		pr_warn("Use of int in max_burst socket option deprecated\n");
+		pr_warn("Use struct sctp_assoc_value instead\n");
 		params.assoc_id = 0;
 	} else if (len >= sizeof(struct sctp_assoc_value)) {
 		len = sizeof(struct sctp_assoc_value);
@@ -5569,7 +5594,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
 	/* Note: sk->sk_num gets filled in if ephemeral port request.  */
 	ret = sctp_get_port_local(sk, &addr);
 
-	return (ret ? 1 : 0);
+	return ret ? 1 : 0;
 }
 
 /*
@@ -5586,8 +5611,7 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
 	tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(tfm)) {
 		if (net_ratelimit()) {
-			printk(KERN_INFO
-			       "SCTP: failed to load transform for %s: %ld\n",
+			pr_info("failed to load transform for %s: %ld\n",
 				sctp_hmac_alg, PTR_ERR(tfm));
 		}
 		return -ENOSYS;
@@ -5716,13 +5740,12 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 		mask |= POLLERR;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP;
+		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 
 	/* Is it readable?  Reconsider this code with TCP-style support.  */
-	if (!skb_queue_empty(&sk->sk_receive_queue) ||
-	    (sk->sk_shutdown & RCV_SHUTDOWN))
+	if (!skb_queue_empty(&sk->sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
 
 	/* The association is either gone or not ready.  */
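
The sctp_poll() change matters to applications blocked waiting for input: once the receive side is shut down, the socket must report readable so the reader wakes and observes EOF rather than sleeping forever. A userspace sketch of that contract (assumes _GNU_SOURCE for POLLRDHUP):

	#define _GNU_SOURCE
	#include <poll.h>

	/* Block until data or a peer shutdown is observable on sd. */
	static int wait_readable(int sd)
	{
		struct pollfd pfd = { .fd = sd, .events = POLLIN | POLLRDHUP };

		if (poll(&pfd, 1, -1) < 0)
			return -1;
		/* With the hunk above, a shut-down receive side raises POLLIN
		 * too, so a subsequent recv() promptly returns 0 (EOF). */
		return pfd.revents;
	}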
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 132046cb82fc..d3ae493d234a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -48,6 +48,8 @@
  * be incorporated into the next SCTP release.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/random.h>
@@ -244,10 +246,9 @@ void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 	struct dst_entry *dst;
 
 	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
-		printk(KERN_WARNING "%s: Reported pmtu %d too low, "
-		       "using default minimum of %d\n",
-		       __func__, pmtu,
-		       SCTP_DEFAULT_MINSEGMENT);
+		pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
+			__func__, pmtu,
+			SCTP_DEFAULT_MINSEGMENT);
 		/* Use default minimum segment size and disable
 		 * pmtu discovery on this transport.
 		 */
diff --git a/net/socket.c b/net/socket.c
index 2270b941bcc7..717a5f1c8792 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -535,14 +535,13 @@ void sock_release(struct socket *sock)
 }
 EXPORT_SYMBOL(sock_release);
 
-int sock_tx_timestamp(struct msghdr *msg, struct sock *sk,
-		      union skb_shared_tx *shtx)
+int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
 {
-	shtx->flags = 0;
+	*tx_flags = 0;
 	if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
-		shtx->hardware = 1;
+		*tx_flags |= SKBTX_HW_TSTAMP;
 	if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
-		shtx->software = 1;
+		*tx_flags |= SKBTX_SW_TSTAMP;
 	return 0;
 }
 EXPORT_SYMBOL(sock_tx_timestamp);
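
For context, the old union skb_shared_tx exposed one bitfield per timestamping feature; the replacement packs the same requests into a single flags byte that can be ORed together and copied cheaply. A toy userspace illustration of the conversion (the TX_* constants are local stand-ins for the SKBTX_* flags):

	#include <stdio.h>

	#define TX_HW_TSTAMP (1 << 0)	/* plays the role of SKBTX_HW_TSTAMP */
	#define TX_SW_TSTAMP (1 << 1)	/* plays the role of SKBTX_SW_TSTAMP */

	static unsigned char make_tx_flags(int hw, int sw)
	{
		unsigned char flags = 0;	/* was: shtx->flags = 0 */

		if (hw)
			flags |= TX_HW_TSTAMP;	/* was: shtx->hardware = 1 */
		if (sw)
			flags |= TX_SW_TSTAMP;	/* was: shtx->software = 1 */
		return flags;
	}

	int main(void)
	{
		printf("%#x\n", make_tx_flags(1, 1));	/* prints 0x3 */
		return 0;
	}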
@@ -1919,7 +1918,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
 	 * Afterwards, it will be a kernel pointer. Thus the compiler-assisted
 	 * checking falls down on this.
 	 */
-	if (copy_from_user(ctl_buf, (void __user *)msg_sys.msg_control,
+	if (copy_from_user(ctl_buf,
+			   (void __user __force *)msg_sys.msg_control,
 			   ctl_len))
 		goto out_freectl;
 	msg_sys.msg_control = ctl_buf;
@@ -3054,14 +3054,19 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
 			char *optval, int *optlen)
 {
 	mm_segment_t oldfs = get_fs();
+	char __user *uoptval;
+	int __user *uoptlen;
 	int err;
 
+	uoptval = (char __user __force *) optval;
+	uoptlen = (int __user __force *) optlen;
+
 	set_fs(KERNEL_DS);
 	if (level == SOL_SOCKET)
-		err = sock_getsockopt(sock, level, optname, optval, optlen);
+		err = sock_getsockopt(sock, level, optname, uoptval, uoptlen);
 	else
-		err = sock->ops->getsockopt(sock, level, optname, optval,
-					    optlen);
+		err = sock->ops->getsockopt(sock, level, optname, uoptval,
+					    uoptlen);
 	set_fs(oldfs);
 	return err;
 }
@@ -3071,13 +3076,16 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
 			char *optval, unsigned int optlen)
 {
 	mm_segment_t oldfs = get_fs();
+	char __user *uoptval;
 	int err;
 
+	uoptval = (char __user __force *) optval;
+
 	set_fs(KERNEL_DS);
 	if (level == SOL_SOCKET)
-		err = sock_setsockopt(sock, level, optname, optval, optlen);
+		err = sock_setsockopt(sock, level, optname, uoptval, optlen);
 	else
-		err = sock->ops->setsockopt(sock, level, optname, optval,
+		err = sock->ops->setsockopt(sock, level, optname, uoptval,
 					    optlen);
 	set_fs(oldfs);
 	return err;
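
The __force casts above exist purely for sparse: kernel_getsockopt()/kernel_setsockopt() deliberately pass kernel pointers where __user pointers are expected (which is safe under set_fs(KERNEL_DS)), and __force tells the checker the address-space conversion is intentional. Simplified versions of the annotations, which expand to nothing in a normal build:

	#ifdef __CHECKER__
	# define __user  __attribute__((noderef, address_space(1)))
	# define __force __attribute__((force))
	#else
	# define __user
	# define __force
	#endif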
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index dcfc66bab2bb..597c493392ad 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1049,7 +1049,7 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
 out:
 	if (acred->machine_cred != gss_cred->gc_machine_cred)
 		return 0;
-	return (rc->cr_uid == acred->uid);
+	return rc->cr_uid == acred->uid;
 }
 
 /*
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index 310b78e99456..c586e92bcf76 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -76,19 +76,19 @@ static int
 der_length_size( int length)
 {
 	if (length < (1<<7))
-		return(1);
+		return 1;
 	else if (length < (1<<8))
-		return(2);
+		return 2;
 #if (SIZEOF_INT == 2)
 	else
-		return(3);
+		return 3;
 #else
 	else if (length < (1<<16))
-		return(3);
+		return 3;
 	else if (length < (1<<24))
-		return(4);
+		return 4;
 	else
-		return(5);
+		return 5;
 #endif
 }
 
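
der_length_size() computes how many octets a DER definite-length encoding needs: lengths below 128 use one short-form byte, while longer values use a prefix octet (0x81, 0x82, ...) followed by big-endian length octets. A standalone copy of the 32-bit branch with a worked example:

	#include <stdio.h>

	static int der_length_size(int length)
	{
		if (length < (1 << 7))
			return 1;	/* short form: single byte */
		else if (length < (1 << 8))
			return 2;	/* 0x81 + 1 length octet */
		else if (length < (1 << 16))
			return 3;	/* 0x82 + 2 length octets */
		else if (length < (1 << 24))
			return 4;
		else
			return 5;
	}

	int main(void)
	{
		/* 300 encodes as 0x82 0x01 0x2c, so the answer is 3. */
		printf("%d\n", der_length_size(300));
		return 0;
	}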
@@ -121,14 +121,14 @@ der_read_length(unsigned char **buf, int *bufsize)
 	int ret;
 
 	if (*bufsize < 1)
-		return(-1);
+		return -1;
 	sf = *(*buf)++;
 	(*bufsize)--;
 	if (sf & 0x80) {
 		if ((sf &= 0x7f) > ((*bufsize)-1))
-			return(-1);
+			return -1;
 		if (sf > SIZEOF_INT)
-			return (-1);
+			return -1;
 		ret = 0;
 		for (; sf; sf--) {
 			ret = (ret<<8) + (*(*buf)++);
@@ -138,7 +138,7 @@ der_read_length(unsigned char **buf, int *bufsize)
 		ret = sf;
 	}
 
-	return(ret);
+	return ret;
 }
 
 /* returns the length of a token, given the mech oid and the body size */
@@ -148,7 +148,7 @@ g_token_size(struct xdr_netobj *mech, unsigned int body_size)
 {
 	/* set body_size to sequence contents size */
 	body_size += 2 + (int) mech->len;	/* NEED overflow check */
-	return(1 + der_length_size(body_size) + body_size);
+	return 1 + der_length_size(body_size) + body_size;
 }
 
 EXPORT_SYMBOL_GPL(g_token_size);
@@ -186,27 +186,27 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
 	int ret = 0;
 
 	if ((toksize-=1) < 0)
-		return(G_BAD_TOK_HEADER);
+		return G_BAD_TOK_HEADER;
 	if (*buf++ != 0x60)
-		return(G_BAD_TOK_HEADER);
+		return G_BAD_TOK_HEADER;
 
 	if ((seqsize = der_read_length(&buf, &toksize)) < 0)
-		return(G_BAD_TOK_HEADER);
+		return G_BAD_TOK_HEADER;
 
 	if (seqsize != toksize)
-		return(G_BAD_TOK_HEADER);
+		return G_BAD_TOK_HEADER;
 
 	if ((toksize-=1) < 0)
-		return(G_BAD_TOK_HEADER);
+		return G_BAD_TOK_HEADER;
 	if (*buf++ != 0x06)
-		return(G_BAD_TOK_HEADER);
+		return G_BAD_TOK_HEADER;
 
 	if ((toksize-=1) < 0)
-		return(G_BAD_TOK_HEADER);
+		return G_BAD_TOK_HEADER;
 	toid.len = *buf++;
 
 	if ((toksize-=toid.len) < 0)
-		return(G_BAD_TOK_HEADER);
+		return G_BAD_TOK_HEADER;
 	toid.data = buf;
 	buf+=toid.len;
 
@@ -217,17 +217,17 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
 	   to return G_BAD_TOK_HEADER if the token header is in fact bad */
 
 	if ((toksize-=2) < 0)
-		return(G_BAD_TOK_HEADER);
+		return G_BAD_TOK_HEADER;
 
 	if (ret)
-		return(ret);
+		return ret;
 
 	if (!ret) {
 		*buf_in = buf;
 		*body_size = toksize;
 	}
 
-	return(ret);
+	return ret;
 }
 
 EXPORT_SYMBOL_GPL(g_verify_token_header);
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 415c013ba382..62ac90c62cb1 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -162,5 +162,5 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
 	*seqnum = ((plain[0]) |
 		   (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
 
-	return (0);
+	return 0;
 }
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 2689de39dc78..8b4061049d76 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -331,7 +331,7 @@ gss_delete_sec_context(struct gss_ctx **context_handle)
 		*context_handle);
 
 	if (!*context_handle)
-		return(GSS_S_NO_CONTEXT);
+		return GSS_S_NO_CONTEXT;
 	if ((*context_handle)->internal_ctx_id)
 		(*context_handle)->mech_type->gm_ops
 			->gss_delete_sec_context((*context_handle)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index cace6049e4a5..aa5dbda6608c 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -376,7 +376,7 @@ int rpc_queue_empty(struct rpc_wait_queue *queue)
 	spin_lock_bh(&queue->lock);
 	res = queue->qlen;
 	spin_unlock_bh(&queue->lock);
-	return (res == 0);
+	return res == 0;
 }
 EXPORT_SYMBOL_GPL(rpc_queue_empty);
 
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index c048543ffbeb..2ddc351b3be9 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -89,7 +89,7 @@ int tipc_addr_domain_valid(u32 addr)
 
 int tipc_addr_node_valid(u32 addr)
 {
-	return (tipc_addr_domain_valid(addr) && tipc_node(addr));
+	return tipc_addr_domain_valid(addr) && tipc_node(addr);
 }
 
 int tipc_in_scope(u32 domain, u32 addr)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a008c6689305..ecfaac10d0b4 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -143,6 +143,19 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
 }
 
 
+static void bclink_set_last_sent(void)
+{
+	if (bcl->next_out)
+		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
+	else
+		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
+}
+
+u32 tipc_bclink_get_last_sent(void)
+{
+	return bcl->fsm_msg_cnt;
+}
+
 /**
  * bclink_set_gap - set gap according to contents of current deferred pkt queue
  *
@@ -171,7 +184,7 @@ static void bclink_set_gap(struct tipc_node *n_ptr)
 
 static int bclink_ack_allowed(u32 n)
 {
-	return((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
+	return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
}
 
 
@@ -237,8 +250,10 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 
 	/* Try resolving broadcast link congestion, if necessary */
 
-	if (unlikely(bcl->next_out))
+	if (unlikely(bcl->next_out)) {
 		tipc_link_push_queue(bcl);
+		bclink_set_last_sent();
+	}
 	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
 		tipc_link_wakeup_ports(bcl, 0);
 	spin_unlock_bh(&bc_lock);
@@ -395,7 +410,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
 	if (unlikely(res == -ELINKCONG))
 		buf_discard(buf);
 	else
-		bcl->stats.sent_info++;
+		bclink_set_last_sent();
 
 	if (bcl->out_queue_size > bcl->stats.max_queue_sz)
 		bcl->stats.max_queue_sz = bcl->out_queue_size;
@@ -529,15 +544,6 @@ receive:
 	tipc_node_unlock(node);
 }
 
-u32 tipc_bclink_get_last_sent(void)
-{
-	u32 last_sent = mod(bcl->next_out_no - 1);
-
-	if (bcl->next_out)
-		last_sent = mod(buf_seqno(bcl->next_out) - 1);
-	return last_sent;
-}
-
 u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
 {
 	return (n_ptr->bclink.supported &&
@@ -570,6 +576,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 		msg = buf_msg(buf);
 		msg_set_non_seq(msg, 1);
 		msg_set_mc_netid(msg, tipc_net_id);
+		bcl->stats.sent_info++;
 	}
 
 	/* Send buffer over bearers until all targets reached */
@@ -609,11 +616,13 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 		bcbearer->remains = bcbearer->remains_new;
 	}
 
-	/* Unable to reach all targets */
+	/*
+	 * Unable to reach all targets (indicate success, since currently
+	 * there isn't code in place to properly block & unblock the
+	 * pseudo-bearer used by the broadcast link)
+	 */
 
-	bcbearer->bearer.publ.blocked = 1;
-	bcl->stats.bearer_congs++;
-	return 1;
+	return TIPC_OK;
 }
 
 /**
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 52ae17b2583e..9c10c6b7c12b 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -63,7 +63,7 @@ static int media_name_valid(const char *name)
 	len = strlen(name);
 	if ((len + 1) > TIPC_MAX_MEDIA_NAME)
 		return 0;
-	return (strspn(name, tipc_alphabet) == len);
+	return strspn(name, tipc_alphabet) == len;
 }
 
 /**
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 696468117985..466b861dab91 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -169,6 +169,7 @@ void tipc_core_stop(void)
 	tipc_nametbl_stop();
 	tipc_ref_table_stop();
 	tipc_socket_stop();
+	tipc_log_resize(0);
 }
 
 /**
@@ -203,7 +204,9 @@ static int __init tipc_init(void)
 {
 	int res;
 
-	tipc_log_resize(CONFIG_TIPC_LOG);
+	if (tipc_log_resize(CONFIG_TIPC_LOG) != 0)
+		warn("Unable to create log buffer\n");
+
 	info("Activated (version " TIPC_MOD_VER
 	     " compiled " __DATE__ " " __TIME__ ")\n");
 
@@ -230,7 +233,6 @@ static void __exit tipc_exit(void)
 	tipc_core_stop_net();
 	tipc_core_stop();
 	info("Deactivated\n");
-	tipc_log_resize(0);
 }
 
 module_init(tipc_init);
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index 1885a7edb0c8..6569d45bfb9a 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -134,7 +134,7 @@ void tipc_printbuf_reset(struct print_buf *pb)
 
 int tipc_printbuf_empty(struct print_buf *pb)
 {
-	return (!pb->buf || (pb->crs == pb->buf));
+	return !pb->buf || (pb->crs == pb->buf);
 }
 
 /**
@@ -169,7 +169,7 @@ int tipc_printbuf_validate(struct print_buf *pb)
 			tipc_printf(pb, err);
 		}
 	}
-	return (pb->crs - pb->buf + 1);
+	return pb->crs - pb->buf + 1;
 }
 
 /**
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index fc1fcf5e6b53..f28d1ae93125 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -203,6 +203,14 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
 			return;
 		}
 		spin_lock_bh(&n_ptr->lock);
+
+		/* Don't talk to neighbor during cleanup after last session */
+
+		if (n_ptr->cleanup_required) {
+			spin_unlock_bh(&n_ptr->lock);
+			return;
+		}
+
 		link = n_ptr->links[b_ptr->identity];
 		if (!link) {
 			dbg("creating link\n");
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 6230d16020c4..6e988ba485fd 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -72,17 +72,26 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
 {
 	struct sk_buff *clone;
 	struct net_device *dev;
+	int delta;
 
 	clone = skb_clone(buf, GFP_ATOMIC);
-	if (clone) {
-		skb_reset_network_header(clone);
-		dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
-		clone->dev = dev;
-		dev_hard_header(clone, dev, ETH_P_TIPC,
-				&dest->dev_addr.eth_addr,
-				dev->dev_addr, clone->len);
-		dev_queue_xmit(clone);
+	if (!clone)
+		return 0;
+
+	dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+	delta = dev->hard_header_len - skb_headroom(buf);
+
+	if ((delta > 0) &&
+	    pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+		kfree_skb(clone);
+		return 0;
 	}
+
+	skb_reset_network_header(clone);
+	clone->dev = dev;
+	dev_hard_header(clone, dev, ETH_P_TIPC, &dest->dev_addr.eth_addr,
+			dev->dev_addr, clone->len);
+	dev_queue_xmit(clone);
 	return 0;
 }
 
@@ -92,15 +101,12 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
  * Accept only packets explicitly sent to this node, or broadcast packets;
  * ignores packets sent using Ethernet multicast, and traffic sent to other
  * nodes (which can happen if interface is running in promiscuous mode).
- * Routine truncates any Ethernet padding/CRC appended to the message,
- * and ensures message size matches actual length
 */
 
 static int recv_msg(struct sk_buff *buf, struct net_device *dev,
 		    struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
-	u32 size;
 
 	if (!net_eq(dev_net(dev), &init_net)) {
 		kfree_skb(buf);
@@ -109,13 +115,9 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
 
 	if (likely(eb_ptr->bearer)) {
 		if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
-			size = msg_size((struct tipc_msg *)buf->data);
-			skb_trim(buf, size);
-			if (likely(buf->len == size)) {
-				buf->next = NULL;
-				tipc_recv_msg(buf, eb_ptr->bearer);
-				return 0;
-			}
+			buf->next = NULL;
+			tipc_recv_msg(buf, eb_ptr->bearer);
+			return 0;
 		}
 	}
 	kfree_skb(buf);
@@ -133,6 +135,16 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
 	struct eth_bearer *eb_ptr = &eth_bearers[0];
 	struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
 	char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
+	int pending_dev = 0;
+
+	/* Find unused Ethernet bearer structure */
+
+	while (eb_ptr->dev) {
+		if (!eb_ptr->bearer)
+			pending_dev++;
+		if (++eb_ptr == stop)
+			return pending_dev ? -EAGAIN : -EDQUOT;
+	}
 
 	/* Find device with specified name */
 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a3616b99529b..b8cf1e9d0b86 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -239,13 +239,13 @@ int tipc_link_is_up(struct link *l_ptr)
 {
 	if (!l_ptr)
 		return 0;
-	return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
+	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
 }
 
 int tipc_link_is_active(struct link *l_ptr)
 {
-	return ((l_ptr->owner->active_links[0] == l_ptr) ||
-		(l_ptr->owner->active_links[1] == l_ptr));
+	return	(l_ptr->owner->active_links[0] == l_ptr) ||
+		(l_ptr->owner->active_links[1] == l_ptr);
 }
 
 /**
@@ -1802,6 +1802,15 @@ static int link_recv_buf_validate(struct sk_buff *buf)
 	return pskb_may_pull(buf, hdr_size);
 }
 
+/**
+ * tipc_recv_msg - process TIPC messages arriving from off-node
+ * @head: pointer to message buffer chain
+ * @tb_ptr: pointer to bearer message arrived on
+ *
+ * Invoked with no locks held. Bearer pointer must point to a valid bearer
+ * structure (i.e. cannot be NULL), but bearer can be inactive.
+ */
+
 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
 {
 	read_lock_bh(&tipc_net_lock);
@@ -1819,6 +1828,11 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
 
 		head = head->next;
 
+		/* Ensure bearer is still enabled */
+
+		if (unlikely(!b_ptr->active))
+			goto cont;
+
 		/* Ensure message is well-formed */
 
 		if (unlikely(!link_recv_buf_validate(buf)))
@@ -1855,13 +1869,22 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
 			goto cont;
 		}
 
-		/* Locate unicast link endpoint that should handle message */
+		/* Locate neighboring node that sent message */
 
 		n_ptr = tipc_node_find(msg_prevnode(msg));
 		if (unlikely(!n_ptr))
 			goto cont;
 		tipc_node_lock(n_ptr);
 
+		/* Don't talk to neighbor during cleanup after last session */
+
+		if (n_ptr->cleanup_required) {
+			tipc_node_unlock(n_ptr);
+			goto cont;
+		}
+
+		/* Locate unicast link endpoint that should handle message */
+
 		l_ptr = n_ptr->links[b_ptr->identity];
 		if (unlikely(!l_ptr)) {
 			tipc_node_unlock(n_ptr);
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 2e5385c47d30..26151d30589d 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -279,12 +279,12 @@ static inline int between(u32 lower, u32 upper, u32 n)
 
 static inline int less_eq(u32 left, u32 right)
 {
-	return (mod(right - left) < 32768u);
+	return mod(right - left) < 32768u;
 }
 
 static inline int less(u32 left, u32 right)
 {
-	return (less_eq(left, right) && (mod(right) != mod(left)));
+	return less_eq(left, right) && (mod(right) != mod(left));
 }
 
 static inline u32 lesser(u32 left, u32 right)
@@ -299,32 +299,32 @@ static inline u32 lesser(u32 left, u32 right)
 
 static inline int link_working_working(struct link *l_ptr)
 {
-	return (l_ptr->state == WORKING_WORKING);
+	return l_ptr->state == WORKING_WORKING;
 }
 
 static inline int link_working_unknown(struct link *l_ptr)
 {
-	return (l_ptr->state == WORKING_UNKNOWN);
+	return l_ptr->state == WORKING_UNKNOWN;
 }
 
 static inline int link_reset_unknown(struct link *l_ptr)
 {
-	return (l_ptr->state == RESET_UNKNOWN);
+	return l_ptr->state == RESET_UNKNOWN;
 }
 
 static inline int link_reset_reset(struct link *l_ptr)
 {
-	return (l_ptr->state == RESET_RESET);
+	return l_ptr->state == RESET_RESET;
 }
 
 static inline int link_blocked(struct link *l_ptr)
 {
-	return (l_ptr->exp_msg_count || l_ptr->blocked);
+	return l_ptr->exp_msg_count || l_ptr->blocked;
 }
 
 static inline int link_congested(struct link *l_ptr)
 {
-	return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
+	return l_ptr->out_queue_size >= l_ptr->queue_limit[0];
 }
 
 #endif
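
The parentheses cleanups above leave the serial-number arithmetic unchanged: TIPC link sequence numbers live in a 16-bit space, and less_eq() treats the forward distance modulo 2^16 as "less" when it is under half the space, so comparisons keep working across wraparound. A userspace copy of the two helpers (16-bit mask per TIPC's mod()):

	#include <stdio.h>

	static unsigned int mod(unsigned int x)
	{
		return x & 0xffffu;	/* sequence space is 2^16 */
	}

	static int less_eq(unsigned int left, unsigned int right)
	{
		return mod(right - left) < 32768u;
	}

	int main(void)
	{
		printf("%d\n", less_eq(0xfffe, 0x0003));	/* 1: 5 steps forward */
		printf("%d\n", less_eq(0x0003, 0xfffe));	/* 0: 65531 steps forward */
		return 0;
	}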
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 995d2da35b01..031aad18efce 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -104,7 +104,7 @@ static inline u32 msg_user(struct tipc_msg *m)
 
 static inline u32 msg_isdata(struct tipc_msg *m)
 {
-	return (msg_user(m) <= TIPC_CRITICAL_IMPORTANCE);
+	return msg_user(m) <= TIPC_CRITICAL_IMPORTANCE;
 }
 
 static inline void msg_set_user(struct tipc_msg *m, u32 n)
@@ -289,7 +289,7 @@ static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
 
 static inline int msg_is_dest(struct tipc_msg *m, u32 d)
 {
-	return(msg_short(m) || (msg_destnode(m) == d));
+	return msg_short(m) || (msg_destnode(m) == d);
 }
 
 static inline u32 msg_routed(struct tipc_msg *m)
@@ -632,7 +632,7 @@ static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
 
 static inline u32 msg_max_pkt(struct tipc_msg *m)
 {
-	return (msg_bits(m, 9, 16, 0xffff) * 4);
+	return msg_bits(m, 9, 16, 0xffff) * 4;
 }
 
 static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n)
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 8ba79620db3f..9ca4b0689237 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -116,7 +116,7 @@ DEFINE_RWLOCK(tipc_nametbl_lock);
 
 static int hash(int x)
 {
-	return(x & (tipc_nametbl_size - 1));
+	return x & (tipc_nametbl_size - 1);
 }
 
 /**
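
hash() relies on tipc_nametbl_size being a power of two, which makes the mask equivalent to a modulo without a division. A sketch with the invariant made explicit (table size parameterized here purely for illustration):

	#include <assert.h>

	static int hash(int x, unsigned int tbl_size)
	{
		/* x & (size - 1) == x % size only for power-of-two sizes */
		assert(tbl_size != 0 && (tbl_size & (tbl_size - 1)) == 0);
		return x & (int)(tbl_size - 1);
	}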
@@ -613,8 +613,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
 }
 
 /*
- * tipc_nametbl_translate(): Translate tipc_name -> tipc_portid.
- * Very time-critical.
+ * tipc_nametbl_translate - translate name to port id
  *
 * Note: on entry 'destnode' is the search domain used during translation;
 * on exit it passes back the node address of the matching port (if any)
@@ -685,7 +684,6 @@ found:
 	}
 	spin_unlock_bh(&seq->lock);
 not_found:
-	*destnode = 0;
 	read_unlock_bh(&tipc_nametbl_lock);
 	return 0;
 }
@@ -877,7 +875,7 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
 		       u32 index)
 {
 	char portIdStr[27];
-	char *scopeStr;
+	const char *scope_str[] = {"", " zone", " cluster", " node"};
 	struct publication *publ = sseq->zone_list;
 
 	tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
@@ -893,15 +891,8 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
 			 tipc_node(publ->node), publ->ref);
 		tipc_printf(buf, "%-26s ", portIdStr);
 		if (depth > 3) {
-			if (publ->node != tipc_own_addr)
-				scopeStr = "";
-			else if (publ->scope == TIPC_NODE_SCOPE)
-				scopeStr = "node";
-			else if (publ->scope == TIPC_CLUSTER_SCOPE)
-				scopeStr = "cluster";
-			else
-				scopeStr = "zone";
-			tipc_printf(buf, "%-10u %s", publ->key, scopeStr);
+			tipc_printf(buf, "%-10u %s", publ->key,
+				    scope_str[publ->scope]);
 		}
 
 		publ = publ->zone_list_next;
@@ -951,24 +942,19 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
 
 static void nametbl_header(struct print_buf *buf, u32 depth)
 {
-	tipc_printf(buf, "Type       ");
-
-	if (depth > 1)
-		tipc_printf(buf, "Lower      Upper      ");
-	if (depth > 2)
-		tipc_printf(buf, "Port Identity              ");
-	if (depth > 3)
-		tipc_printf(buf, "Publication");
-
-	tipc_printf(buf, "\n-----------");
-
-	if (depth > 1)
-		tipc_printf(buf, "--------------------- ");
-	if (depth > 2)
-		tipc_printf(buf, "-------------------------- ");
-	if (depth > 3)
-		tipc_printf(buf, "------------------");
-
+	const char *header[] = {
+		"Type       ",
+		"Lower      Upper      ",
+		"Port Identity              ",
+		"Publication Scope"
+	};
+
+	int i;
+
+	if (depth > 4)
+		depth = 4;
+	for (i = 0; i < depth; i++)
+		tipc_printf(buf, header[i]);
 	tipc_printf(buf, "\n");
 }
 
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f61b7694138b..7e05af47a196 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -248,6 +248,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
 
 	/* Handle message for another node */
 	msg_dbg(msg, "NET>SEND>: ");
+	skb_trim(buf, msg_size(msg));
 	tipc_link_send(buf, dnode, msg_link_selector(msg));
 }
 
diff --git a/net/tipc/node.c b/net/tipc/node.c
index b634942caba5..7c49cd056df7 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -237,23 +237,22 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
 
 int tipc_node_has_active_links(struct tipc_node *n_ptr)
 {
-	return (n_ptr &&
-		((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
+	return n_ptr->active_links[0] != NULL;
 }
 
 int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
 {
-	return (n_ptr->working_links > 1);
+	return n_ptr->working_links > 1;
 }
 
 static int tipc_node_has_active_routes(struct tipc_node *n_ptr)
 {
-	return (n_ptr && (n_ptr->last_router >= 0));
+	return n_ptr && (n_ptr->last_router >= 0);
 }
 
 int tipc_node_is_up(struct tipc_node *n_ptr)
 {
-	return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr));
+	return tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr);
 }
 
 struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
@@ -384,6 +383,20 @@ static void node_established_contact(struct tipc_node *n_ptr)
 				  tipc_highest_allowed_slave);
 }
 
+static void node_cleanup_finished(unsigned long node_addr)
+{
+	struct tipc_node *n_ptr;
+
+	read_lock_bh(&tipc_net_lock);
+	n_ptr = tipc_node_find(node_addr);
+	if (n_ptr) {
+		tipc_node_lock(n_ptr);
+		n_ptr->cleanup_required = 0;
+		tipc_node_unlock(n_ptr);
+	}
+	read_unlock_bh(&tipc_net_lock);
+}
+
 static void node_lost_contact(struct tipc_node *n_ptr)
 {
 	struct cluster *c_ptr;
@@ -458,6 +471,11 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 		tipc_k_signal((Handler)ns->handle_node_down,
 			      (unsigned long)ns->usr_handle);
 	}
+
+	/* Prevent re-contact with node until all cleanup is done */
+
+	n_ptr->cleanup_required = 1;
+	tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
 }
 
 /**
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 6f990da5d143..45f3db3a595d 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -52,6 +52,7 @@
  * @active_links: pointers to active links to node
 * @links: pointers to all links to node
 * @working_links: number of working links to node (both active and standby)
+ * @cleanup_required: non-zero if cleaning up after a prior loss of contact
 * @link_cnt: number of links to node
 * @permit_changeover: non-zero if node has redundant links to this system
 * @routers: bitmap (used for multicluster communication)
@@ -78,6 +79,7 @@ struct tipc_node {
 	struct link *links[MAX_BEARERS];
 	int link_cnt;
 	int working_links;
+	int cleanup_required;
 	int permit_changeover;
 	u32 routers[512/32];
 	int last_router;
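
The cleanup_required flag added to net/tipc/node.c and node.h above follows a defer-then-clear pattern: the loss-of-contact path quarantines the node immediately, and a deferred handler lifts the quarantine once teardown has finished. A minimal self-contained sketch of the pattern (names are illustrative, not the TIPC implementation; the kernel defers the second step via tipc_k_signal(), while this sketch calls it directly to stay compilable):

#include <stddef.h>

struct node_state {
	int cleanup_required;		/* non-zero while teardown is pending */
};

static void cleanup_finished(struct node_state *n)
{
	n->cleanup_required = 0;	/* safe to accept the node again */
}

static void lose_contact(struct node_state *n)
{
	n->cleanup_required = 1;	/* reject re-contact from now on */
	cleanup_finished(n);		/* kernel runs this later, deferred */
}

int main(void)
{
	struct node_state n = { 0 };
	lose_contact(&n);
	return n.cleanup_required;	/* 0: cleanup completed */
}
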
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 0737680e9266..d760336f2ca8 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -588,19 +588,10 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
 	if (!p_ptr) {
 		err = TIPC_ERR_NO_PORT;
 	} else if (p_ptr->publ.connected) {
-		if (port_peernode(p_ptr) != msg_orignode(msg))
+		if ((port_peernode(p_ptr) != msg_orignode(msg)) ||
+		    (port_peerport(p_ptr) != msg_origport(msg))) {
 			err = TIPC_ERR_NO_PORT;
-		if (port_peerport(p_ptr) != msg_origport(msg))
-			err = TIPC_ERR_NO_PORT;
-		if (!err && msg_routed(msg)) {
-			u32 seqno = msg_transp_seqno(msg);
-			u32 myno = ++p_ptr->last_in_seqno;
-			if (seqno != myno) {
-				err = TIPC_ERR_NO_PORT;
-				abort_buf = port_build_self_abort_msg(p_ptr, err);
-			}
-		}
-		if (msg_type(msg) == CONN_ACK) {
+		} else if (msg_type(msg) == CONN_ACK) {
 			int wakeup = tipc_port_congested(p_ptr) &&
 				     p_ptr->publ.congested &&
 				     p_ptr->wakeup;
@@ -1473,7 +1464,7 @@ int tipc_forward2name(u32 ref,
 	msg_set_destnode(msg, destnode);
 	msg_set_destport(msg, destport);
 
-	if (likely(destport || destnode)) {
+	if (likely(destport)) {
 		p_ptr->sent++;
 		if (likely(destnode == tipc_own_addr))
 			return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
@@ -1551,7 +1542,7 @@ int tipc_forward_buf2name(u32 ref,
 	skb_push(buf, LONG_H_SIZE);
 	skb_copy_to_linear_data(buf, msg, LONG_H_SIZE);
 	msg_dbg(buf_msg(buf),"PREP:");
-	if (likely(destport || destnode)) {
+	if (likely(destport)) {
 		p_ptr->sent++;
 		if (destnode == tipc_own_addr)
 			return tipc_port_recv_msg(buf);
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 8d1652aab298..e74bd9563739 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -157,7 +157,7 @@ static inline u32 tipc_peer_node(struct port *p_ptr)
 
 static inline int tipc_port_congested(struct port *p_ptr)
 {
-	return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
+	return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
 }
 
 /**
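
The congestion test simplified above is a plain sliding-window check: a port counts as congested once its unacknowledged message count reaches twice the flow-control window. A self-contained sketch of the arithmetic (the window size here is illustrative only; the real value is TIPC_FLOW_CONTROL_WIN from TIPC's headers):

#include <stdio.h>

#define FLOW_CONTROL_WIN 512	/* illustrative, not TIPC's constant value */

static int port_congested(unsigned int sent, unsigned int acked)
{
	return (sent - acked) >= (FLOW_CONTROL_WIN * 2);
}

int main(void)
{
	printf("%d\n", port_congested(1500, 476));	/* 1024 unacked: congested */
	printf("%d\n", port_congested(1500, 477));	/* 1023 unacked: not yet */
	return 0;
}
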
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 66e889ba48fd..33217fc3d697 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -64,6 +64,7 @@ struct tipc_sock {
 	struct sock sk;
 	struct tipc_port *p;
 	struct tipc_portid peer_name;
+	long conn_timeout;
 };
 
 #define tipc_sk(sk) ((struct tipc_sock *)(sk))
@@ -240,9 +241,9 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
 	sock->state = state;
 
 	sock_init_data(sock, sk);
-	sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
 	sk->sk_backlog_rcv = backlog_rcv;
 	tipc_sk(sk)->p = tp_ptr;
+	tipc_sk(sk)->conn_timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
 
 	spin_unlock_bh(tp_ptr->lock);
 
@@ -429,36 +430,55 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
  * to handle any preventable race conditions, so TIPC will do the same ...
  *
  * TIPC sets the returned events as follows:
- * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
- *    or if a connection-oriented socket is does not have an active connection
- *    (i.e. a read operation will not block).
- * b) POLLOUT is set except when a socket's connection has been terminated
- *    (i.e. a write operation will not block).
- * c) POLLHUP is set when a socket's connection has been terminated.
- *
- * IMPORTANT: The fact that a read or write operation will not block does NOT
- * imply that the operation will succeed!
+ *
+ * socket state		flags set
+ * ------------		---------
+ * unconnected		no read flags
+ *			no write flags
+ *
+ * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
+ *			no write flags
+ *
+ * connected		POLLIN/POLLRDNORM if data in rx queue
+ *			POLLOUT if port is not congested
+ *
+ * disconnecting	POLLIN/POLLRDNORM/POLLHUP
+ *			no write flags
+ *
+ * listening		POLLIN if SYN in rx queue
+ *			no write flags
+ *
+ * ready		POLLIN/POLLRDNORM if data in rx queue
+ * [connectionless]	POLLOUT (since port cannot be congested)
+ *
+ * IMPORTANT: The fact that a read or write operation is indicated does NOT
+ * imply that the operation will succeed, merely that it should be performed
+ * and will not block.
  */
 
 static unsigned int poll(struct file *file, struct socket *sock,
 			 poll_table *wait)
 {
 	struct sock *sk = sock->sk;
-	u32 mask;
+	u32 mask = 0;
 
 	poll_wait(file, sk_sleep(sk), wait);
 
-	if (!skb_queue_empty(&sk->sk_receive_queue) ||
-	    (sock->state == SS_UNCONNECTED) ||
-	    (sock->state == SS_DISCONNECTING))
-		mask = (POLLRDNORM | POLLIN);
-	else
-		mask = 0;
-
-	if (sock->state == SS_DISCONNECTING)
-		mask |= POLLHUP;
-	else
-		mask |= POLLOUT;
+	switch ((int)sock->state) {
+	case SS_READY:
+	case SS_CONNECTED:
+		if (!tipc_sk_port(sk)->congested)
+			mask |= POLLOUT;
+		/* fall thru' */
+	case SS_CONNECTING:
+	case SS_LISTENING:
+		if (!skb_queue_empty(&sk->sk_receive_queue))
+			mask |= (POLLIN | POLLRDNORM);
+		break;
+	case SS_DISCONNECTING:
+		mask = (POLLIN | POLLRDNORM | POLLHUP);
+		break;
+	}
 
 	return mask;
 }
@@ -1026,9 +1046,8 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
 	struct sk_buff *buf;
 	struct tipc_msg *msg;
 	unsigned int sz;
-	int sz_to_copy;
+	int sz_to_copy, target, needed;
 	int sz_copied = 0;
-	int needed;
 	char __user *crs = m->msg_iov->iov_base;
 	unsigned char *buf_crs;
 	u32 err;
@@ -1050,6 +1069,8 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
 		goto exit;
 	}
 
+	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
+
 restart:
 
 	/* Look for a message in receive queue; wait if necessary */
@@ -1138,7 +1159,7 @@ restart:
 
 	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
 	    (!skb_queue_empty(&sk->sk_receive_queue) ||
-	     (flags & MSG_WAITALL)) &&	/* and more is ready or required */
+	     (sz_copied < target)) &&	/* and more is ready or required */
 	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
 	    (!err))			/* and haven't reached a FIN */
 		goto restart;
@@ -1174,7 +1195,7 @@ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
 	if (msg_connected(msg))
 		threshold *= 4;
 
-	return (queue_size >= threshold);
+	return queue_size >= threshold;
 }
 
 /**
@@ -1365,6 +1386,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
 	struct msghdr m = {NULL,};
 	struct sk_buff *buf;
 	struct tipc_msg *msg;
+	long timeout;
 	int res;
 
 	lock_sock(sk);
@@ -1379,7 +1401,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
 	/* For now, TIPC does not support the non-blocking form of connect() */
 
 	if (flags & O_NONBLOCK) {
-		res = -EWOULDBLOCK;
+		res = -EOPNOTSUPP;
 		goto exit;
 	}
 
@@ -1425,11 +1447,12 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
 
 	/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
 
+	timeout = tipc_sk(sk)->conn_timeout;
 	release_sock(sk);
 	res = wait_event_interruptible_timeout(*sk_sleep(sk),
 			(!skb_queue_empty(&sk->sk_receive_queue) ||
 			(sock->state != SS_CONNECTING)),
-			sk->sk_rcvtimeo);
+			timeout ? timeout : MAX_SCHEDULE_TIMEOUT);
 	lock_sock(sk);
 
 	if (res > 0) {
@@ -1692,7 +1715,7 @@ static int setsockopt(struct socket *sock,
 		res = tipc_set_portunreturnable(tport->ref, value);
 		break;
 	case TIPC_CONN_TIMEOUT:
-		sk->sk_rcvtimeo = msecs_to_jiffies(value);
+		tipc_sk(sk)->conn_timeout = msecs_to_jiffies(value);
 		/* no need to set "res", since already 0 at this point */
 		break;
 	default:
@@ -1747,7 +1770,7 @@ static int getsockopt(struct socket *sock,
 		res = tipc_portunreturnable(tport->ref, &value);
 		break;
 	case TIPC_CONN_TIMEOUT:
-		value = jiffies_to_msecs(sk->sk_rcvtimeo);
+		value = jiffies_to_msecs(tipc_sk(sk)->conn_timeout);
 		/* no need to set "res", since already 0 at this point */
 		break;
 	case TIPC_NODE_RECVQ_DEPTH:
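
Taken together, the socket.c changes decouple the connect() timeout from the receive timeout: TIPC_CONN_TIMEOUT now only bounds connection setup, while recv_stream() honours the standard SO_RCVLOWAT low-water mark via sock_rcvlowat(). A hedged userspace sketch of how the two options interact after this patch (error handling omitted; assumes a TIPC-enabled kernel with linux/tipc.h installed):

#include <sys/socket.h>
#include <linux/tipc.h>

void tune_tipc_socket(int fd)
{
	int conn_timeout_ms = 5000;	/* bounds connect() only, not recv() */
	int lowat = 256;		/* recv_stream() keeps looping until at
					 * least 256 bytes arrive, unless
					 * MSG_PEEK or an error ends the loop */

	setsockopt(fd, SOL_TIPC, TIPC_CONN_TIMEOUT,
		   &conn_timeout_ms, sizeof(conn_timeout_ms));
	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
}
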
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ab6eab4c45e2..1a5b9a6bd128 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -604,6 +604,6 @@ int tipc_ispublished(struct tipc_name const *name)
 {
 	u32 domain = 0;
 
-	return(tipc_nametbl_translate(name->type, name->instance,&domain) != 0);
+	return tipc_nametbl_translate(name->type, name->instance, &domain) != 0;
 }
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0b39b2451ea5..c586da3f4f18 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2033,11 +2033,10 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP;
+		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue) ||
-	    (sk->sk_shutdown & RCV_SHUTDOWN))
+	if (!skb_queue_empty(&sk->sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
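
The af_unix change folds the read flags into the shutdown branch, so a half-closed peer always polls readable and the subsequent read() returns 0 to signal EOF. A minimal poll() consumer showing why POLLIN on shutdown matters (hypothetical fd, error handling trimmed):

#define _GNU_SOURCE		/* for POLLRDHUP */
#include <poll.h>
#include <unistd.h>

/* Returns 1 while data may still arrive, 0 once the peer has shut down. */
int drain_once(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	char buf[4096];
	ssize_t n;

	if (poll(&pfd, 1, -1) <= 0)
		return 0;
	if (pfd.revents & POLLIN) {
		n = read(fd, buf, sizeof(buf));
		return n > 0;	/* 0 bytes read: orderly shutdown (EOF) */
	}
	return !(pfd.revents & (POLLHUP | POLLRDHUP));
}
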
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d6d046b9f6f2..9c21ebf9780e 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -253,11 +253,16 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
 			WARN_ON(err);
 			wdev->netdev->features |= NETIF_F_NETNS_LOCAL;
 		}
+
+		return err;
 	}
 
 	wiphy_net_set(&rdev->wiphy, net);
 
-	return err;
+	err = device_rename(&rdev->wiphy.dev, dev_name(&rdev->wiphy.dev));
+	WARN_ON(err);
+
+	return 0;
 }
 
 static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
@@ -428,7 +433,7 @@ int wiphy_register(struct wiphy *wiphy)
 
 	/* sanity check ifmodes */
 	WARN_ON(!ifmodes);
-	ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
+	ifmodes &= ((1 << NUM_NL80211_IFTYPES) - 1) & ~1;
 	if (WARN_ON(ifmodes != wiphy->interface_modes))
 		wiphy->interface_modes = ifmodes;
 
@@ -683,8 +688,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
 		INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work);
 		INIT_LIST_HEAD(&wdev->event_list);
 		spin_lock_init(&wdev->event_lock);
-		INIT_LIST_HEAD(&wdev->action_registrations);
-		spin_lock_init(&wdev->action_registrations_lock);
+		INIT_LIST_HEAD(&wdev->mgmt_registrations);
+		spin_lock_init(&wdev->mgmt_registrations_lock);
 
 		mutex_lock(&rdev->devlist_mtx);
 		list_add_rcu(&wdev->list, &rdev->netdev_list);
@@ -724,6 +729,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
 		dev->ethtool_ops = &cfg80211_ethtool_ops;
 
 		if ((wdev->iftype == NL80211_IFTYPE_STATION ||
+		     wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
 		     wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
 			dev->priv_flags |= IFF_DONT_BRIDGE;
 		break;
@@ -732,6 +738,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
 		case NL80211_IFTYPE_ADHOC:
 			cfg80211_leave_ibss(rdev, dev, true);
 			break;
+		case NL80211_IFTYPE_P2P_CLIENT:
 		case NL80211_IFTYPE_STATION:
 			wdev_lock(wdev);
 #ifdef CONFIG_CFG80211_WEXT
@@ -804,7 +811,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
 		sysfs_remove_link(&dev->dev.kobj, "phy80211");
 		list_del_rcu(&wdev->list);
 		rdev->devlist_generation++;
-		cfg80211_mlme_purge_actions(wdev);
+		cfg80211_mlme_purge_registrations(wdev);
 #ifdef CONFIG_CFG80211_WEXT
 		kfree(wdev->wext.keys);
 #endif
@@ -910,52 +917,3 @@ static void __exit cfg80211_exit(void)
 	destroy_workqueue(cfg80211_wq);
 }
 module_exit(cfg80211_exit);
-
-static int ___wiphy_printk(const char *level, const struct wiphy *wiphy,
-			   struct va_format *vaf)
-{
-	if (!wiphy)
-		return printk("%s(NULL wiphy *): %pV", level, vaf);
-
-	return printk("%s%s: %pV", level, wiphy_name(wiphy), vaf);
-}
-
-int __wiphy_printk(const char *level, const struct wiphy *wiphy,
-		   const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-	int r;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	r = ___wiphy_printk(level, wiphy, &vaf);
-	va_end(args);
-
-	return r;
-}
-EXPORT_SYMBOL(__wiphy_printk);
-
-#define define_wiphy_printk_level(func, kern_level)		\
-int func(const struct wiphy *wiphy, const char *fmt, ...)	\
-{								\
-	struct va_format vaf;					\
-	va_list args;						\
-	int r;							\
-								\
-	va_start(args, fmt);					\
-								\
-	vaf.fmt = fmt;						\
-	vaf.va = &args;						\
-								\
-	r = ___wiphy_printk(kern_level, wiphy, &vaf);		\
-	va_end(args);						\
-								\
-	return r;						\
-}								\
-EXPORT_SYMBOL(func);
-
-define_wiphy_printk_level(wiphy_debug, KERN_DEBUG);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 63d57ae399c3..5d89310b3587 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -86,7 +86,7 @@ struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy)
 static inline
 bool wiphy_idx_valid(int wiphy_idx)
 {
-	return (wiphy_idx >= 0);
+	return wiphy_idx >= 0;
 }
 
 
@@ -95,7 +95,10 @@ extern struct mutex cfg80211_mutex;
 extern struct list_head cfg80211_rdev_list;
 extern int cfg80211_rdev_list_generation;
 
-#define assert_cfg80211_lock() WARN_ON(!mutex_is_locked(&cfg80211_mutex))
+static inline void assert_cfg80211_lock(void)
+{
+	lockdep_assert_held(&cfg80211_mutex);
+}
 
 /*
  * You can use this to mark a wiphy_idx as not having an associated wiphy.
@@ -202,8 +205,8 @@ static inline void wdev_unlock(struct wireless_dev *wdev)
 	mutex_unlock(&wdev->mtx);
 }
 
-#define ASSERT_RDEV_LOCK(rdev) WARN_ON(!mutex_is_locked(&(rdev)->mtx));
-#define ASSERT_WDEV_LOCK(wdev) WARN_ON(!mutex_is_locked(&(wdev)->mtx));
+#define ASSERT_RDEV_LOCK(rdev) lockdep_assert_held(&(rdev)->mtx)
+#define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx)
 
 enum cfg80211_event_type {
 	EVENT_CONNECT_RESULT,
@@ -331,16 +334,17 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 			       const u8 *resp_ie, size_t resp_ie_len,
 			       u16 status, bool wextev,
 			       struct cfg80211_bss *bss);
-int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
-				  const u8 *match_data, int match_len);
-void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid);
-void cfg80211_mlme_purge_actions(struct wireless_dev *wdev);
-int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
-			 struct net_device *dev,
-			 struct ieee80211_channel *chan,
-			 enum nl80211_channel_type channel_type,
-			 bool channel_type_valid,
-			 const u8 *buf, size_t len, u64 *cookie);
+int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
+				u16 frame_type, const u8 *match_data,
+				int match_len);
+void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
+void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
+int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
+			  struct net_device *dev,
+			  struct ieee80211_channel *chan,
+			  enum nl80211_channel_type channel_type,
+			  bool channel_type_valid,
+			  const u8 *buf, size_t len, u64 *cookie);
 
 /* SME */
 int __cfg80211_connect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index d1a3fb99fdf2..46f371160896 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -149,7 +149,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
 	const u8 *bssid = mgmt->bssid;
 	int i;
-	bool found = false;
+	bool found = false, was_current = false;
 
 	ASSERT_WDEV_LOCK(wdev);
 
@@ -159,6 +159,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
 			cfg80211_put_bss(&wdev->current_bss->pub);
 			wdev->current_bss = NULL;
 			found = true;
+			was_current = true;
 	} else for (i = 0; i < MAX_AUTH_BSSES; i++) {
 		if (wdev->auth_bsses[i] &&
 		    memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
@@ -183,7 +184,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
 
 	nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
 
-	if (wdev->sme_state == CFG80211_SME_CONNECTED) {
+	if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) {
 		u16 reason_code;
 		bool from_ap;
 
@@ -747,31 +748,51 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
 }
 EXPORT_SYMBOL(cfg80211_new_sta);
 
-struct cfg80211_action_registration {
+struct cfg80211_mgmt_registration {
 	struct list_head list;
 
 	u32 nlpid;
 
 	int match_len;
 
+	__le16 frame_type;
+
 	u8 match[];
 };
 
-int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
-				  const u8 *match_data, int match_len)
+int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
+				u16 frame_type, const u8 *match_data,
+				int match_len)
 {
-	struct cfg80211_action_registration *reg, *nreg;
+	struct cfg80211_mgmt_registration *reg, *nreg;
 	int err = 0;
+	u16 mgmt_type;
+
+	if (!wdev->wiphy->mgmt_stypes)
+		return -EOPNOTSUPP;
+
+	if ((frame_type & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT)
+		return -EINVAL;
+
+	if (frame_type & ~(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE))
+		return -EINVAL;
+
+	mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;
+	if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].rx & BIT(mgmt_type)))
+		return -EINVAL;
 
 	nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL);
 	if (!nreg)
 		return -ENOMEM;
 
-	spin_lock_bh(&wdev->action_registrations_lock);
+	spin_lock_bh(&wdev->mgmt_registrations_lock);
 
-	list_for_each_entry(reg, &wdev->action_registrations, list) {
+	list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
 		int mlen = min(match_len, reg->match_len);
 
+		if (frame_type != le16_to_cpu(reg->frame_type))
+			continue;
+
 		if (memcmp(reg->match, match_data, mlen) == 0) {
 			err = -EALREADY;
 			break;
@@ -786,69 +807,83 @@ int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
 	memcpy(nreg->match, match_data, match_len);
 	nreg->match_len = match_len;
 	nreg->nlpid = snd_pid;
-	list_add(&nreg->list, &wdev->action_registrations);
+	nreg->frame_type = cpu_to_le16(frame_type);
+	list_add(&nreg->list, &wdev->mgmt_registrations);
 
  out:
-	spin_unlock_bh(&wdev->action_registrations_lock);
+	spin_unlock_bh(&wdev->mgmt_registrations_lock);
 	return err;
 }
 
-void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid)
+void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
 {
-	struct cfg80211_action_registration *reg, *tmp;
+	struct cfg80211_mgmt_registration *reg, *tmp;
 
-	spin_lock_bh(&wdev->action_registrations_lock);
+	spin_lock_bh(&wdev->mgmt_registrations_lock);
 
-	list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) {
+	list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
 		if (reg->nlpid == nlpid) {
 			list_del(&reg->list);
 			kfree(reg);
 		}
 	}
 
-	spin_unlock_bh(&wdev->action_registrations_lock);
+	spin_unlock_bh(&wdev->mgmt_registrations_lock);
 }
 
-void cfg80211_mlme_purge_actions(struct wireless_dev *wdev)
+void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
 {
-	struct cfg80211_action_registration *reg, *tmp;
+	struct cfg80211_mgmt_registration *reg, *tmp;
 
-	spin_lock_bh(&wdev->action_registrations_lock);
+	spin_lock_bh(&wdev->mgmt_registrations_lock);
 
-	list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) {
+	list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
 		list_del(&reg->list);
 		kfree(reg);
 	}
 
-	spin_unlock_bh(&wdev->action_registrations_lock);
+	spin_unlock_bh(&wdev->mgmt_registrations_lock);
 }
 
-int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
-			 struct net_device *dev,
-			 struct ieee80211_channel *chan,
-			 enum nl80211_channel_type channel_type,
-			 bool channel_type_valid,
-			 const u8 *buf, size_t len, u64 *cookie)
+int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
+			  struct net_device *dev,
+			  struct ieee80211_channel *chan,
+			  enum nl80211_channel_type channel_type,
+			  bool channel_type_valid,
+			  const u8 *buf, size_t len, u64 *cookie)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	const struct ieee80211_mgmt *mgmt;
+	u16 stype;
+
+	if (!wdev->wiphy->mgmt_stypes)
+		return -EOPNOTSUPP;
 
-	if (rdev->ops->action == NULL)
+	if (!rdev->ops->mgmt_tx)
 		return -EOPNOTSUPP;
+
 	if (len < 24 + 1)
 		return -EINVAL;
 
 	mgmt = (const struct ieee80211_mgmt *) buf;
-	if (!ieee80211_is_action(mgmt->frame_control))
+
+	if (!ieee80211_is_mgmt(mgmt->frame_control))
 		return -EINVAL;
-	if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
+
+	stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
+	if (!(wdev->wiphy->mgmt_stypes[wdev->iftype].tx & BIT(stype >> 4)))
+		return -EINVAL;
+
+	if (ieee80211_is_action(mgmt->frame_control) &&
+	    mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
 		/* Verify that we are associated with the destination AP */
 		wdev_lock(wdev);
 
 		if (!wdev->current_bss ||
 		    memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
 			   ETH_ALEN) != 0 ||
-		    (wdev->iftype == NL80211_IFTYPE_STATION &&
+		    ((wdev->iftype == NL80211_IFTYPE_STATION ||
+		      wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
 		     memcmp(wdev->current_bss->pub.bssid, mgmt->da,
 			    ETH_ALEN) != 0)) {
 			wdev_unlock(wdev);
@@ -862,64 +897,75 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
 			return -EINVAL;
 
 	/* Transmit the Action frame as requested by user space */
-	return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type,
-				 channel_type_valid, buf, len, cookie);
+	return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, channel_type,
+				  channel_type_valid, buf, len, cookie);
 }
 
-bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf,
-			size_t len, gfp_t gfp)
+bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
+		      size_t len, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
-	struct cfg80211_action_registration *reg;
-	const u8 *action_data;
-	int action_data_len;
+	struct cfg80211_mgmt_registration *reg;
+	const struct ieee80211_txrx_stypes *stypes =
+		&wiphy->mgmt_stypes[wdev->iftype];
+	struct ieee80211_mgmt *mgmt = (void *)buf;
+	const u8 *data;
+	int data_len;
 	bool result = false;
+	__le16 ftype = mgmt->frame_control &
+		       cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
+	u16 stype;
 
-	/* frame length - min size excluding category */
-	action_data_len = len - (IEEE80211_MIN_ACTION_SIZE - 1);
+	stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
 
-	/* action data starts with category */
-	action_data = buf + IEEE80211_MIN_ACTION_SIZE - 1;
+	if (!(stypes->rx & BIT(stype)))
+		return false;
 
-	spin_lock_bh(&wdev->action_registrations_lock);
+	data = buf + ieee80211_hdrlen(mgmt->frame_control);
+	data_len = len - ieee80211_hdrlen(mgmt->frame_control);
+
+	spin_lock_bh(&wdev->mgmt_registrations_lock);
+
+	list_for_each_entry(reg, &wdev->mgmt_registrations, list) {
+		if (reg->frame_type != ftype)
+			continue;
 
-	list_for_each_entry(reg, &wdev->action_registrations, list) {
-		if (reg->match_len > action_data_len)
+		if (reg->match_len > data_len)
 			continue;
 
-		if (memcmp(reg->match, action_data, reg->match_len))
+		if (memcmp(reg->match, data, reg->match_len))
 			continue;
 
 		/* found match! */
 
 		/* Indicate the received Action frame to user space */
-		if (nl80211_send_action(rdev, dev, reg->nlpid, freq,
-					buf, len, gfp))
+		if (nl80211_send_mgmt(rdev, dev, reg->nlpid, freq,
+				      buf, len, gfp))
 			continue;
 
 		result = true;
 		break;
 	}
 
-	spin_unlock_bh(&wdev->action_registrations_lock);
+	spin_unlock_bh(&wdev->mgmt_registrations_lock);
 
 	return result;
 }
-EXPORT_SYMBOL(cfg80211_rx_action);
+EXPORT_SYMBOL(cfg80211_rx_mgmt);
 
-void cfg80211_action_tx_status(struct net_device *dev, u64 cookie,
-			       const u8 *buf, size_t len, bool ack, gfp_t gfp)
+void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie,
+			     const u8 *buf, size_t len, bool ack, gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct wiphy *wiphy = wdev->wiphy;
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
 	/* Indicate TX status of the Action frame to user space */
-	nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
+	nl80211_send_mgmt_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
 }
-EXPORT_SYMBOL(cfg80211_action_tx_status);
+EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
 
 void cfg80211_cqm_rssi_notify(struct net_device *dev,
 			      enum nl80211_cqm_rssi_threshold_event rssi_event,
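
The registration and RX paths above key everything off the four subtype bits of the 802.11 frame control field: mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4, and that index selects one bit in the per-iftype rx/tx bitmaps. A standalone sketch of the mapping, with the constant values copied from include/linux/ieee80211.h and the mask worked out by hand:

#include <stdio.h>

#define IEEE80211_FCTL_STYPE	0x00F0
#define IEEE80211_FTYPE_MGMT	0x0000
#define IEEE80211_STYPE_ACTION	0x00D0
#define BIT(n)			(1U << (n))

int main(void)
{
	unsigned int frame_type = IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION;
	unsigned int mgmt_type = (frame_type & IEEE80211_FCTL_STYPE) >> 4;

	/* Action frames are subtype 13, so they occupy mask 0x2000
	 * in the per-iftype rx/tx bitmaps checked above. */
	printf("subtype %u -> mask 0x%04x\n", mgmt_type, BIT(mgmt_type));
	return 0;
}
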
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 37902a54e9c1..f15b1af2c768 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -136,6 +136,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
 		.len = sizeof(struct nl80211_sta_flag_update),
 	},
 	[NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG },
+	[NL80211_ATTR_CONTROL_PORT_ETHERTYPE] = { .type = NLA_U16 },
+	[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
 	[NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
 	[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
 	[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
@@ -156,6 +158,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
 
 	[NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 },
 	[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 },
+	[NL80211_ATTR_FRAME_TYPE] = { .type = NLA_U16 },
 };
 
 /* policy for the attributes */
@@ -407,12 +410,14 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_P2P_GO:
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		if (!wdev->current_bss)
 			return -ENOLINK;
 		break;
 	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
 		if (wdev->sme_state != CFG80211_SME_CONNECTED)
 			return -ENOLINK;
 		break;
@@ -437,6 +442,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	struct ieee80211_rate *rate;
 	int i;
 	u16 ifmodes = dev->wiphy.interface_modes;
+	const struct ieee80211_txrx_stypes *mgmt_stypes =
+				dev->wiphy.mgmt_stypes;
 
 	hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY);
 	if (!hdr)
@@ -471,6 +478,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
 		   dev->wiphy.max_num_pmkids);
 
+	if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL)
+		NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE);
+
 	nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
 	if (!nl_modes)
 		goto nla_put_failure;
@@ -587,7 +597,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	CMD(flush_pmksa, FLUSH_PMKSA);
 	CMD(remain_on_channel, REMAIN_ON_CHANNEL);
 	CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
-	CMD(action, ACTION);
+	CMD(mgmt_tx, FRAME);
 	if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
 		i++;
 		NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
@@ -608,6 +618,55 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 
 	nla_nest_end(msg, nl_cmds);
 
+	if (mgmt_stypes) {
+		u16 stypes;
+		struct nlattr *nl_ftypes, *nl_ifs;
+		enum nl80211_iftype ift;
+
+		nl_ifs = nla_nest_start(msg, NL80211_ATTR_TX_FRAME_TYPES);
+		if (!nl_ifs)
+			goto nla_put_failure;
+
+		for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) {
+			nl_ftypes = nla_nest_start(msg, ift);
+			if (!nl_ftypes)
+				goto nla_put_failure;
+			i = 0;
+			stypes = mgmt_stypes[ift].tx;
+			while (stypes) {
+				if (stypes & 1)
+					NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
+						    (i << 4) | IEEE80211_FTYPE_MGMT);
+				stypes >>= 1;
+				i++;
+			}
+			nla_nest_end(msg, nl_ftypes);
+		}
+
+		nla_nest_end(msg, nl_ifs);
+
+		nl_ifs = nla_nest_start(msg, NL80211_ATTR_RX_FRAME_TYPES);
+		if (!nl_ifs)
+			goto nla_put_failure;
+
+		for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) {
+			nl_ftypes = nla_nest_start(msg, ift);
+			if (!nl_ftypes)
+				goto nla_put_failure;
+			i = 0;
+			stypes = mgmt_stypes[ift].rx;
+			while (stypes) {
+				if (stypes & 1)
+					NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE,
+						    (i << 4) | IEEE80211_FTYPE_MGMT);
+				stypes >>= 1;
+				i++;
+			}
+			nla_nest_end(msg, nl_ftypes);
+		}
+		nla_nest_end(msg, nl_ifs);
+	}
+
 	return genlmsg_end(msg, hdr);
 
 nla_put_failure:
@@ -709,7 +768,8 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
 	       wdev->iftype == NL80211_IFTYPE_AP ||
 	       wdev->iftype == NL80211_IFTYPE_WDS ||
 	       wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
-	       wdev->iftype == NL80211_IFTYPE_MONITOR;
+	       wdev->iftype == NL80211_IFTYPE_MONITOR ||
+	       wdev->iftype == NL80211_IFTYPE_P2P_GO;
 }
 
 static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
@@ -776,7 +836,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 	struct cfg80211_registered_device *rdev;
 	struct net_device *netdev = NULL;
 	struct wireless_dev *wdev;
-	int result, rem_txq_params = 0;
+	int result = 0, rem_txq_params = 0;
 	struct nlattr *nl_txq_params;
 	u32 changed;
 	u8 retry_short = 0, retry_long = 0;
@@ -1636,7 +1696,8 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		goto unlock_rtnl;
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -1728,7 +1789,8 @@ static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info)
 		goto out;
 	}
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -2071,10 +2133,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 	switch (dev->ieee80211_ptr->iftype) {
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
+	case NL80211_IFTYPE_P2P_GO:
 		/* disallow mesh-specific things */
 		if (params.plink_action)
 			err = -EINVAL;
 		break;
+	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
 		/* disallow everything but AUTHORIZED flag */
 		if (params.plink_action)
@@ -2176,7 +2240,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
 		goto out_rtnl;
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -2229,7 +2294,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -2603,7 +2669,8 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
 		goto out;
 	}
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -3306,6 +3373,7 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	}
 
 	switch (wdev->iftype) {
+	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_STATION:
 		if (intbss == wdev->current_bss)
 			NLA_PUT_U32(msg, NL80211_BSS_STATUS,
@@ -3572,12 +3640,28 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		goto unlock_rtnl;
 
+	if (key.idx >= 0) {
+		int i;
+		bool ok = false;
+		for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) {
+			if (key.p.cipher == rdev->wiphy.cipher_suites[i]) {
+				ok = true;
+				break;
+			}
+		}
+		if (!ok) {
+			err = -EINVAL;
+			goto out;
+		}
+	}
+
 	if (!rdev->ops->auth) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -3624,7 +3708,8 @@ unlock_rtnl:
 	return err;
 }
 
-static int nl80211_crypto_settings(struct genl_info *info,
+static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
+				   struct genl_info *info,
 				   struct cfg80211_crypto_settings *settings,
 				   int cipher_limit)
 {
@@ -3632,6 +3717,19 @@ static int nl80211_crypto_settings(struct genl_info *info,
 
 	settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT];
 
+	if (info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) {
+		u16 proto;
+		proto = nla_get_u16(
+			info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]);
+		settings->control_port_ethertype = cpu_to_be16(proto);
+		if (!(rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
+		    proto != ETH_P_PAE)
+			return -EINVAL;
+		if (info->attrs[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT])
+			settings->control_port_no_encrypt = true;
+	} else
+		settings->control_port_ethertype = cpu_to_be16(ETH_P_PAE);
+
 	if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) {
 		void *data;
 		int len, i;
@@ -3718,7 +3816,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
 		goto out;
 	}
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -3759,7 +3858,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
 	if (info->attrs[NL80211_ATTR_PREV_BSSID])
 		prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
 
-	err = nl80211_crypto_settings(info, &crypto, 1);
+	err = nl80211_crypto_settings(rdev, info, &crypto, 1);
 	if (!err)
 		err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
 					  ssid, ssid_len, ie, ie_len, use_mfp,
@@ -3802,7 +3901,8 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
 		goto out;
 	}
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -3868,7 +3968,8 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
 		goto out;
 	}
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -4236,7 +4337,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
 
 	connect.privacy = info->attrs[NL80211_ATTR_PRIVACY];
 
-	err = nl80211_crypto_settings(info, &connect.crypto,
+	err = nl80211_crypto_settings(rdev, info, &connect.crypto,
 				      NL80211_MAX_NR_CIPHER_SUITES);
 	if (err)
 		return err;
@@ -4246,7 +4347,8 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		goto unlock_rtnl;
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -4322,7 +4424,8 @@ static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		goto unlock_rtnl;
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -4410,7 +4513,8 @@ static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info)
 	pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
 	pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -4455,7 +4559,8 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		goto out_rtnl;
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -4717,17 +4822,18 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
 	return err;
 }
 
-static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
+static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev;
 	struct net_device *dev;
+	u16 frame_type = IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION;
 	int err;
 
 	if (!info->attrs[NL80211_ATTR_FRAME_MATCH])
 		return -EINVAL;
 
-	if (nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]) < 1)
-		return -EINVAL;
+	if (info->attrs[NL80211_ATTR_FRAME_TYPE])
+		frame_type = nla_get_u16(info->attrs[NL80211_ATTR_FRAME_TYPE]);
 
 	rtnl_lock();
 
@@ -4736,18 +4842,20 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
 		goto unlock_rtnl;
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
 
 	/* not much point in registering if we can't reply */
-	if (!rdev->ops->action) {
+	if (!rdev->ops->mgmt_tx) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
 
-	err = cfg80211_mlme_register_action(dev->ieee80211_ptr, info->snd_pid,
+	err = cfg80211_mlme_register_mgmt(dev->ieee80211_ptr, info->snd_pid,
+					  frame_type,
 			nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
 			nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
  out:
@@ -4758,7 +4866,7 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
 	return err;
 }
 
-static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
+static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev;
 	struct net_device *dev;
@@ -4781,13 +4889,14 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		goto unlock_rtnl;
 
-	if (!rdev->ops->action) {
+	if (!rdev->ops->mgmt_tx) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) {
 		err = -EOPNOTSUPP;
 		goto out;
 	}
@@ -4824,17 +4933,17 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
-			     NL80211_CMD_ACTION);
+			     NL80211_CMD_FRAME);
 
 	if (IS_ERR(hdr)) {
 		err = PTR_ERR(hdr);
 		goto free_msg;
 	}
-	err = cfg80211_mlme_action(rdev, dev, chan, channel_type,
-				   channel_type_valid,
-				   nla_data(info->attrs[NL80211_ATTR_FRAME]),
-				   nla_len(info->attrs[NL80211_ATTR_FRAME]),
-				   &cookie);
+	err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, channel_type,
+				    channel_type_valid,
+				    nla_data(info->attrs[NL80211_ATTR_FRAME]),
+				    nla_len(info->attrs[NL80211_ATTR_FRAME]),
+				    &cookie);
 	if (err)
 		goto free_msg;
 
@@ -5005,7 +5114,8 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
 		goto unlock_rdev;
 	}
 
-	if (wdev->iftype != NL80211_IFTYPE_STATION) {
+	if (wdev->iftype != NL80211_IFTYPE_STATION &&
+	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
 		err = -EOPNOTSUPP;
 		goto unlock_rdev;
 	}
@@ -5333,14 +5443,14 @@ static struct genl_ops nl80211_ops[] = {
 		.flags = GENL_ADMIN_PERM,
 	},
 	{
-		.cmd = NL80211_CMD_REGISTER_ACTION,
-		.doit = nl80211_register_action,
+		.cmd = NL80211_CMD_REGISTER_FRAME,
+		.doit = nl80211_register_mgmt,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 	},
 	{
-		.cmd = NL80211_CMD_ACTION,
-		.doit = nl80211_action,
+		.cmd = NL80211_CMD_FRAME,
+		.doit = nl80211_tx_mgmt,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 	},
@@ -6040,9 +6150,9 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
 				nl80211_mlme_mcgrp.id, gfp);
 }
 
-int nl80211_send_action(struct cfg80211_registered_device *rdev,
-			struct net_device *netdev, u32 nlpid,
-			int freq, const u8 *buf, size_t len, gfp_t gfp)
+int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
+		      struct net_device *netdev, u32 nlpid,
+		      int freq, const u8 *buf, size_t len, gfp_t gfp)
 {
 	struct sk_buff *msg;
 	void *hdr;
@@ -6052,7 +6162,7 @@ int nl80211_send_action(struct cfg80211_registered_device *rdev,
6052 if (!msg) 6162 if (!msg)
6053 return -ENOMEM; 6163 return -ENOMEM;
6054 6164
6055 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION); 6165 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
6056 if (!hdr) { 6166 if (!hdr) {
6057 nlmsg_free(msg); 6167 nlmsg_free(msg);
6058 return -ENOMEM; 6168 return -ENOMEM;
@@ -6080,10 +6190,10 @@ int nl80211_send_action(struct cfg80211_registered_device *rdev,
6080 return -ENOBUFS; 6190 return -ENOBUFS;
6081} 6191}
6082 6192
6083void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev, 6193void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
6084 struct net_device *netdev, u64 cookie, 6194 struct net_device *netdev, u64 cookie,
6085 const u8 *buf, size_t len, bool ack, 6195 const u8 *buf, size_t len, bool ack,
6086 gfp_t gfp) 6196 gfp_t gfp)
6087{ 6197{
6088 struct sk_buff *msg; 6198 struct sk_buff *msg;
6089 void *hdr; 6199 void *hdr;
@@ -6092,7 +6202,7 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
6092 if (!msg) 6202 if (!msg)
6093 return; 6203 return;
6094 6204
6095 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION_TX_STATUS); 6205 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME_TX_STATUS);
6096 if (!hdr) { 6206 if (!hdr) {
6097 nlmsg_free(msg); 6207 nlmsg_free(msg);
6098 return; 6208 return;
@@ -6179,7 +6289,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
6179 6289
6180 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) 6290 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
6181 list_for_each_entry_rcu(wdev, &rdev->netdev_list, list) 6291 list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
6182 cfg80211_mlme_unregister_actions(wdev, notify->pid); 6292 cfg80211_mlme_unregister_socket(wdev, notify->pid);
6183 6293
6184 rcu_read_unlock(); 6294 rcu_read_unlock();
6185 6295
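The renamed handler reaches the driver through the rdev->ops->mgmt_tx hook checked above. A minimal driver-side sketch, with the parameter list inferred from the cfg80211_mlme_mgmt_tx() call in this hunk (the names here are illustrative, not copied from cfg80211.h):

/* Hypothetical driver stub; signature inferred from the call above. */
static int drv_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
                       struct ieee80211_channel *chan,
                       enum nl80211_channel_type channel_type,
                       bool channel_type_valid,
                       const u8 *buf, size_t len, u64 *cookie)
{
        /* Queue the raw management frame for TX and hand back a cookie
         * so the later FRAME_TX_STATUS event can be matched to it. */
        *cookie = (unsigned long)buf;   /* illustrative cookie only */
        return 0;
}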
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 2ad7fbc7d9f1..30d2f939150d 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -74,13 +74,13 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
                             struct net_device *dev, const u8 *mac_addr,
                             struct station_info *sinfo, gfp_t gfp);
 
-int nl80211_send_action(struct cfg80211_registered_device *rdev,
+int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
                       struct net_device *netdev, u32 nlpid, int freq,
                       const u8 *buf, size_t len, gfp_t gfp);
-void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
+void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
                                  struct net_device *netdev, u64 cookie,
                                  const u8 *buf, size_t len, bool ack,
                                  gfp_t gfp);
 
 void
 nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
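Renaming NL80211_CMD_ACTION and friends touches the userspace ABI; the uapi header is not among the hunks shown here, but the usual way to keep old binaries working is to retain the old names as aliases of the new values, e.g. (assumed sketch, not taken from this diff):

/* Assumed compatibility aliases; the real uapi header is not shown here. */
#define NL80211_CMD_REGISTER_ACTION     NL80211_CMD_REGISTER_FRAME
#define NL80211_CMD_ACTION              NL80211_CMD_FRAME
#define NL80211_CMD_ACTION_TX_STATUS    NL80211_CMD_FRAME_TX_STATUS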
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index 1332c445d1c7..c774bc0f155e 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -14,6 +14,7 @@
  * See COPYING for more details.
  */
 
+#include <linux/kernel.h>
 #include <net/cfg80211.h>
 #include <net/ieee80211_radiotap.h>
 #include <asm/unaligned.h>
@@ -45,7 +46,7 @@ static const struct radiotap_align_size rtap_namespace_sizes[] = {
 };
 
 static const struct ieee80211_radiotap_namespace radiotap_ns = {
-        .n_bits = sizeof(rtap_namespace_sizes) / sizeof(rtap_namespace_sizes[0]),
+        .n_bits = ARRAY_SIZE(rtap_namespace_sizes),
         .align_size = rtap_namespace_sizes,
 };
 
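ARRAY_SIZE() (from the newly included <linux/kernel.h>) is the canonical replacement for the open-coded sizeof division: same arithmetic, but the kernel macro adds a __must_be_array() check so that passing a pointer becomes a build error instead of a silently bogus count. A standalone userspace illustration of the arithmetic half:

#include <stdio.h>

/* Plain sizeof division: correct for arrays, silently wrong for pointers. */
#define COUNT_OF(arr) (sizeof(arr) / sizeof((arr)[0]))

static const int rtap_sizes[] = { 8, 1, 1, 4, 2, 2, 2 };

int main(void)
{
        printf("%zu\n", COUNT_OF(rtap_sizes));  /* prints 7 */
        return 0;
}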
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f180db0de66c..d14bbf960c18 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/random.h>
+#include <linux/ctype.h>
 #include <linux/nl80211.h>
 #include <linux/platform_device.h>
 #include <net/cfg80211.h>
@@ -73,7 +74,11 @@ const struct ieee80211_regdomain *cfg80211_regdomain;
  *      - last_request
  */
 static DEFINE_MUTEX(reg_mutex);
-#define assert_reg_lock() WARN_ON(!mutex_is_locked(&reg_mutex))
+
+static inline void assert_reg_lock(void)
+{
+        lockdep_assert_held(&reg_mutex);
+}
 
 /* Used to queue up regulatory hints */
 static LIST_HEAD(reg_requests_list);
@@ -181,14 +186,6 @@ static bool is_alpha2_set(const char *alpha2)
         return false;
 }
 
-static bool is_alpha_upper(char letter)
-{
-        /* ASCII A - Z */
-        if (letter >= 65 && letter <= 90)
-                return true;
-        return false;
-}
-
 static bool is_unknown_alpha2(const char *alpha2)
 {
         if (!alpha2)
@@ -220,7 +217,7 @@ static bool is_an_alpha2(const char *alpha2)
 {
         if (!alpha2)
                 return false;
-        if (is_alpha_upper(alpha2[0]) && is_alpha_upper(alpha2[1]))
+        if (isalpha(alpha2[0]) && isalpha(alpha2[1]))
                 return true;
         return false;
 }
@@ -1399,6 +1396,11 @@ static DECLARE_WORK(reg_work, reg_todo);
 
 static void queue_regulatory_request(struct regulatory_request *request)
 {
+        if (isalpha(request->alpha2[0]))
+                request->alpha2[0] = toupper(request->alpha2[0]);
+        if (isalpha(request->alpha2[1]))
+                request->alpha2[1] = toupper(request->alpha2[1]);
+
         spin_lock(&reg_requests_lock);
         list_add_tail(&request->list, &reg_requests_list);
         spin_unlock(&reg_requests_lock);
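Two cleanups here: assert_reg_lock() now uses lockdep_assert_held(), which verifies that the current task holds reg_mutex (WARN_ON(!mutex_is_locked()) only proved that somebody did), and the home-grown is_alpha_upper() gives way to <linux/ctype.h>, with hints normalized to upper case as they are queued so that "us", "Us" and "US" are all accepted. A standalone sketch of that normalization:

#include <ctype.h>
#include <stdio.h>

/* Sketch of the normalization added to queue_regulatory_request(). */
static void normalize_alpha2(char alpha2[2])
{
        if (isalpha((unsigned char)alpha2[0]))
                alpha2[0] = toupper((unsigned char)alpha2[0]);
        if (isalpha((unsigned char)alpha2[1]))
                alpha2[1] = toupper((unsigned char)alpha2[1]);
}

int main(void)
{
        char cc[2] = { 'u', 's' };

        normalize_alpha2(cc);
        printf("%c%c\n", cc[0], cc[1]);         /* prints: US */
        return 0;
}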
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a8c2d6b877ae..f161b9844542 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -411,7 +411,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 
         ASSERT_WDEV_LOCK(wdev);
 
-        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
+                    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
                 return;
 
         if (wdev->sme_state != CFG80211_SME_CONNECTING)
@@ -548,7 +549,8 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
 
         ASSERT_WDEV_LOCK(wdev);
 
-        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
+                    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
                 return;
 
         if (wdev->sme_state != CFG80211_SME_CONNECTED)
@@ -644,7 +646,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 
         ASSERT_WDEV_LOCK(wdev);
 
-        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
+                    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
                 return;
 
         if (wdev->sme_state != CFG80211_SME_CONNECTED)
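The same two-iftype test now appears in all three SME entry points; a hypothetical helper (not part of this patch) would keep the predicate in one place:

/* Hypothetical helper, not in this patch. */
static inline bool wdev_is_managed_client(struct wireless_dev *wdev)
{
        return wdev->iftype == NL80211_IFTYPE_STATION ||
               wdev->iftype == NL80211_IFTYPE_P2P_CLIENT;
}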
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9f2cef3e0ca0..74a9e3cce452 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -110,6 +110,13 @@ static int wiphy_resume(struct device *dev)
         return ret;
 }
 
+static const void *wiphy_namespace(struct device *d)
+{
+        struct wiphy *wiphy = container_of(d, struct wiphy, dev);
+
+        return wiphy_net(wiphy);
+}
+
 struct class ieee80211_class = {
         .name = "ieee80211",
         .owner = THIS_MODULE,
@@ -120,6 +127,8 @@ struct class ieee80211_class = {
 #endif
         .suspend = wiphy_suspend,
         .resume = wiphy_resume,
+        .ns_type = &net_ns_type_operations,
+        .namespace = wiphy_namespace,
 };
 
 int wiphy_sysfs_init(void)
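Wiring class->namespace to wiphy_net() makes each wiphy's sysfs entry visible only from its own network namespace. wiphy_namespace() recovers the wiphy from its embedded struct device via container_of(), which is plain pointer arithmetic against the member offset; the classic definition is roughly:

#include <stddef.h>

/* Roughly the classic definition: step back from a pointer to a member
 * to the start of the structure that embeds it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))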
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 0c8a1e8b7690..fb5448f7d55a 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -183,7 +183,14 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
                         return -EINVAL;
                 break;
         default:
-                return -EINVAL;
+                /*
+                 * We don't know anything about this algorithm,
+                 * allow using it -- but the driver must check
+                 * all parameters! We still check below whether
+                 * or not the driver supports this algorithm,
+                 * of course.
+                 */
+                break;
         }
 
         if (params->seq) {
@@ -221,7 +228,7 @@ const unsigned char bridge_tunnel_header[] __aligned(2) =
         { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
 EXPORT_SYMBOL(bridge_tunnel_header);
 
-unsigned int ieee80211_hdrlen(__le16 fc)
+unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
 {
         unsigned int hdrlen = 24;
 
@@ -319,7 +326,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
                   cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
         case cpu_to_le16(IEEE80211_FCTL_TODS):
                 if (unlikely(iftype != NL80211_IFTYPE_AP &&
-                             iftype != NL80211_IFTYPE_AP_VLAN))
+                             iftype != NL80211_IFTYPE_AP_VLAN &&
+                             iftype != NL80211_IFTYPE_P2P_GO))
                         return -1;
                 break;
         case cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
@@ -347,7 +355,8 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
                 break;
         case cpu_to_le16(IEEE80211_FCTL_FROMDS):
                 if ((iftype != NL80211_IFTYPE_STATION &&
-                     iftype != NL80211_IFTYPE_MESH_POINT) ||
+                     iftype != NL80211_IFTYPE_P2P_CLIENT &&
+                     iftype != NL80211_IFTYPE_MESH_POINT) ||
                     (is_multicast_ether_addr(dst) &&
                      !compare_ether_addr(src, addr)))
                         return -1;
@@ -424,6 +433,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
         switch (iftype) {
         case NL80211_IFTYPE_AP:
         case NL80211_IFTYPE_AP_VLAN:
+        case NL80211_IFTYPE_P2P_GO:
                 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
                 /* DA BSSID SA */
                 memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -432,6 +442,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
                 hdrlen = 24;
                 break;
         case NL80211_IFTYPE_STATION:
+        case NL80211_IFTYPE_P2P_CLIENT:
                 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
                 /* BSSID SA DA */
                 memcpy(hdr.addr1, bssid, ETH_ALEN);
@@ -771,7 +782,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 
         /* if it's part of a bridge, reject changing type to station/ibss */
         if ((dev->priv_flags & IFF_BRIDGE_PORT) &&
-            (ntype == NL80211_IFTYPE_ADHOC || ntype == NL80211_IFTYPE_STATION))
+            (ntype == NL80211_IFTYPE_ADHOC ||
+             ntype == NL80211_IFTYPE_STATION ||
+             ntype == NL80211_IFTYPE_P2P_CLIENT))
                 return -EBUSY;
 
         if (ntype != otype) {
@@ -782,6 +795,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                         cfg80211_leave_ibss(rdev, dev, false);
                         break;
                 case NL80211_IFTYPE_STATION:
+                case NL80211_IFTYPE_P2P_CLIENT:
                         cfg80211_disconnect(rdev, dev,
                                             WLAN_REASON_DEAUTH_LEAVING, true);
                         break;
@@ -810,9 +824,11 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                         if (dev->ieee80211_ptr->use_4addr)
                                 break;
                         /* fall through */
+                case NL80211_IFTYPE_P2P_CLIENT:
                 case NL80211_IFTYPE_ADHOC:
                         dev->priv_flags |= IFF_DONT_BRIDGE;
                         break;
+                case NL80211_IFTYPE_P2P_GO:
                 case NL80211_IFTYPE_AP:
                 case NL80211_IFTYPE_AP_VLAN:
                 case NL80211_IFTYPE_WDS:
@@ -823,7 +839,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                         /* monitor can't bridge anyway */
                         break;
                 case NL80211_IFTYPE_UNSPECIFIED:
-                case __NL80211_IFTYPE_AFTER_LAST:
+                case NUM_NL80211_IFTYPES:
                         /* not happening */
                         break;
                 }
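The P2P_GO and P2P_CLIENT cases above simply reuse the existing AP and station address layouts (see the /* DA BSSID SA */-style comments). For reference, the ToDS/FromDS bits select the 802.11 data-frame address layout as follows:

/*
 * 802.11 data-frame address fields by ToDS/FromDS:
 *
 *   ToDS FromDS   addr1   addr2   addr3   addr4
 *    0     0      DA      SA      BSSID   -      (IBSS)
 *    1     0      BSSID   SA      DA      -      (STA/P2P client -> AP)
 *    0     1      DA      BSSID   SA      -      (AP/P2P GO -> STA)
 *    1     1      RA      TA      DA      SA     (4-address/WDS)
 */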
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 8f5116f5af19..dc675a3daa3d 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -611,7 +611,7 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev)
 #endif
 
 #ifdef CONFIG_CFG80211_WEXT
-        if (dev->ieee80211_ptr && dev->ieee80211_ptr &&
+        if (dev->ieee80211_ptr &&
             dev->ieee80211_ptr->wiphy &&
             dev->ieee80211_ptr->wiphy->wext &&
             dev->ieee80211_ptr->wiphy->wext->get_wireless_stats)
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 9818198add8a..6fffe62d7c25 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -197,6 +197,8 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
         wdev->wext.connect.ssid_len = len;
 
         wdev->wext.connect.crypto.control_port = false;
+        wdev->wext.connect.crypto.control_port_ethertype =
+                cpu_to_be16(ETH_P_PAE);
 
         err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
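control_port_ethertype is stored big-endian, and ETH_P_PAE is the 802.1X/EAPOL ethertype, so this keeps wext-initiated connections on the standard EAPOL port-control default:

/* From <linux/if_ether.h>: the 802.1X Port Access Entity (EAPOL) type. */
#define ETH_P_PAE       0x888E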
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5e86d4e97dce..f7af98dff409 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -507,14 +507,14 @@ static int x25_listen(struct socket *sock, int backlog)
         struct sock *sk = sock->sk;
         int rc = -EOPNOTSUPP;
 
-        lock_kernel();
+        lock_sock(sk);
         if (sk->sk_state != TCP_LISTEN) {
                 memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
                 sk->sk_max_ack_backlog = backlog;
                 sk->sk_state = TCP_LISTEN;
                 rc = 0;
         }
-        unlock_kernel();
+        release_sock(sk);
 
         return rc;
 }
@@ -688,7 +688,6 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
         struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
         int len, i, rc = 0;
 
-        lock_kernel();
         if (!sock_flag(sk, SOCK_ZAPPED) ||
             addr_len != sizeof(struct sockaddr_x25) ||
             addr->sx25_family != AF_X25) {
@@ -704,12 +703,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                 }
         }
 
+        lock_sock(sk);
         x25_sk(sk)->source_addr = addr->sx25_addr;
         x25_insert_socket(sk);
         sock_reset_flag(sk, SOCK_ZAPPED);
+        release_sock(sk);
         SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
 out:
-        unlock_kernel();
         return rc;
 }
 
@@ -751,7 +751,6 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
         struct x25_route *rt;
         int rc = 0;
 
-        lock_kernel();
         lock_sock(sk);
         if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
                 sock->state = SS_CONNECTED;
@@ -829,7 +828,6 @@ out_put_route:
         x25_route_put(rt);
 out:
         release_sock(sk);
-        unlock_kernel();
         return rc;
 }
 
@@ -869,8 +867,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
         struct sk_buff *skb;
         int rc = -EINVAL;
 
-        lock_kernel();
-        if (!sk || sk->sk_state != TCP_LISTEN)
+        if (!sk)
                 goto out;
 
         rc = -EOPNOTSUPP;
@@ -878,6 +875,10 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
                 goto out;
 
         lock_sock(sk);
+        rc = -EINVAL;
+        if (sk->sk_state != TCP_LISTEN)
+                goto out2;
+
         rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
         if (rc)
                 goto out2;
@@ -897,7 +898,6 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
 out2:
         release_sock(sk);
 out:
-        unlock_kernel();
         return rc;
 }
 
@@ -909,7 +909,6 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
         struct x25_sock *x25 = x25_sk(sk);
         int rc = 0;
 
-        lock_kernel();
         if (peer) {
                 if (sk->sk_state != TCP_ESTABLISHED) {
                         rc = -ENOTCONN;
@@ -923,19 +922,6 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
         *uaddr_len = sizeof(*sx25);
 
 out:
-        unlock_kernel();
-        return rc;
-}
-
-static unsigned int x25_datagram_poll(struct file *file, struct socket *sock,
-                                      poll_table *wait)
-{
-        int rc;
-
-        lock_kernel();
-        rc = datagram_poll(file, sock, wait);
-        unlock_kernel();
-
         return rc;
 }
 
@@ -1746,7 +1732,7 @@ static const struct proto_ops x25_proto_ops = {
         .socketpair =   sock_no_socketpair,
         .accept =       x25_accept,
         .getname =      x25_getname,
-        .poll =         x25_datagram_poll,
+        .poll =         datagram_poll,
         .ioctl =        x25_ioctl,
 #ifdef CONFIG_COMPAT
         .compat_ioctl = compat_x25_ioctl,
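This file is a textbook Big Kernel Lock removal: every handler that bracketed its work with lock_kernel()/unlock_kernel() now takes the per-socket lock instead, and x25_datagram_poll() disappears entirely because it only existed to wrap datagram_poll() in the BKL. The pattern, as a sketch (do_work() is a placeholder):

/* Before: one global lock serializing all X.25 sockets. */
static int x25_op_old(struct socket *sock)
{
        int rc;

        lock_kernel();
        rc = do_work(sock);
        unlock_kernel();
        return rc;
}

/* After: a per-socket lock, so independent sockets no longer contend. */
static int x25_op_new(struct socket *sock)
{
        struct sock *sk = sock->sk;
        int rc;

        lock_sock(sk);
        rc = do_work(sock);
        release_sock(sk);
        return rc;
}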